/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <trace/events/block.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
                         struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

inline void touch_buffer(struct buffer_head *bh)
{
        trace_block_touch_buffer(bh);
        mark_page_accessed(bh->b_page);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_atomic();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the page has dirty or writeback buffers. If all the
 * buffers are unlocked and clean then the PageDirty information is stale.
 * If any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct page *page,
                                     bool *dirty, bool *writeback)
{
        struct buffer_head *head, *bh;
        *dirty = false;
        *writeback = false;

        BUG_ON(!PageLocked(page));

        if (!page_has_buffers(page))
                return;

        if (PageWriteback(page))
                *writeback = true;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_locked(bh))
                        *writeback = true;

                if (buffer_dirty(bh))
                        *dirty = true;

                bh = bh->b_this_page;
        } while (bh != head);
}
EXPORT_SYMBOL(buffer_check_dirty_writeback);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
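/*
 * Example (illustrative only, not part of buffer.c): per the comment above,
 * __wait_on_buffer() does not leave the buffer locked, so its state may
 * change again the moment it returns. A caller that needs a stable view
 * must take the lock itself and re-test under it. A minimal sketch of that
 * pattern, using a hypothetical caller:
 */
static void example_stable_buffer_check(struct buffer_head *bh)
{
        wait_on_buffer(bh);             /* wait for any in-flight I/O... */
        lock_buffer(bh);                /* ...then pin the state ourselves */
        if (buffer_uptodate(bh) && !buffer_dirty(bh)) {
                /* state cannot change while we hold BH_Lock */
        }
        unlock_buffer(bh);
}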
static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        put_page(page);
}

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
        if (!test_bit(BH_Quiet, &bh->b_state))
                printk_ratelimited(KERN_ERR
                        "Buffer I/O error on dev %pg, logical block %llu%s\n",
                        bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed read-ahead attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost sync page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device %pg blocksize: %d\n", bdev,
                        1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        put_page(page);
out:
        return ret;
}
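/*
 * Example (illustrative, not part of buffer.c): the comment above notes
 * that filesystems want this lookup to be non-blocking. A caller that only
 * wants a buffer if it is already cached can use the sb_find_get_block()
 * front-end, which never does I/O. A minimal sketch, assuming a
 * hypothetical super_block 'sb' and block number 'blocknr':
 */
static void example_peek_cached_block(struct super_block *sb, sector_t blocknr)
{
        struct buffer_head *bh;

        bh = sb_find_get_block(sb, blocknr);    /* cache lookup only, no I/O */
        if (bh) {
                /* elevated b_count pins the buffer; drop it when done */
                brelse(bh);
        }
}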
/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zoneref *z;
        int nid;

        wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
        yield();

        for_each_online_node(nid) {

                z = first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                                gfp_zone(GFP_NOFS), NULL);
                if (z->zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                                GFP_NOFS, NULL);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                buffer_io_error(bh, ", async page read");
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost async page write");
                mark_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are being read in asynchronously (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.
 * So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

static void do_thaw_one(struct super_block *sb, void *unused)
{
        while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
                printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}

static void do_thaw_all(struct work_struct *work)
{
        iterate_supers(do_thaw_one, NULL);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->private_data;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->private_data) {
                mapping->private_data = buffer_mapping;
        } else {
                BUG_ON(mapping->private_data != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
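/*
 * Example (illustrative, not part of buffer.c): the kernel-doc above calls
 * sync_mapping_buffers() a convenience function for fsync(). A minimal
 * sketch of how an ext2-style filesystem might use the pair; the helper
 * name is hypothetical:
 */
static int example_fsync(struct inode *inode)
{
        /*
         * Dependent metadata buffers (e.g. indirect blocks) were queued on
         * inode->i_mapping->private_list at modification time via
         * mark_buffer_dirty_inode(bh, inode). fsync then just writes out
         * and waits upon that list:
         */
        return sync_mapping_buffers(inode->i_mapping);
}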
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 *
 * The caller must hold lock_page_memcg().
 */
static void __set_page_dirty(struct page *page, struct address_space *mapping,
                             int warn)
{
        unsigned long flags;

        spin_lock_irqsave(&mapping->tree_lock, flags);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        int newly_dirty;
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        /*
         * Lock out page->mem_cgroup migration to keep PageDirty
         * synchronized with per-memcg dirty page counters.
         */
        lock_page_memcg(page);
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
                __set_page_dirty(page, mapping, 1);

        unlock_page_memcg(page);

        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
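/*
 * Example (illustrative, not part of buffer.c): __set_page_dirty_buffers()
 * is exported so that buffer-backed filesystems can use it directly as
 * their ->set_page_dirty address_space operation. A minimal sketch of
 * hypothetical aops wiring:
 */
static const struct address_space_operations example_aops = {
        .set_page_dirty = __set_page_dirty_buffers,
        /* .readpage, .writepage, ... would be filled in by a real fs */
};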
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;
        struct blk_plug plug;

        INIT_LIST_HEAD(&tmp);
        blk_start_plug(&plug);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * write_dirty_buffer() actually writes the
                                 * current contents - it is a noop if I/O is
                                 * still in flight on potentially older
                                 * contents.
                                 */
                                write_dirty_buffer(bh, REQ_SYNC);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        spin_unlock(lock);
        blk_finish_plug(&plug);
        spin_lock(lock);

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->private_data;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list. This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->private_data;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}
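/*
 * Worked example (illustrative): for a 4 GiB device and 1024-byte blocks,
 * blksize_bits(1024) is 10, so blkdev_max_block() returns
 * 2^32 >> 10 = 2^22 = 4194304 - the first block number that lies beyond
 * the end of the device. A zero-sized device yields ~0 instead, which
 * effectively disables the end-of-device check in init_page_buffers().
 */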
/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);
        sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        if (block < end_block)
                                set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);

        /*
         * Caller needs to validate requested block against end of device.
         */
        return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
              pgoff_t index, int size, int sizebits, gfp_t gfp)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;
        sector_t end_block;
        int ret = 0;            /* Will call free_more_memory() */
        gfp_t gfp_mask;

        gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

        /*
         * XXX: __getblk_slow() can not really deal with failure and
         * will endlessly loop on improvised global reclaim.  Prefer
         * looping in the allocator rather than here, at least that
         * code knows what it's doing.
         */
        gfp_mask |= __GFP_NOFAIL;

        page = find_or_create_page(inode->i_mapping, index, gfp_mask);
        if (!page)
                return ret;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        end_block = init_page_buffers(page, bdev,
                                                (sector_t)index << sizebits,
                                                size);
                        goto done;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
                        size);
        spin_unlock(&inode->i_mapping->private_lock);
done:
        ret = (block < end_block) ? 1 : -ENXIO;
failed:
        unlock_page(page);
        put_page(page);
        return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index. (this comparison is done using sector_t types).
1066e5657933SAndrew Morton */ 1067e5657933SAndrew Morton if (unlikely(index != block >> sizebits)) { 1068e5657933SAndrew Morton printk(KERN_ERR "%s: requested out-of-range block %llu for " 1069a1c6f057SDmitry Monakhov "device %pg\n", 10708e24eea7SHarvey Harrison __func__, (unsigned long long)block, 1071a1c6f057SDmitry Monakhov bdev); 1072e5657933SAndrew Morton return -EIO; 1073e5657933SAndrew Morton } 1074676ce6d5SHugh Dickins 10751da177e4SLinus Torvalds /* Create a page with the proper size buffers.. */ 10763b5e6454SGioh Kim return grow_dev_page(bdev, block, index, size, sizebits, gfp); 10771da177e4SLinus Torvalds } 10781da177e4SLinus Torvalds 10790026ba40SEric Biggers static struct buffer_head * 10803b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block, 10813b5e6454SGioh Kim unsigned size, gfp_t gfp) 10821da177e4SLinus Torvalds { 10831da177e4SLinus Torvalds /* Size must be multiple of hard sectorsize */ 1084e1defc4fSMartin K. Petersen if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 10851da177e4SLinus Torvalds (size < 512 || size > PAGE_SIZE))) { 10861da177e4SLinus Torvalds printk(KERN_ERR "getblk(): invalid block size %d requested\n", 10871da177e4SLinus Torvalds size); 1088e1defc4fSMartin K. Petersen printk(KERN_ERR "logical block size: %d\n", 1089e1defc4fSMartin K. Petersen bdev_logical_block_size(bdev)); 10901da177e4SLinus Torvalds 10911da177e4SLinus Torvalds dump_stack(); 10921da177e4SLinus Torvalds return NULL; 10931da177e4SLinus Torvalds } 10941da177e4SLinus Torvalds 1095676ce6d5SHugh Dickins for (;;) { 1096676ce6d5SHugh Dickins struct buffer_head *bh; 1097676ce6d5SHugh Dickins int ret; 1098676ce6d5SHugh Dickins 10991da177e4SLinus Torvalds bh = __find_get_block(bdev, block, size); 11001da177e4SLinus Torvalds if (bh) 11011da177e4SLinus Torvalds return bh; 11021da177e4SLinus Torvalds 11033b5e6454SGioh Kim ret = grow_buffers(bdev, block, size, gfp); 1104676ce6d5SHugh Dickins if (ret < 0) 110591f68c89SJeff Moyer return NULL; 1106676ce6d5SHugh Dickins if (ret == 0) 1107676ce6d5SHugh Dickins free_more_memory(); 1108676ce6d5SHugh Dickins } 11091da177e4SLinus Torvalds } 11101da177e4SLinus Torvalds 11111da177e4SLinus Torvalds /* 11121da177e4SLinus Torvalds * The relationship between dirty buffers and dirty pages: 11131da177e4SLinus Torvalds * 11141da177e4SLinus Torvalds * Whenever a page has any dirty buffers, the page's dirty bit is set, and 11151da177e4SLinus Torvalds * the page is tagged dirty in its radix tree. 11161da177e4SLinus Torvalds * 11171da177e4SLinus Torvalds * At all times, the dirtiness of the buffers represents the dirtiness of 11181da177e4SLinus Torvalds * subsections of the page. If the page has buffers, the page dirty bit is 11191da177e4SLinus Torvalds * merely a hint about the true dirty state. 11201da177e4SLinus Torvalds * 11211da177e4SLinus Torvalds * When a page is set dirty in its entirety, all its buffers are marked dirty 11221da177e4SLinus Torvalds * (if the page has buffers). 11231da177e4SLinus Torvalds * 11241da177e4SLinus Torvalds * When a buffer is marked dirty, its page is dirtied, but the page's other 11251da177e4SLinus Torvalds * buffers are not. 11261da177e4SLinus Torvalds * 11271da177e4SLinus Torvalds * Also. When blockdev buffers are explicitly read with bread(), they 11281da177e4SLinus Torvalds * individually become uptodate. But their backing page remains not 11291da177e4SLinus Torvalds * uptodate - even if all of its buffers are uptodate. 
A subsequent 11301da177e4SLinus Torvalds * block_read_full_page() against that page will discover all the uptodate 11311da177e4SLinus Torvalds * buffers, will set the page uptodate and will perform no I/O. 11321da177e4SLinus Torvalds */ 11331da177e4SLinus Torvalds 11341da177e4SLinus Torvalds /** 11351da177e4SLinus Torvalds * mark_buffer_dirty - mark a buffer_head as needing writeout 113667be2dd1SMartin Waitz * @bh: the buffer_head to mark dirty 11371da177e4SLinus Torvalds * 11381da177e4SLinus Torvalds * mark_buffer_dirty() will set the dirty bit against the buffer, then set its 11391da177e4SLinus Torvalds * backing page dirty, then tag the page as dirty in its address_space's radix 11401da177e4SLinus Torvalds * tree and then attach the address_space's inode to its superblock's dirty 11411da177e4SLinus Torvalds * inode list. 11421da177e4SLinus Torvalds * 11431da177e4SLinus Torvalds * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, 1144250df6edSDave Chinner * mapping->tree_lock and mapping->host->i_lock. 11451da177e4SLinus Torvalds */ 1146fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh) 11471da177e4SLinus Torvalds { 1148787d2214SNick Piggin WARN_ON_ONCE(!buffer_uptodate(bh)); 11491be62dc1SLinus Torvalds 11505305cb83STejun Heo trace_block_dirty_buffer(bh); 11515305cb83STejun Heo 11521be62dc1SLinus Torvalds /* 11531be62dc1SLinus Torvalds * Very *carefully* optimize the it-is-already-dirty case. 11541be62dc1SLinus Torvalds * 11551be62dc1SLinus Torvalds * Don't let the final "is it dirty" escape to before we 11561be62dc1SLinus Torvalds * perhaps modified the buffer. 11571be62dc1SLinus Torvalds */ 11581be62dc1SLinus Torvalds if (buffer_dirty(bh)) { 11591be62dc1SLinus Torvalds smp_mb(); 11601be62dc1SLinus Torvalds if (buffer_dirty(bh)) 11611be62dc1SLinus Torvalds return; 11621be62dc1SLinus Torvalds } 11631be62dc1SLinus Torvalds 1164a8e7d49aSLinus Torvalds if (!test_set_buffer_dirty(bh)) { 1165a8e7d49aSLinus Torvalds struct page *page = bh->b_page; 1166c4843a75SGreg Thelen struct address_space *mapping = NULL; 1167c4843a75SGreg Thelen 116862cccb8cSJohannes Weiner lock_page_memcg(page); 11698e9d78edSLinus Torvalds if (!TestSetPageDirty(page)) { 1170c4843a75SGreg Thelen mapping = page_mapping(page); 11718e9d78edSLinus Torvalds if (mapping) 117262cccb8cSJohannes Weiner __set_page_dirty(page, mapping, 0); 11738e9d78edSLinus Torvalds } 117462cccb8cSJohannes Weiner unlock_page_memcg(page); 1175c4843a75SGreg Thelen if (mapping) 1176c4843a75SGreg Thelen __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 1177a8e7d49aSLinus Torvalds } 11781da177e4SLinus Torvalds } 11791fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty); 11801da177e4SLinus Torvalds 1181*87354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh) 1182*87354e5dSJeff Layton { 1183*87354e5dSJeff Layton set_buffer_write_io_error(bh); 1184*87354e5dSJeff Layton /* FIXME: do we need to set this in both places? */ 1185*87354e5dSJeff Layton if (bh->b_page && bh->b_page->mapping) 1186*87354e5dSJeff Layton mapping_set_error(bh->b_page->mapping, -EIO); 1187*87354e5dSJeff Layton if (bh->b_assoc_map) 1188*87354e5dSJeff Layton mapping_set_error(bh->b_assoc_map, -EIO); 1189*87354e5dSJeff Layton } 1190*87354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error); 1191*87354e5dSJeff Layton 11921da177e4SLinus Torvalds /* 11931da177e4SLinus Torvalds * Decrement a buffer_head's reference count. 
If all buffers against a page 11941da177e4SLinus Torvalds * have zero reference count, are clean and unlocked, and if the page is clean 11951da177e4SLinus Torvalds * and unlocked then try_to_free_buffers() may strip the buffers from the page 11961da177e4SLinus Torvalds * in preparation for freeing it (sometimes, rarely, buffers are removed from 11971da177e4SLinus Torvalds * a page but it ends up not being freed, and buffers may later be reattached). 11981da177e4SLinus Torvalds */ 11991da177e4SLinus Torvalds void __brelse(struct buffer_head * buf) 12001da177e4SLinus Torvalds { 12011da177e4SLinus Torvalds if (atomic_read(&buf->b_count)) { 12021da177e4SLinus Torvalds put_bh(buf); 12031da177e4SLinus Torvalds return; 12041da177e4SLinus Torvalds } 12055c752ad9SArjan van de Ven WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 12061da177e4SLinus Torvalds } 12071fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse); 12081da177e4SLinus Torvalds 12091da177e4SLinus Torvalds /* 12101da177e4SLinus Torvalds * bforget() is like brelse(), except it discards any 12111da177e4SLinus Torvalds * potentially dirty data. 12121da177e4SLinus Torvalds */ 12131da177e4SLinus Torvalds void __bforget(struct buffer_head *bh) 12141da177e4SLinus Torvalds { 12151da177e4SLinus Torvalds clear_buffer_dirty(bh); 1216535ee2fbSJan Kara if (bh->b_assoc_map) { 12171da177e4SLinus Torvalds struct address_space *buffer_mapping = bh->b_page->mapping; 12181da177e4SLinus Torvalds 12191da177e4SLinus Torvalds spin_lock(&buffer_mapping->private_lock); 12201da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers); 122158ff407bSJan Kara bh->b_assoc_map = NULL; 12221da177e4SLinus Torvalds spin_unlock(&buffer_mapping->private_lock); 12231da177e4SLinus Torvalds } 12241da177e4SLinus Torvalds __brelse(bh); 12251da177e4SLinus Torvalds } 12261fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget); 12271da177e4SLinus Torvalds 12281da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh) 12291da177e4SLinus Torvalds { 12301da177e4SLinus Torvalds lock_buffer(bh); 12311da177e4SLinus Torvalds if (buffer_uptodate(bh)) { 12321da177e4SLinus Torvalds unlock_buffer(bh); 12331da177e4SLinus Torvalds return bh; 12341da177e4SLinus Torvalds } else { 12351da177e4SLinus Torvalds get_bh(bh); 12361da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_sync; 12372a222ca9SMike Christie submit_bh(REQ_OP_READ, 0, bh); 12381da177e4SLinus Torvalds wait_on_buffer(bh); 12391da177e4SLinus Torvalds if (buffer_uptodate(bh)) 12401da177e4SLinus Torvalds return bh; 12411da177e4SLinus Torvalds } 12421da177e4SLinus Torvalds brelse(bh); 12431da177e4SLinus Torvalds return NULL; 12441da177e4SLinus Torvalds } 12451da177e4SLinus Torvalds 12461da177e4SLinus Torvalds /* 12471da177e4SLinus Torvalds * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 12481da177e4SLinus Torvalds * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 12491da177e4SLinus Torvalds * refcount elevated by one when they're in an LRU. A buffer can only appear 12501da177e4SLinus Torvalds * once in a particular CPU's LRU. A single buffer can be present in multiple 12511da177e4SLinus Torvalds * CPU's LRUs at the same time. 12521da177e4SLinus Torvalds * 12531da177e4SLinus Torvalds * This is a transparent caching front-end to sb_bread(), sb_getblk() and 12541da177e4SLinus Torvalds * sb_find_get_block(). 12551da177e4SLinus Torvalds * 12561da177e4SLinus Torvalds * The LRUs themselves only need locking against invalidate_bh_lrus. 
We use 12571da177e4SLinus Torvalds * a local interrupt disable for that. 12581da177e4SLinus Torvalds */ 12591da177e4SLinus Torvalds 126086cf78d7SSebastien Buisson #define BH_LRU_SIZE 16 12611da177e4SLinus Torvalds 12621da177e4SLinus Torvalds struct bh_lru { 12631da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE]; 12641da177e4SLinus Torvalds }; 12651da177e4SLinus Torvalds 12661da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; 12671da177e4SLinus Torvalds 12681da177e4SLinus Torvalds #ifdef CONFIG_SMP 12691da177e4SLinus Torvalds #define bh_lru_lock() local_irq_disable() 12701da177e4SLinus Torvalds #define bh_lru_unlock() local_irq_enable() 12711da177e4SLinus Torvalds #else 12721da177e4SLinus Torvalds #define bh_lru_lock() preempt_disable() 12731da177e4SLinus Torvalds #define bh_lru_unlock() preempt_enable() 12741da177e4SLinus Torvalds #endif 12751da177e4SLinus Torvalds 12761da177e4SLinus Torvalds static inline void check_irqs_on(void) 12771da177e4SLinus Torvalds { 12781da177e4SLinus Torvalds #ifdef irqs_disabled 12791da177e4SLinus Torvalds BUG_ON(irqs_disabled()); 12801da177e4SLinus Torvalds #endif 12811da177e4SLinus Torvalds } 12821da177e4SLinus Torvalds 12831da177e4SLinus Torvalds /* 12841da177e4SLinus Torvalds * The LRU management algorithm is dopey-but-simple. Sorry. 12851da177e4SLinus Torvalds */ 12861da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh) 12871da177e4SLinus Torvalds { 12881da177e4SLinus Torvalds struct buffer_head *evictee = NULL; 12891da177e4SLinus Torvalds 12901da177e4SLinus Torvalds check_irqs_on(); 12911da177e4SLinus Torvalds bh_lru_lock(); 1292c7b92516SChristoph Lameter if (__this_cpu_read(bh_lrus.bhs[0]) != bh) { 12931da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE]; 12941da177e4SLinus Torvalds int in; 12951da177e4SLinus Torvalds int out = 0; 12961da177e4SLinus Torvalds 12971da177e4SLinus Torvalds get_bh(bh); 12981da177e4SLinus Torvalds bhs[out++] = bh; 12991da177e4SLinus Torvalds for (in = 0; in < BH_LRU_SIZE; in++) { 1300c7b92516SChristoph Lameter struct buffer_head *bh2 = 1301c7b92516SChristoph Lameter __this_cpu_read(bh_lrus.bhs[in]); 13021da177e4SLinus Torvalds 13031da177e4SLinus Torvalds if (bh2 == bh) { 13041da177e4SLinus Torvalds __brelse(bh2); 13051da177e4SLinus Torvalds } else { 13061da177e4SLinus Torvalds if (out >= BH_LRU_SIZE) { 13071da177e4SLinus Torvalds BUG_ON(evictee != NULL); 13081da177e4SLinus Torvalds evictee = bh2; 13091da177e4SLinus Torvalds } else { 13101da177e4SLinus Torvalds bhs[out++] = bh2; 13111da177e4SLinus Torvalds } 13121da177e4SLinus Torvalds } 13131da177e4SLinus Torvalds } 13141da177e4SLinus Torvalds while (out < BH_LRU_SIZE) 13151da177e4SLinus Torvalds bhs[out++] = NULL; 1316ca6673b0SChristoph Lameter memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs)); 13171da177e4SLinus Torvalds } 13181da177e4SLinus Torvalds bh_lru_unlock(); 13191da177e4SLinus Torvalds 13201da177e4SLinus Torvalds if (evictee) 13211da177e4SLinus Torvalds __brelse(evictee); 13221da177e4SLinus Torvalds } 13231da177e4SLinus Torvalds 13241da177e4SLinus Torvalds /* 13251da177e4SLinus Torvalds * Look up the bh in this cpu's LRU. If it's there, move it to the head. 
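 *
 * Illustrative sketch of the move-to-front (not in the original): a
 * hit on bhs[2] shifts bhs[0] and bhs[1] down one slot and puts the
 * match at bhs[0], so for [A, B, C, ...] a lookup of C leaves
 * [C, A, B, ...].  The hit also takes an extra reference for the
 * caller via get_bh(); the LRU keeps its own reference throughout.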
13261da177e4SLinus Torvalds */ 1327858119e1SArjan van de Ven static struct buffer_head * 13283991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) 13291da177e4SLinus Torvalds { 13301da177e4SLinus Torvalds struct buffer_head *ret = NULL; 13313991d3bdSTomasz Kvarsin unsigned int i; 13321da177e4SLinus Torvalds 13331da177e4SLinus Torvalds check_irqs_on(); 13341da177e4SLinus Torvalds bh_lru_lock(); 13351da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 1336c7b92516SChristoph Lameter struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); 13371da177e4SLinus Torvalds 13389470dd5dSZach Brown if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && 13399470dd5dSZach Brown bh->b_size == size) { 13401da177e4SLinus Torvalds if (i) { 13411da177e4SLinus Torvalds while (i) { 1342c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[i], 1343c7b92516SChristoph Lameter __this_cpu_read(bh_lrus.bhs[i - 1])); 13441da177e4SLinus Torvalds i--; 13451da177e4SLinus Torvalds } 1346c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[0], bh); 13471da177e4SLinus Torvalds } 13481da177e4SLinus Torvalds get_bh(bh); 13491da177e4SLinus Torvalds ret = bh; 13501da177e4SLinus Torvalds break; 13511da177e4SLinus Torvalds } 13521da177e4SLinus Torvalds } 13531da177e4SLinus Torvalds bh_lru_unlock(); 13541da177e4SLinus Torvalds return ret; 13551da177e4SLinus Torvalds } 13561da177e4SLinus Torvalds 13571da177e4SLinus Torvalds /* 13581da177e4SLinus Torvalds * Perform a pagecache lookup for the matching buffer. If it's there, refresh 13591da177e4SLinus Torvalds * it in the LRU and mark it as accessed. If it is not present then return 13601da177e4SLinus Torvalds * NULL 13611da177e4SLinus Torvalds */ 13621da177e4SLinus Torvalds struct buffer_head * 13633991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size) 13641da177e4SLinus Torvalds { 13651da177e4SLinus Torvalds struct buffer_head *bh = lookup_bh_lru(bdev, block, size); 13661da177e4SLinus Torvalds 13671da177e4SLinus Torvalds if (bh == NULL) { 13682457aec6SMel Gorman /* __find_get_block_slow will mark the page accessed */ 1369385fd4c5SCoywolf Qi Hunt bh = __find_get_block_slow(bdev, block); 13701da177e4SLinus Torvalds if (bh) 13711da177e4SLinus Torvalds bh_lru_install(bh); 13722457aec6SMel Gorman } else 13731da177e4SLinus Torvalds touch_buffer(bh); 13742457aec6SMel Gorman 13751da177e4SLinus Torvalds return bh; 13761da177e4SLinus Torvalds } 13771da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block); 13781da177e4SLinus Torvalds 13791da177e4SLinus Torvalds /* 13803b5e6454SGioh Kim * __getblk_gfp() will locate (and, if necessary, create) the buffer_head 13811da177e4SLinus Torvalds * which corresponds to the passed block_device, block and size. The 13821da177e4SLinus Torvalds * returned buffer has its reference count incremented. 13831da177e4SLinus Torvalds * 13843b5e6454SGioh Kim * __getblk_gfp() will lock up the machine if grow_dev_page's 13853b5e6454SGioh Kim * try_to_free_buffers() attempt is failing. FIXME, perhaps? 
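 *
 * Illustrative usage sketch (an assumption-laden example, not part of
 * the original file): a filesystem that is about to overwrite a whole
 * block needs no read first, so the sb_getblk() wrapper is enough:
 *
 *	bh = sb_getblk(sb, blocknr);
 *	if (!bh)
 *		return -ENOMEM;
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);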
13861da177e4SLinus Torvalds */ 13871da177e4SLinus Torvalds struct buffer_head * 13883b5e6454SGioh Kim __getblk_gfp(struct block_device *bdev, sector_t block, 13893b5e6454SGioh Kim unsigned size, gfp_t gfp) 13901da177e4SLinus Torvalds { 13911da177e4SLinus Torvalds struct buffer_head *bh = __find_get_block(bdev, block, size); 13921da177e4SLinus Torvalds 13931da177e4SLinus Torvalds might_sleep(); 13941da177e4SLinus Torvalds if (bh == NULL) 13953b5e6454SGioh Kim bh = __getblk_slow(bdev, block, size, gfp); 13961da177e4SLinus Torvalds return bh; 13971da177e4SLinus Torvalds } 13983b5e6454SGioh Kim EXPORT_SYMBOL(__getblk_gfp); 13991da177e4SLinus Torvalds 14001da177e4SLinus Torvalds /* 14011da177e4SLinus Torvalds * Do async read-ahead on a buffer.. 14021da177e4SLinus Torvalds */ 14033991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 14041da177e4SLinus Torvalds { 14051da177e4SLinus Torvalds struct buffer_head *bh = __getblk(bdev, block, size); 1406a3e713b5SAndrew Morton if (likely(bh)) { 140770246286SChristoph Hellwig ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); 14081da177e4SLinus Torvalds brelse(bh); 14091da177e4SLinus Torvalds } 1410a3e713b5SAndrew Morton } 14111da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead); 14121da177e4SLinus Torvalds 14131da177e4SLinus Torvalds /** 14143b5e6454SGioh Kim * __bread_gfp() - reads a specified block and returns the bh 141567be2dd1SMartin Waitz * @bdev: the block_device to read from 14161da177e4SLinus Torvalds * @block: number of block 14171da177e4SLinus Torvalds * @size: size (in bytes) to read 14183b5e6454SGioh Kim * @gfp: page allocation flag 14191da177e4SLinus Torvalds * 14201da177e4SLinus Torvalds * Reads a specified block, and returns buffer head that contains it. 14213b5e6454SGioh Kim * The page cache can be allocated from non-movable area 14223b5e6454SGioh Kim * not to prevent page migration if you set gfp to zero. 14231da177e4SLinus Torvalds * It returns NULL if the block was unreadable. 14241da177e4SLinus Torvalds */ 14251da177e4SLinus Torvalds struct buffer_head * 14263b5e6454SGioh Kim __bread_gfp(struct block_device *bdev, sector_t block, 14273b5e6454SGioh Kim unsigned size, gfp_t gfp) 14281da177e4SLinus Torvalds { 14293b5e6454SGioh Kim struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); 14301da177e4SLinus Torvalds 1431a3e713b5SAndrew Morton if (likely(bh) && !buffer_uptodate(bh)) 14321da177e4SLinus Torvalds bh = __bread_slow(bh); 14331da177e4SLinus Torvalds return bh; 14341da177e4SLinus Torvalds } 14353b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp); 14361da177e4SLinus Torvalds 14371da177e4SLinus Torvalds /* 14381da177e4SLinus Torvalds * invalidate_bh_lrus() is called rarely - but not only at unmount. 14391da177e4SLinus Torvalds * This doesn't race because it runs in each cpu either in irq 14401da177e4SLinus Torvalds * or with preempt disabled. 
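 *
 * (Each CPU drops only its own bh_lrus entries, and on_each_cpu_cond()
 * below skips CPUs whose LRU is already empty, so idle CPUs pay
 * nothing.)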
14411da177e4SLinus Torvalds */ 14421da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg) 14431da177e4SLinus Torvalds { 14441da177e4SLinus Torvalds struct bh_lru *b = &get_cpu_var(bh_lrus); 14451da177e4SLinus Torvalds int i; 14461da177e4SLinus Torvalds 14471da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 14481da177e4SLinus Torvalds brelse(b->bhs[i]); 14491da177e4SLinus Torvalds b->bhs[i] = NULL; 14501da177e4SLinus Torvalds } 14511da177e4SLinus Torvalds put_cpu_var(bh_lrus); 14521da177e4SLinus Torvalds } 14531da177e4SLinus Torvalds 145442be35d0SGilad Ben-Yossef static bool has_bh_in_lru(int cpu, void *dummy) 145542be35d0SGilad Ben-Yossef { 145642be35d0SGilad Ben-Yossef struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); 145742be35d0SGilad Ben-Yossef int i; 145842be35d0SGilad Ben-Yossef 145942be35d0SGilad Ben-Yossef for (i = 0; i < BH_LRU_SIZE; i++) { 146042be35d0SGilad Ben-Yossef if (b->bhs[i]) 146142be35d0SGilad Ben-Yossef return 1; 146242be35d0SGilad Ben-Yossef } 146342be35d0SGilad Ben-Yossef 146442be35d0SGilad Ben-Yossef return 0; 146542be35d0SGilad Ben-Yossef } 146642be35d0SGilad Ben-Yossef 1467f9a14399SPeter Zijlstra void invalidate_bh_lrus(void) 14681da177e4SLinus Torvalds { 146942be35d0SGilad Ben-Yossef on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL); 14701da177e4SLinus Torvalds } 14719db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus); 14721da177e4SLinus Torvalds 14731da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh, 14741da177e4SLinus Torvalds struct page *page, unsigned long offset) 14751da177e4SLinus Torvalds { 14761da177e4SLinus Torvalds bh->b_page = page; 1477e827f923SEric Sesterhenn BUG_ON(offset >= PAGE_SIZE); 14781da177e4SLinus Torvalds if (PageHighMem(page)) 14791da177e4SLinus Torvalds /* 14801da177e4SLinus Torvalds * This catches illegal uses and preserves the offset: 14811da177e4SLinus Torvalds */ 14821da177e4SLinus Torvalds bh->b_data = (char *)(0 + offset); 14831da177e4SLinus Torvalds else 14841da177e4SLinus Torvalds bh->b_data = page_address(page) + offset; 14851da177e4SLinus Torvalds } 14861da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page); 14871da177e4SLinus Torvalds 14881da177e4SLinus Torvalds /* 14891da177e4SLinus Torvalds * Called when truncating a buffer on a page completely. 
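 *
 * Discarding means: drop the dirty bit, forget the device binding, and
 * atomically clear the mapping-related state bits listed in
 * BUFFER_FLAGS_DISCARD below, while leaving bits such as BH_Uptodate
 * alone for any concurrent holder of a reference.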
14901da177e4SLinus Torvalds */ 1491e7470ee8SMel Gorman 1492e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */ 1493e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \ 1494e7470ee8SMel Gorman (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \ 1495e7470ee8SMel Gorman 1 << BH_Delay | 1 << BH_Unwritten) 1496e7470ee8SMel Gorman 1497858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh) 14981da177e4SLinus Torvalds { 1499e7470ee8SMel Gorman unsigned long b_state, b_state_old; 1500e7470ee8SMel Gorman 15011da177e4SLinus Torvalds lock_buffer(bh); 15021da177e4SLinus Torvalds clear_buffer_dirty(bh); 15031da177e4SLinus Torvalds bh->b_bdev = NULL; 1504e7470ee8SMel Gorman b_state = bh->b_state; 1505e7470ee8SMel Gorman for (;;) { 1506e7470ee8SMel Gorman b_state_old = cmpxchg(&bh->b_state, b_state, 1507e7470ee8SMel Gorman (b_state & ~BUFFER_FLAGS_DISCARD)); 1508e7470ee8SMel Gorman if (b_state_old == b_state) 1509e7470ee8SMel Gorman break; 1510e7470ee8SMel Gorman b_state = b_state_old; 1511e7470ee8SMel Gorman } 15121da177e4SLinus Torvalds unlock_buffer(bh); 15131da177e4SLinus Torvalds } 15141da177e4SLinus Torvalds 15151da177e4SLinus Torvalds /** 1516814e1d25SWang Sheng-Hui * block_invalidatepage - invalidate part or all of a buffer-backed page 15171da177e4SLinus Torvalds * 15181da177e4SLinus Torvalds * @page: the page which is affected 1519d47992f8SLukas Czerner * @offset: start of the range to invalidate 1520d47992f8SLukas Czerner * @length: length of the range to invalidate 15211da177e4SLinus Torvalds * 15221da177e4SLinus Torvalds * block_invalidatepage() is called when all or part of the page has become 15231da177e4SLinus Torvalds * invalidated by a truncate operation. 15241da177e4SLinus Torvalds * 15251da177e4SLinus Torvalds * block_invalidatepage() does not have to release all buffers, but it must 15261da177e4SLinus Torvalds * ensure that no dirty buffer is left outside @offset and that no I/O 15271da177e4SLinus Torvalds * is underway against any of the blocks which are outside the truncation 15281da177e4SLinus Torvalds * point. Because the caller is about to free (and possibly reuse) those 15291da177e4SLinus Torvalds * blocks on-disk. 15301da177e4SLinus Torvalds */ 1531d47992f8SLukas Czerner void block_invalidatepage(struct page *page, unsigned int offset, 1532d47992f8SLukas Czerner unsigned int length) 15331da177e4SLinus Torvalds { 15341da177e4SLinus Torvalds struct buffer_head *head, *bh, *next; 15351da177e4SLinus Torvalds unsigned int curr_off = 0; 1536d47992f8SLukas Czerner unsigned int stop = length + offset; 15371da177e4SLinus Torvalds 15381da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 15391da177e4SLinus Torvalds if (!page_has_buffers(page)) 15401da177e4SLinus Torvalds goto out; 15411da177e4SLinus Torvalds 1542d47992f8SLukas Czerner /* 1543d47992f8SLukas Czerner * Check for overflow 1544d47992f8SLukas Czerner */ 154509cbfeafSKirill A. Shutemov BUG_ON(stop > PAGE_SIZE || stop < length); 1546d47992f8SLukas Czerner 15471da177e4SLinus Torvalds head = page_buffers(page); 15481da177e4SLinus Torvalds bh = head; 15491da177e4SLinus Torvalds do { 15501da177e4SLinus Torvalds unsigned int next_off = curr_off + bh->b_size; 15511da177e4SLinus Torvalds next = bh->b_this_page; 15521da177e4SLinus Torvalds 15531da177e4SLinus Torvalds /* 1554d47992f8SLukas Czerner * Are we still fully in range ? 
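 * (A buffer that straddles the end of the range must be left alone:
 * discarding it would also throw away bytes beyond the region the
 * caller asked us to invalidate.)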
1555d47992f8SLukas Czerner */ 1556d47992f8SLukas Czerner if (next_off > stop) 1557d47992f8SLukas Czerner goto out; 1558d47992f8SLukas Czerner 1559d47992f8SLukas Czerner /* 15601da177e4SLinus Torvalds * is this block fully invalidated? 15611da177e4SLinus Torvalds */ 15621da177e4SLinus Torvalds if (offset <= curr_off) 15631da177e4SLinus Torvalds discard_buffer(bh); 15641da177e4SLinus Torvalds curr_off = next_off; 15651da177e4SLinus Torvalds bh = next; 15661da177e4SLinus Torvalds } while (bh != head); 15671da177e4SLinus Torvalds 15681da177e4SLinus Torvalds /* 15691da177e4SLinus Torvalds * We release buffers only if the entire page is being invalidated. 15701da177e4SLinus Torvalds * The get_block cached value has been unconditionally invalidated, 15711da177e4SLinus Torvalds * so real IO is not possible anymore. 15721da177e4SLinus Torvalds */ 15731da177e4SLinus Torvalds if (offset == 0) 15742ff28e22SNeilBrown try_to_release_page(page, 0); 15751da177e4SLinus Torvalds out: 15762ff28e22SNeilBrown return; 15771da177e4SLinus Torvalds } 15781da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage); 15791da177e4SLinus Torvalds 1580d47992f8SLukas Czerner 15811da177e4SLinus Torvalds /* 15821da177e4SLinus Torvalds * We attach and possibly dirty the buffers atomically wrt 15831da177e4SLinus Torvalds * __set_page_dirty_buffers() via private_lock. try_to_free_buffers 15841da177e4SLinus Torvalds * is already excluded via the page lock. 15851da177e4SLinus Torvalds */ 15861da177e4SLinus Torvalds void create_empty_buffers(struct page *page, 15871da177e4SLinus Torvalds unsigned long blocksize, unsigned long b_state) 15881da177e4SLinus Torvalds { 15891da177e4SLinus Torvalds struct buffer_head *bh, *head, *tail; 15901da177e4SLinus Torvalds 15911da177e4SLinus Torvalds head = alloc_page_buffers(page, blocksize, 1); 15921da177e4SLinus Torvalds bh = head; 15931da177e4SLinus Torvalds do { 15941da177e4SLinus Torvalds bh->b_state |= b_state; 15951da177e4SLinus Torvalds tail = bh; 15961da177e4SLinus Torvalds bh = bh->b_this_page; 15971da177e4SLinus Torvalds } while (bh); 15981da177e4SLinus Torvalds tail->b_this_page = head; 15991da177e4SLinus Torvalds 16001da177e4SLinus Torvalds spin_lock(&page->mapping->private_lock); 16011da177e4SLinus Torvalds if (PageUptodate(page) || PageDirty(page)) { 16021da177e4SLinus Torvalds bh = head; 16031da177e4SLinus Torvalds do { 16041da177e4SLinus Torvalds if (PageDirty(page)) 16051da177e4SLinus Torvalds set_buffer_dirty(bh); 16061da177e4SLinus Torvalds if (PageUptodate(page)) 16071da177e4SLinus Torvalds set_buffer_uptodate(bh); 16081da177e4SLinus Torvalds bh = bh->b_this_page; 16091da177e4SLinus Torvalds } while (bh != head); 16101da177e4SLinus Torvalds } 16111da177e4SLinus Torvalds attach_page_buffers(page, head); 16121da177e4SLinus Torvalds spin_unlock(&page->mapping->private_lock); 16131da177e4SLinus Torvalds } 16141da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers); 16151da177e4SLinus Torvalds 161629f3ad7dSJan Kara /** 161729f3ad7dSJan Kara * clean_bdev_aliases: clean a range of buffers in block device 161829f3ad7dSJan Kara * @bdev: Block device to clean buffers in 161929f3ad7dSJan Kara * @block: Start of a range of blocks to clean 162029f3ad7dSJan Kara * @len: Number of blocks to clean 16211da177e4SLinus Torvalds * 162229f3ad7dSJan Kara * We are taking a range of blocks for data and we don't want writeback of any 162329f3ad7dSJan Kara * buffer-cache aliases starting from return from this function and until the 162429f3ad7dSJan Kara * moment when something will explicitly 
mark the buffer dirty (hopefully that 162529f3ad7dSJan Kara * will not happen until we will free that block ;-) We don't even need to mark 162629f3ad7dSJan Kara * it not-uptodate - nobody can expect anything from a newly allocated buffer 162729f3ad7dSJan Kara * anyway. We used to use unmap_buffer() for such invalidation, but that was 162829f3ad7dSJan Kara * wrong. We definitely don't want to mark the alias unmapped, for example - it 162929f3ad7dSJan Kara * would confuse anyone who might pick it with bread() afterwards... 163029f3ad7dSJan Kara * 163129f3ad7dSJan Kara * Also.. Note that bforget() doesn't lock the buffer. So there can be 163229f3ad7dSJan Kara * writeout I/O going on against recently-freed buffers. We don't wait on that 163329f3ad7dSJan Kara * I/O in bforget() - it's more efficient to wait on the I/O only if we really 163429f3ad7dSJan Kara * need to. That happens here. 16351da177e4SLinus Torvalds */ 163629f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) 16371da177e4SLinus Torvalds { 163829f3ad7dSJan Kara struct inode *bd_inode = bdev->bd_inode; 163929f3ad7dSJan Kara struct address_space *bd_mapping = bd_inode->i_mapping; 164029f3ad7dSJan Kara struct pagevec pvec; 164129f3ad7dSJan Kara pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); 164229f3ad7dSJan Kara pgoff_t end; 164329f3ad7dSJan Kara int i; 164429f3ad7dSJan Kara struct buffer_head *bh; 164529f3ad7dSJan Kara struct buffer_head *head; 16461da177e4SLinus Torvalds 164729f3ad7dSJan Kara end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits); 164829f3ad7dSJan Kara pagevec_init(&pvec, 0); 164929f3ad7dSJan Kara while (index <= end && pagevec_lookup(&pvec, bd_mapping, index, 165029f3ad7dSJan Kara min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { 165129f3ad7dSJan Kara for (i = 0; i < pagevec_count(&pvec); i++) { 165229f3ad7dSJan Kara struct page *page = pvec.pages[i]; 16531da177e4SLinus Torvalds 165429f3ad7dSJan Kara index = page->index; 165529f3ad7dSJan Kara if (index > end) 165629f3ad7dSJan Kara break; 165729f3ad7dSJan Kara if (!page_has_buffers(page)) 165829f3ad7dSJan Kara continue; 165929f3ad7dSJan Kara /* 166029f3ad7dSJan Kara * We use page lock instead of bd_mapping->private_lock 166129f3ad7dSJan Kara * to pin buffers here since we can afford to sleep and 166229f3ad7dSJan Kara * it scales better than a global spinlock lock. 
166329f3ad7dSJan Kara */ 166429f3ad7dSJan Kara lock_page(page); 166529f3ad7dSJan Kara /* Recheck when the page is locked which pins bhs */ 166629f3ad7dSJan Kara if (!page_has_buffers(page)) 166729f3ad7dSJan Kara goto unlock_page; 166829f3ad7dSJan Kara head = page_buffers(page); 166929f3ad7dSJan Kara bh = head; 167029f3ad7dSJan Kara do { 16716c006a9dSChandan Rajendra if (!buffer_mapped(bh) || (bh->b_blocknr < block)) 167229f3ad7dSJan Kara goto next; 167329f3ad7dSJan Kara if (bh->b_blocknr >= block + len) 167429f3ad7dSJan Kara break; 167529f3ad7dSJan Kara clear_buffer_dirty(bh); 167629f3ad7dSJan Kara wait_on_buffer(bh); 167729f3ad7dSJan Kara clear_buffer_req(bh); 167829f3ad7dSJan Kara next: 167929f3ad7dSJan Kara bh = bh->b_this_page; 168029f3ad7dSJan Kara } while (bh != head); 168129f3ad7dSJan Kara unlock_page: 168229f3ad7dSJan Kara unlock_page(page); 168329f3ad7dSJan Kara } 168429f3ad7dSJan Kara pagevec_release(&pvec); 168529f3ad7dSJan Kara cond_resched(); 168629f3ad7dSJan Kara index++; 16871da177e4SLinus Torvalds } 16881da177e4SLinus Torvalds } 168929f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases); 16901da177e4SLinus Torvalds 16911da177e4SLinus Torvalds /* 169245bce8f3SLinus Torvalds * Size is a power-of-two in the range 512..PAGE_SIZE, 169345bce8f3SLinus Torvalds * and the case we care about most is PAGE_SIZE. 169445bce8f3SLinus Torvalds * 169545bce8f3SLinus Torvalds * So this *could* possibly be written with those 169645bce8f3SLinus Torvalds * constraints in mind (relevant mostly if some 169745bce8f3SLinus Torvalds * architecture has a slow bit-scan instruction) 169845bce8f3SLinus Torvalds */ 169945bce8f3SLinus Torvalds static inline int block_size_bits(unsigned int blocksize) 170045bce8f3SLinus Torvalds { 170145bce8f3SLinus Torvalds return ilog2(blocksize); 170245bce8f3SLinus Torvalds } 170345bce8f3SLinus Torvalds 170445bce8f3SLinus Torvalds static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state) 170545bce8f3SLinus Torvalds { 170645bce8f3SLinus Torvalds BUG_ON(!PageLocked(page)); 170745bce8f3SLinus Torvalds 170845bce8f3SLinus Torvalds if (!page_has_buffers(page)) 170945bce8f3SLinus Torvalds create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state); 171045bce8f3SLinus Torvalds return page_buffers(page); 171145bce8f3SLinus Torvalds } 171245bce8f3SLinus Torvalds 171345bce8f3SLinus Torvalds /* 17141da177e4SLinus Torvalds * NOTE! All mapped/uptodate combinations are valid: 17151da177e4SLinus Torvalds * 17161da177e4SLinus Torvalds * Mapped Uptodate Meaning 17171da177e4SLinus Torvalds * 17181da177e4SLinus Torvalds * No No "unknown" - must do get_block() 17191da177e4SLinus Torvalds * No Yes "hole" - zero-filled 17201da177e4SLinus Torvalds * Yes No "allocated" - allocated on disk, not read in 17211da177e4SLinus Torvalds * Yes Yes "valid" - allocated and up-to-date in memory. 17221da177e4SLinus Torvalds * 17231da177e4SLinus Torvalds * "Dirty" is valid only with the last case (mapped+uptodate). 17241da177e4SLinus Torvalds */ 17251da177e4SLinus Torvalds 17261da177e4SLinus Torvalds /* 17271da177e4SLinus Torvalds * While block_write_full_page is writing back the dirty buffers under 17281da177e4SLinus Torvalds * the page lock, whoever dirtied the buffers may decide to clean them 17291da177e4SLinus Torvalds * again at any time. We handle that by only looking at the buffer 17301da177e4SLinus Torvalds * state inside lock_buffer(). 
17311da177e4SLinus Torvalds * 17321da177e4SLinus Torvalds * If block_write_full_page() is called for regular writeback 17331da177e4SLinus Torvalds * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a 17341da177e4SLinus Torvalds * locked buffer. This only can happen if someone has written the buffer 17351da177e4SLinus Torvalds * directly, with submit_bh(). At the address_space level PageWriteback 17361da177e4SLinus Torvalds * prevents this contention from occurring. 17376e34eeddSTheodore Ts'o * 17386e34eeddSTheodore Ts'o * If block_write_full_page() is called with wbc->sync_mode == 173970fd7614SChristoph Hellwig * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this 1740721a9602SJens Axboe * causes the writes to be flagged as synchronous writes. 17411da177e4SLinus Torvalds */ 1742b4bba389SBenjamin Marzinski int __block_write_full_page(struct inode *inode, struct page *page, 174335c80d5fSChris Mason get_block_t *get_block, struct writeback_control *wbc, 174435c80d5fSChris Mason bh_end_io_t *handler) 17451da177e4SLinus Torvalds { 17461da177e4SLinus Torvalds int err; 17471da177e4SLinus Torvalds sector_t block; 17481da177e4SLinus Torvalds sector_t last_block; 1749f0fbd5fcSAndrew Morton struct buffer_head *bh, *head; 175045bce8f3SLinus Torvalds unsigned int blocksize, bbits; 17511da177e4SLinus Torvalds int nr_underway = 0; 17527637241eSJens Axboe int write_flags = wbc_to_write_flags(wbc); 17531da177e4SLinus Torvalds 175445bce8f3SLinus Torvalds head = create_page_buffers(page, inode, 17551da177e4SLinus Torvalds (1 << BH_Dirty)|(1 << BH_Uptodate)); 17561da177e4SLinus Torvalds 17571da177e4SLinus Torvalds /* 17581da177e4SLinus Torvalds * Be very careful. We have no exclusion from __set_page_dirty_buffers 17591da177e4SLinus Torvalds * here, and the (potentially unmapped) buffers may become dirty at 17601da177e4SLinus Torvalds * any time. If a buffer becomes dirty here after we've inspected it 17611da177e4SLinus Torvalds * then we just miss that fact, and the page stays dirty. 17621da177e4SLinus Torvalds * 17631da177e4SLinus Torvalds * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; 17641da177e4SLinus Torvalds * handle that here by just cleaning them. 17651da177e4SLinus Torvalds */ 17661da177e4SLinus Torvalds 17671da177e4SLinus Torvalds bh = head; 176845bce8f3SLinus Torvalds blocksize = bh->b_size; 176945bce8f3SLinus Torvalds bbits = block_size_bits(blocksize); 177045bce8f3SLinus Torvalds 177109cbfeafSKirill A. Shutemov block = (sector_t)page->index << (PAGE_SHIFT - bbits); 177245bce8f3SLinus Torvalds last_block = (i_size_read(inode) - 1) >> bbits; 17731da177e4SLinus Torvalds 17741da177e4SLinus Torvalds /* 17751da177e4SLinus Torvalds * Get all the dirty buffers mapped to disk addresses and 17761da177e4SLinus Torvalds * handle any aliases from the underlying blockdev's mapping. 17771da177e4SLinus Torvalds */ 17781da177e4SLinus Torvalds do { 17791da177e4SLinus Torvalds if (block > last_block) { 17801da177e4SLinus Torvalds /* 17811da177e4SLinus Torvalds * mapped buffers outside i_size will occur, because 17821da177e4SLinus Torvalds * this page can be outside i_size when there is a 17831da177e4SLinus Torvalds * truncate in progress. 
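 * Clearing the dirty bit and marking such a buffer uptodate is safe:
 * the data beyond i_size is about to be discarded by the truncate
 * anyway, so there is nothing worth writing back.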
17841da177e4SLinus Torvalds */ 17851da177e4SLinus Torvalds /* 17861da177e4SLinus Torvalds * The buffer was zeroed by block_write_full_page() 17871da177e4SLinus Torvalds */ 17881da177e4SLinus Torvalds clear_buffer_dirty(bh); 17891da177e4SLinus Torvalds set_buffer_uptodate(bh); 179029a814d2SAlex Tomas } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && 179129a814d2SAlex Tomas buffer_dirty(bh)) { 1792b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 17931da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 17941da177e4SLinus Torvalds if (err) 17951da177e4SLinus Torvalds goto recover; 179629a814d2SAlex Tomas clear_buffer_delay(bh); 17971da177e4SLinus Torvalds if (buffer_new(bh)) { 17981da177e4SLinus Torvalds /* blockdev mappings never come here */ 17991da177e4SLinus Torvalds clear_buffer_new(bh); 1800e64855c6SJan Kara clean_bdev_bh_alias(bh); 18011da177e4SLinus Torvalds } 18021da177e4SLinus Torvalds } 18031da177e4SLinus Torvalds bh = bh->b_this_page; 18041da177e4SLinus Torvalds block++; 18051da177e4SLinus Torvalds } while (bh != head); 18061da177e4SLinus Torvalds 18071da177e4SLinus Torvalds do { 18081da177e4SLinus Torvalds if (!buffer_mapped(bh)) 18091da177e4SLinus Torvalds continue; 18101da177e4SLinus Torvalds /* 18111da177e4SLinus Torvalds * If it's a fully non-blocking write attempt and we cannot 18121da177e4SLinus Torvalds * lock the buffer then redirty the page. Note that this can 18135b0830cbSJens Axboe * potentially cause a busy-wait loop from writeback threads 18145b0830cbSJens Axboe * and kswapd activity, but those code paths have their own 18155b0830cbSJens Axboe * higher-level throttling. 18161da177e4SLinus Torvalds */ 18171b430beeSWu Fengguang if (wbc->sync_mode != WB_SYNC_NONE) { 18181da177e4SLinus Torvalds lock_buffer(bh); 1819ca5de404SNick Piggin } else if (!trylock_buffer(bh)) { 18201da177e4SLinus Torvalds redirty_page_for_writepage(wbc, page); 18211da177e4SLinus Torvalds continue; 18221da177e4SLinus Torvalds } 18231da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 182435c80d5fSChris Mason mark_buffer_async_write_endio(bh, handler); 18251da177e4SLinus Torvalds } else { 18261da177e4SLinus Torvalds unlock_buffer(bh); 18271da177e4SLinus Torvalds } 18281da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 18291da177e4SLinus Torvalds 18301da177e4SLinus Torvalds /* 18311da177e4SLinus Torvalds * The page and its buffers are protected by PageWriteback(), so we can 18321da177e4SLinus Torvalds * drop the bh refcounts early. 18331da177e4SLinus Torvalds */ 18341da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 18351da177e4SLinus Torvalds set_page_writeback(page); 18361da177e4SLinus Torvalds 18371da177e4SLinus Torvalds do { 18381da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 18391da177e4SLinus Torvalds if (buffer_async_write(bh)) { 1840020c2833SEric Biggers submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc); 18411da177e4SLinus Torvalds nr_underway++; 1842ad576e63SNick Piggin } 18431da177e4SLinus Torvalds bh = next; 18441da177e4SLinus Torvalds } while (bh != head); 184505937baaSAndrew Morton unlock_page(page); 18461da177e4SLinus Torvalds 18471da177e4SLinus Torvalds err = 0; 18481da177e4SLinus Torvalds done: 18491da177e4SLinus Torvalds if (nr_underway == 0) { 18501da177e4SLinus Torvalds /* 18511da177e4SLinus Torvalds * The page was marked dirty, but the buffers were 18521da177e4SLinus Torvalds * clean. Someone wrote them back by hand with 18531da177e4SLinus Torvalds * ll_rw_block/submit_bh. A rare case. 
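 * We must still call end_page_writeback() ourselves here, since no
 * I/O completion will arrive to do it; otherwise the page would stay
 * in writeback state forever.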
18541da177e4SLinus Torvalds */ 18551da177e4SLinus Torvalds end_page_writeback(page); 18563d67f2d7SNick Piggin 18571da177e4SLinus Torvalds /* 18581da177e4SLinus Torvalds * The page and buffer_heads can be released at any time from 18591da177e4SLinus Torvalds * here on. 18601da177e4SLinus Torvalds */ 18611da177e4SLinus Torvalds } 18621da177e4SLinus Torvalds return err; 18631da177e4SLinus Torvalds 18641da177e4SLinus Torvalds recover: 18651da177e4SLinus Torvalds /* 18661da177e4SLinus Torvalds * ENOSPC, or some other error. We may already have added some 18671da177e4SLinus Torvalds * blocks to the file, so we need to write these out to avoid 18681da177e4SLinus Torvalds * exposing stale data. 18691da177e4SLinus Torvalds * The page is currently locked and not marked for writeback 18701da177e4SLinus Torvalds */ 18711da177e4SLinus Torvalds bh = head; 18721da177e4SLinus Torvalds /* Recovery: lock and submit the mapped buffers */ 18731da177e4SLinus Torvalds do { 187429a814d2SAlex Tomas if (buffer_mapped(bh) && buffer_dirty(bh) && 187529a814d2SAlex Tomas !buffer_delay(bh)) { 18761da177e4SLinus Torvalds lock_buffer(bh); 187735c80d5fSChris Mason mark_buffer_async_write_endio(bh, handler); 18781da177e4SLinus Torvalds } else { 18791da177e4SLinus Torvalds /* 18801da177e4SLinus Torvalds * The buffer may have been set dirty during 18811da177e4SLinus Torvalds * attachment to a dirty page. 18821da177e4SLinus Torvalds */ 18831da177e4SLinus Torvalds clear_buffer_dirty(bh); 18841da177e4SLinus Torvalds } 18851da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 18861da177e4SLinus Torvalds SetPageError(page); 18871da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 18887e4c3690SAndrew Morton mapping_set_error(page->mapping, err); 18891da177e4SLinus Torvalds set_page_writeback(page); 18901da177e4SLinus Torvalds do { 18911da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 18921da177e4SLinus Torvalds if (buffer_async_write(bh)) { 18931da177e4SLinus Torvalds clear_buffer_dirty(bh); 1894020c2833SEric Biggers submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc); 18951da177e4SLinus Torvalds nr_underway++; 1896ad576e63SNick Piggin } 18971da177e4SLinus Torvalds bh = next; 18981da177e4SLinus Torvalds } while (bh != head); 1899ffda9d30SNick Piggin unlock_page(page); 19001da177e4SLinus Torvalds goto done; 19011da177e4SLinus Torvalds } 1902b4bba389SBenjamin Marzinski EXPORT_SYMBOL(__block_write_full_page); 19031da177e4SLinus Torvalds 1904afddba49SNick Piggin /* 1905afddba49SNick Piggin * If a page has any new buffers, zero them out here, and mark them uptodate 1906afddba49SNick Piggin * and dirty so they'll be written out (in order to prevent uninitialised 1907afddba49SNick Piggin * block data from leaking). And clear the new bit. 
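 *
 * Callers: __block_write_begin_int() after a failed read or failed
 * get_block(), and block_write_end() after a short copy; @from and
 * @to bound the byte range of the attempted write within the page.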
1908afddba49SNick Piggin */ 1909afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to) 1910afddba49SNick Piggin { 1911afddba49SNick Piggin unsigned int block_start, block_end; 1912afddba49SNick Piggin struct buffer_head *head, *bh; 1913afddba49SNick Piggin 1914afddba49SNick Piggin BUG_ON(!PageLocked(page)); 1915afddba49SNick Piggin if (!page_has_buffers(page)) 1916afddba49SNick Piggin return; 1917afddba49SNick Piggin 1918afddba49SNick Piggin bh = head = page_buffers(page); 1919afddba49SNick Piggin block_start = 0; 1920afddba49SNick Piggin do { 1921afddba49SNick Piggin block_end = block_start + bh->b_size; 1922afddba49SNick Piggin 1923afddba49SNick Piggin if (buffer_new(bh)) { 1924afddba49SNick Piggin if (block_end > from && block_start < to) { 1925afddba49SNick Piggin if (!PageUptodate(page)) { 1926afddba49SNick Piggin unsigned start, size; 1927afddba49SNick Piggin 1928afddba49SNick Piggin start = max(from, block_start); 1929afddba49SNick Piggin size = min(to, block_end) - start; 1930afddba49SNick Piggin 1931eebd2aa3SChristoph Lameter zero_user(page, start, size); 1932afddba49SNick Piggin set_buffer_uptodate(bh); 1933afddba49SNick Piggin } 1934afddba49SNick Piggin 1935afddba49SNick Piggin clear_buffer_new(bh); 1936afddba49SNick Piggin mark_buffer_dirty(bh); 1937afddba49SNick Piggin } 1938afddba49SNick Piggin } 1939afddba49SNick Piggin 1940afddba49SNick Piggin block_start = block_end; 1941afddba49SNick Piggin bh = bh->b_this_page; 1942afddba49SNick Piggin } while (bh != head); 1943afddba49SNick Piggin } 1944afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers); 1945afddba49SNick Piggin 1946ae259a9cSChristoph Hellwig static void 1947ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, 1948ae259a9cSChristoph Hellwig struct iomap *iomap) 1949ae259a9cSChristoph Hellwig { 1950ae259a9cSChristoph Hellwig loff_t offset = block << inode->i_blkbits; 1951ae259a9cSChristoph Hellwig 1952ae259a9cSChristoph Hellwig bh->b_bdev = iomap->bdev; 1953ae259a9cSChristoph Hellwig 1954ae259a9cSChristoph Hellwig /* 1955ae259a9cSChristoph Hellwig * Block points to offset in file we need to map, iomap contains 1956ae259a9cSChristoph Hellwig * the offset at which the map starts. If the map ends before the 1957ae259a9cSChristoph Hellwig * current block, then do not map the buffer and let the caller 1958ae259a9cSChristoph Hellwig * handle it. 1959ae259a9cSChristoph Hellwig */ 1960ae259a9cSChristoph Hellwig BUG_ON(offset >= iomap->offset + iomap->length); 1961ae259a9cSChristoph Hellwig 1962ae259a9cSChristoph Hellwig switch (iomap->type) { 1963ae259a9cSChristoph Hellwig case IOMAP_HOLE: 1964ae259a9cSChristoph Hellwig /* 1965ae259a9cSChristoph Hellwig * If the buffer is not up to date or beyond the current EOF, 1966ae259a9cSChristoph Hellwig * we need to mark it as new to ensure sub-block zeroing is 1967ae259a9cSChristoph Hellwig * executed if necessary. 
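 * A hole is deliberately left unmapped here: reads of it see zeroes,
 * and setting BH_New makes the write path zero whatever part of the
 * block the caller does not overwrite.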
1968ae259a9cSChristoph Hellwig */ 1969ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) || 1970ae259a9cSChristoph Hellwig (offset >= i_size_read(inode))) 1971ae259a9cSChristoph Hellwig set_buffer_new(bh); 1972ae259a9cSChristoph Hellwig break; 1973ae259a9cSChristoph Hellwig case IOMAP_DELALLOC: 1974ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) || 1975ae259a9cSChristoph Hellwig (offset >= i_size_read(inode))) 1976ae259a9cSChristoph Hellwig set_buffer_new(bh); 1977ae259a9cSChristoph Hellwig set_buffer_uptodate(bh); 1978ae259a9cSChristoph Hellwig set_buffer_mapped(bh); 1979ae259a9cSChristoph Hellwig set_buffer_delay(bh); 1980ae259a9cSChristoph Hellwig break; 1981ae259a9cSChristoph Hellwig case IOMAP_UNWRITTEN: 1982ae259a9cSChristoph Hellwig /* 1983ae259a9cSChristoph Hellwig * For unwritten regions, we always need to ensure that 1984ae259a9cSChristoph Hellwig * sub-block writes cause the regions in the block we are not 1985ae259a9cSChristoph Hellwig * writing to are zeroed. Set the buffer as new to ensure this. 1986ae259a9cSChristoph Hellwig */ 1987ae259a9cSChristoph Hellwig set_buffer_new(bh); 1988ae259a9cSChristoph Hellwig set_buffer_unwritten(bh); 1989ae259a9cSChristoph Hellwig /* FALLTHRU */ 1990ae259a9cSChristoph Hellwig case IOMAP_MAPPED: 1991ae259a9cSChristoph Hellwig if (offset >= i_size_read(inode)) 1992ae259a9cSChristoph Hellwig set_buffer_new(bh); 1993ae259a9cSChristoph Hellwig bh->b_blocknr = (iomap->blkno >> (inode->i_blkbits - 9)) + 1994ae259a9cSChristoph Hellwig ((offset - iomap->offset) >> inode->i_blkbits); 1995ae259a9cSChristoph Hellwig set_buffer_mapped(bh); 1996ae259a9cSChristoph Hellwig break; 1997ae259a9cSChristoph Hellwig } 1998ae259a9cSChristoph Hellwig } 1999ae259a9cSChristoph Hellwig 2000ae259a9cSChristoph Hellwig int __block_write_begin_int(struct page *page, loff_t pos, unsigned len, 2001ae259a9cSChristoph Hellwig get_block_t *get_block, struct iomap *iomap) 20021da177e4SLinus Torvalds { 200309cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 2004ebdec241SChristoph Hellwig unsigned to = from + len; 20056e1db88dSChristoph Hellwig struct inode *inode = page->mapping->host; 20061da177e4SLinus Torvalds unsigned block_start, block_end; 20071da177e4SLinus Torvalds sector_t block; 20081da177e4SLinus Torvalds int err = 0; 20091da177e4SLinus Torvalds unsigned blocksize, bbits; 20101da177e4SLinus Torvalds struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 20111da177e4SLinus Torvalds 20121da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 201309cbfeafSKirill A. Shutemov BUG_ON(from > PAGE_SIZE); 201409cbfeafSKirill A. Shutemov BUG_ON(to > PAGE_SIZE); 20151da177e4SLinus Torvalds BUG_ON(from > to); 20161da177e4SLinus Torvalds 201745bce8f3SLinus Torvalds head = create_page_buffers(page, inode, 0); 201845bce8f3SLinus Torvalds blocksize = head->b_size; 201945bce8f3SLinus Torvalds bbits = block_size_bits(blocksize); 20201da177e4SLinus Torvalds 202109cbfeafSKirill A. 
Shutemov block = (sector_t)page->index << (PAGE_SHIFT - bbits); 20221da177e4SLinus Torvalds 20231da177e4SLinus Torvalds for(bh = head, block_start = 0; bh != head || !block_start; 20241da177e4SLinus Torvalds block++, block_start=block_end, bh = bh->b_this_page) { 20251da177e4SLinus Torvalds block_end = block_start + blocksize; 20261da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 20271da177e4SLinus Torvalds if (PageUptodate(page)) { 20281da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 20291da177e4SLinus Torvalds set_buffer_uptodate(bh); 20301da177e4SLinus Torvalds } 20311da177e4SLinus Torvalds continue; 20321da177e4SLinus Torvalds } 20331da177e4SLinus Torvalds if (buffer_new(bh)) 20341da177e4SLinus Torvalds clear_buffer_new(bh); 20351da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2036b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 2037ae259a9cSChristoph Hellwig if (get_block) { 20381da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 20391da177e4SLinus Torvalds if (err) 2040f3ddbdc6SNick Piggin break; 2041ae259a9cSChristoph Hellwig } else { 2042ae259a9cSChristoph Hellwig iomap_to_bh(inode, block, bh, iomap); 2043ae259a9cSChristoph Hellwig } 2044ae259a9cSChristoph Hellwig 20451da177e4SLinus Torvalds if (buffer_new(bh)) { 2046e64855c6SJan Kara clean_bdev_bh_alias(bh); 20471da177e4SLinus Torvalds if (PageUptodate(page)) { 2048637aff46SNick Piggin clear_buffer_new(bh); 20491da177e4SLinus Torvalds set_buffer_uptodate(bh); 2050637aff46SNick Piggin mark_buffer_dirty(bh); 20511da177e4SLinus Torvalds continue; 20521da177e4SLinus Torvalds } 2053eebd2aa3SChristoph Lameter if (block_end > to || block_start < from) 2054eebd2aa3SChristoph Lameter zero_user_segments(page, 2055eebd2aa3SChristoph Lameter to, block_end, 2056eebd2aa3SChristoph Lameter block_start, from); 20571da177e4SLinus Torvalds continue; 20581da177e4SLinus Torvalds } 20591da177e4SLinus Torvalds } 20601da177e4SLinus Torvalds if (PageUptodate(page)) { 20611da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 20621da177e4SLinus Torvalds set_buffer_uptodate(bh); 20631da177e4SLinus Torvalds continue; 20641da177e4SLinus Torvalds } 20651da177e4SLinus Torvalds if (!buffer_uptodate(bh) && !buffer_delay(bh) && 206633a266ddSDavid Chinner !buffer_unwritten(bh) && 20671da177e4SLinus Torvalds (block_start < from || block_end > to)) { 2068dfec8a14SMike Christie ll_rw_block(REQ_OP_READ, 0, 1, &bh); 20691da177e4SLinus Torvalds *wait_bh++=bh; 20701da177e4SLinus Torvalds } 20711da177e4SLinus Torvalds } 20721da177e4SLinus Torvalds /* 20731da177e4SLinus Torvalds * If we issued read requests - let them complete. 
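 * Any read that fails leaves its buffer non-uptodate and turns into
 * -EIO below; on error the new buffers are zeroed so that stale block
 * contents are never exposed to a later read of the page.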
20741da177e4SLinus Torvalds */ 20751da177e4SLinus Torvalds while(wait_bh > wait) { 20761da177e4SLinus Torvalds wait_on_buffer(*--wait_bh); 20771da177e4SLinus Torvalds if (!buffer_uptodate(*wait_bh)) 2078f3ddbdc6SNick Piggin err = -EIO; 20791da177e4SLinus Torvalds } 2080f9f07b6cSJan Kara if (unlikely(err)) 2081afddba49SNick Piggin page_zero_new_buffers(page, from, to); 20821da177e4SLinus Torvalds return err; 20831da177e4SLinus Torvalds } 2084ae259a9cSChristoph Hellwig 2085ae259a9cSChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len, 2086ae259a9cSChristoph Hellwig get_block_t *get_block) 2087ae259a9cSChristoph Hellwig { 2088ae259a9cSChristoph Hellwig return __block_write_begin_int(page, pos, len, get_block, NULL); 2089ae259a9cSChristoph Hellwig } 2090ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin); 20911da177e4SLinus Torvalds 20921da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page, 20931da177e4SLinus Torvalds unsigned from, unsigned to) 20941da177e4SLinus Torvalds { 20951da177e4SLinus Torvalds unsigned block_start, block_end; 20961da177e4SLinus Torvalds int partial = 0; 20971da177e4SLinus Torvalds unsigned blocksize; 20981da177e4SLinus Torvalds struct buffer_head *bh, *head; 20991da177e4SLinus Torvalds 210045bce8f3SLinus Torvalds bh = head = page_buffers(page); 210145bce8f3SLinus Torvalds blocksize = bh->b_size; 21021da177e4SLinus Torvalds 210345bce8f3SLinus Torvalds block_start = 0; 210445bce8f3SLinus Torvalds do { 21051da177e4SLinus Torvalds block_end = block_start + blocksize; 21061da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 21071da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 21081da177e4SLinus Torvalds partial = 1; 21091da177e4SLinus Torvalds } else { 21101da177e4SLinus Torvalds set_buffer_uptodate(bh); 21111da177e4SLinus Torvalds mark_buffer_dirty(bh); 21121da177e4SLinus Torvalds } 2113afddba49SNick Piggin clear_buffer_new(bh); 211445bce8f3SLinus Torvalds 211545bce8f3SLinus Torvalds block_start = block_end; 211645bce8f3SLinus Torvalds bh = bh->b_this_page; 211745bce8f3SLinus Torvalds } while (bh != head); 21181da177e4SLinus Torvalds 21191da177e4SLinus Torvalds /* 21201da177e4SLinus Torvalds * If this is a partial write which happened to make all buffers 21211da177e4SLinus Torvalds * uptodate then we can optimize away a bogus readpage() for 21221da177e4SLinus Torvalds * the next read(). Here we 'discover' whether the page went 21231da177e4SLinus Torvalds * uptodate as a result of this (potentially partial) write. 21241da177e4SLinus Torvalds */ 21251da177e4SLinus Torvalds if (!partial) 21261da177e4SLinus Torvalds SetPageUptodate(page); 21271da177e4SLinus Torvalds return 0; 21281da177e4SLinus Torvalds } 21291da177e4SLinus Torvalds 21301da177e4SLinus Torvalds /* 2131155130a4SChristoph Hellwig * block_write_begin takes care of the basic task of block allocation and 2132155130a4SChristoph Hellwig * bringing partial write blocks uptodate first. 2133155130a4SChristoph Hellwig * 21347bb46a67Snpiggin@suse.de * The filesystem needs to handle block truncation upon failure. 2135afddba49SNick Piggin */ 2136155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2137155130a4SChristoph Hellwig unsigned flags, struct page **pagep, get_block_t *get_block) 2138afddba49SNick Piggin { 213909cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 2140afddba49SNick Piggin struct page *page; 21416e1db88dSChristoph Hellwig int status; 2142afddba49SNick Piggin 214354566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 21446e1db88dSChristoph Hellwig if (!page) 21456e1db88dSChristoph Hellwig return -ENOMEM; 2146afddba49SNick Piggin 21476e1db88dSChristoph Hellwig status = __block_write_begin(page, pos, len, get_block); 2148afddba49SNick Piggin if (unlikely(status)) { 2149afddba49SNick Piggin unlock_page(page); 215009cbfeafSKirill A. Shutemov put_page(page); 21516e1db88dSChristoph Hellwig page = NULL; 2152afddba49SNick Piggin } 2153afddba49SNick Piggin 21546e1db88dSChristoph Hellwig *pagep = page; 2155afddba49SNick Piggin return status; 2156afddba49SNick Piggin } 2157afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin); 2158afddba49SNick Piggin 2159afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping, 2160afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2161afddba49SNick Piggin struct page *page, void *fsdata) 2162afddba49SNick Piggin { 2163afddba49SNick Piggin struct inode *inode = mapping->host; 2164afddba49SNick Piggin unsigned start; 2165afddba49SNick Piggin 216609cbfeafSKirill A. Shutemov start = pos & (PAGE_SIZE - 1); 2167afddba49SNick Piggin 2168afddba49SNick Piggin if (unlikely(copied < len)) { 2169afddba49SNick Piggin /* 2170afddba49SNick Piggin * The buffers that were written will now be uptodate, so we 2171afddba49SNick Piggin * don't have to worry about a readpage reading them and 2172afddba49SNick Piggin * overwriting a partial write. However if we have encountered 2173afddba49SNick Piggin * a short write and only partially written into a buffer, it 2174afddba49SNick Piggin * will not be marked uptodate, so a readpage might come in and 2175afddba49SNick Piggin * destroy our partial write. 2176afddba49SNick Piggin * 2177afddba49SNick Piggin * Do the simplest thing, and just treat any short write to a 2178afddba49SNick Piggin * non uptodate page as a zero-length write, and force the 2179afddba49SNick Piggin * caller to redo the whole thing. 2180afddba49SNick Piggin */ 2181afddba49SNick Piggin if (!PageUptodate(page)) 2182afddba49SNick Piggin copied = 0; 2183afddba49SNick Piggin 2184afddba49SNick Piggin page_zero_new_buffers(page, start+copied, start+len); 2185afddba49SNick Piggin } 2186afddba49SNick Piggin flush_dcache_page(page); 2187afddba49SNick Piggin 2188afddba49SNick Piggin /* This could be a short (even 0-length) commit */ 2189afddba49SNick Piggin __block_commit_write(inode, page, start, start+copied); 2190afddba49SNick Piggin 2191afddba49SNick Piggin return copied; 2192afddba49SNick Piggin } 2193afddba49SNick Piggin EXPORT_SYMBOL(block_write_end); 2194afddba49SNick Piggin 2195afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping, 2196afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2197afddba49SNick Piggin struct page *page, void *fsdata) 2198afddba49SNick Piggin { 2199afddba49SNick Piggin struct inode *inode = mapping->host; 220090a80202SJan Kara loff_t old_size = inode->i_size; 2201c7d206b3SJan Kara int i_size_changed = 0; 2202afddba49SNick Piggin 2203afddba49SNick Piggin copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 2204afddba49SNick Piggin 2205afddba49SNick Piggin /* 2206afddba49SNick Piggin * No need to use i_size_read() here, the i_size 2207afddba49SNick Piggin * cannot change under us because we hold i_mutex. 
2208afddba49SNick Piggin * 2209afddba49SNick Piggin * But it's important to update i_size while still holding page lock: 2210afddba49SNick Piggin * page writeout could otherwise come in and zero beyond i_size. 2211afddba49SNick Piggin */ 2212afddba49SNick Piggin if (pos+copied > inode->i_size) { 2213afddba49SNick Piggin i_size_write(inode, pos+copied); 2214c7d206b3SJan Kara i_size_changed = 1; 2215afddba49SNick Piggin } 2216afddba49SNick Piggin 2217afddba49SNick Piggin unlock_page(page); 221809cbfeafSKirill A. Shutemov put_page(page); 2219afddba49SNick Piggin 222090a80202SJan Kara if (old_size < pos) 222190a80202SJan Kara pagecache_isize_extended(inode, old_size, pos); 2222c7d206b3SJan Kara /* 2223c7d206b3SJan Kara * Don't mark the inode dirty under page lock. First, it unnecessarily 2224c7d206b3SJan Kara * makes the holding time of page lock longer. Second, it forces lock 2225c7d206b3SJan Kara * ordering of page lock and transaction start for journaling 2226c7d206b3SJan Kara * filesystems. 2227c7d206b3SJan Kara */ 2228c7d206b3SJan Kara if (i_size_changed) 2229c7d206b3SJan Kara mark_inode_dirty(inode); 2230c7d206b3SJan Kara 2231afddba49SNick Piggin return copied; 2232afddba49SNick Piggin } 2233afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end); 2234afddba49SNick Piggin 2235afddba49SNick Piggin /* 22368ab22b9aSHisashi Hifumi * block_is_partially_uptodate checks whether buffers within a page are 22378ab22b9aSHisashi Hifumi * uptodate or not. 22388ab22b9aSHisashi Hifumi * 22398ab22b9aSHisashi Hifumi * Returns true if all buffers which correspond to a file portion 22408ab22b9aSHisashi Hifumi * we want to read are uptodate. 22418ab22b9aSHisashi Hifumi */ 2242c186afb4SAl Viro int block_is_partially_uptodate(struct page *page, unsigned long from, 2243c186afb4SAl Viro unsigned long count) 22448ab22b9aSHisashi Hifumi { 22458ab22b9aSHisashi Hifumi unsigned block_start, block_end, blocksize; 22468ab22b9aSHisashi Hifumi unsigned to; 22478ab22b9aSHisashi Hifumi struct buffer_head *bh, *head; 22488ab22b9aSHisashi Hifumi int ret = 1; 22498ab22b9aSHisashi Hifumi 22508ab22b9aSHisashi Hifumi if (!page_has_buffers(page)) 22518ab22b9aSHisashi Hifumi return 0; 22528ab22b9aSHisashi Hifumi 225345bce8f3SLinus Torvalds head = page_buffers(page); 225445bce8f3SLinus Torvalds blocksize = head->b_size; 225509cbfeafSKirill A. Shutemov to = min_t(unsigned, PAGE_SIZE - from, count); 22568ab22b9aSHisashi Hifumi to = from + to; 225709cbfeafSKirill A. 
Shutemov if (from < blocksize && to > PAGE_SIZE - blocksize) 22588ab22b9aSHisashi Hifumi return 0; 22598ab22b9aSHisashi Hifumi 22608ab22b9aSHisashi Hifumi bh = head; 22618ab22b9aSHisashi Hifumi block_start = 0; 22628ab22b9aSHisashi Hifumi do { 22638ab22b9aSHisashi Hifumi block_end = block_start + blocksize; 22648ab22b9aSHisashi Hifumi if (block_end > from && block_start < to) { 22658ab22b9aSHisashi Hifumi if (!buffer_uptodate(bh)) { 22668ab22b9aSHisashi Hifumi ret = 0; 22678ab22b9aSHisashi Hifumi break; 22688ab22b9aSHisashi Hifumi } 22698ab22b9aSHisashi Hifumi if (block_end >= to) 22708ab22b9aSHisashi Hifumi break; 22718ab22b9aSHisashi Hifumi } 22728ab22b9aSHisashi Hifumi block_start = block_end; 22738ab22b9aSHisashi Hifumi bh = bh->b_this_page; 22748ab22b9aSHisashi Hifumi } while (bh != head); 22758ab22b9aSHisashi Hifumi 22768ab22b9aSHisashi Hifumi return ret; 22778ab22b9aSHisashi Hifumi } 22788ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate); 22798ab22b9aSHisashi Hifumi 22808ab22b9aSHisashi Hifumi /* 22811da177e4SLinus Torvalds * Generic "read page" function for block devices that have the normal 22821da177e4SLinus Torvalds * get_block functionality. This is most of the block device filesystems. 22831da177e4SLinus Torvalds * Reads the page asynchronously --- the unlock_buffer() and 22841da177e4SLinus Torvalds * set/clear_buffer_uptodate() functions propagate buffer state into the 22851da177e4SLinus Torvalds * page struct once IO has completed. 22861da177e4SLinus Torvalds */ 22871da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block) 22881da177e4SLinus Torvalds { 22891da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 22901da177e4SLinus Torvalds sector_t iblock, lblock; 22911da177e4SLinus Torvalds struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 229245bce8f3SLinus Torvalds unsigned int blocksize, bbits; 22931da177e4SLinus Torvalds int nr, i; 22941da177e4SLinus Torvalds int fully_mapped = 1; 22951da177e4SLinus Torvalds 229645bce8f3SLinus Torvalds head = create_page_buffers(page, inode, 0); 229745bce8f3SLinus Torvalds blocksize = head->b_size; 229845bce8f3SLinus Torvalds bbits = block_size_bits(blocksize); 22991da177e4SLinus Torvalds 230009cbfeafSKirill A. 
Shutemov iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); 230145bce8f3SLinus Torvalds lblock = (i_size_read(inode)+blocksize-1) >> bbits; 23021da177e4SLinus Torvalds bh = head; 23031da177e4SLinus Torvalds nr = 0; 23041da177e4SLinus Torvalds i = 0; 23051da177e4SLinus Torvalds 23061da177e4SLinus Torvalds do { 23071da177e4SLinus Torvalds if (buffer_uptodate(bh)) 23081da177e4SLinus Torvalds continue; 23091da177e4SLinus Torvalds 23101da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2311c64610baSAndrew Morton int err = 0; 2312c64610baSAndrew Morton 23131da177e4SLinus Torvalds fully_mapped = 0; 23141da177e4SLinus Torvalds if (iblock < lblock) { 2315b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 2316c64610baSAndrew Morton err = get_block(inode, iblock, bh, 0); 2317c64610baSAndrew Morton if (err) 23181da177e4SLinus Torvalds SetPageError(page); 23191da177e4SLinus Torvalds } 23201da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2321eebd2aa3SChristoph Lameter zero_user(page, i * blocksize, blocksize); 2322c64610baSAndrew Morton if (!err) 23231da177e4SLinus Torvalds set_buffer_uptodate(bh); 23241da177e4SLinus Torvalds continue; 23251da177e4SLinus Torvalds } 23261da177e4SLinus Torvalds /* 23271da177e4SLinus Torvalds * get_block() might have updated the buffer 23281da177e4SLinus Torvalds * synchronously 23291da177e4SLinus Torvalds */ 23301da177e4SLinus Torvalds if (buffer_uptodate(bh)) 23311da177e4SLinus Torvalds continue; 23321da177e4SLinus Torvalds } 23331da177e4SLinus Torvalds arr[nr++] = bh; 23341da177e4SLinus Torvalds } while (i++, iblock++, (bh = bh->b_this_page) != head); 23351da177e4SLinus Torvalds 23361da177e4SLinus Torvalds if (fully_mapped) 23371da177e4SLinus Torvalds SetPageMappedToDisk(page); 23381da177e4SLinus Torvalds 23391da177e4SLinus Torvalds if (!nr) { 23401da177e4SLinus Torvalds /* 23411da177e4SLinus Torvalds * All buffers are uptodate - we can set the page uptodate 23421da177e4SLinus Torvalds * as well. But not if get_block() returned an error. 23431da177e4SLinus Torvalds */ 23441da177e4SLinus Torvalds if (!PageError(page)) 23451da177e4SLinus Torvalds SetPageUptodate(page); 23461da177e4SLinus Torvalds unlock_page(page); 23471da177e4SLinus Torvalds return 0; 23481da177e4SLinus Torvalds } 23491da177e4SLinus Torvalds 23501da177e4SLinus Torvalds /* Stage two: lock the buffers */ 23511da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 23521da177e4SLinus Torvalds bh = arr[i]; 23531da177e4SLinus Torvalds lock_buffer(bh); 23541da177e4SLinus Torvalds mark_buffer_async_read(bh); 23551da177e4SLinus Torvalds } 23561da177e4SLinus Torvalds 23571da177e4SLinus Torvalds /* 23581da177e4SLinus Torvalds * Stage 3: start the IO. Check for uptodateness 23591da177e4SLinus Torvalds * inside the buffer lock in case another process reading 23601da177e4SLinus Torvalds * the underlying blockdev brought it uptodate (the sct fix). 23611da177e4SLinus Torvalds */ 23621da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 23631da177e4SLinus Torvalds bh = arr[i]; 23641da177e4SLinus Torvalds if (buffer_uptodate(bh)) 23651da177e4SLinus Torvalds end_buffer_async_read(bh, 1); 23661da177e4SLinus Torvalds else 23672a222ca9SMike Christie submit_bh(REQ_OP_READ, 0, bh); 23681da177e4SLinus Torvalds } 23691da177e4SLinus Torvalds return 0; 23701da177e4SLinus Torvalds } 23711fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_read_full_page); 23721da177e4SLinus Torvalds 23731da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding 237489e10787SNick Piggin * truncates. 
Uses filesystem pagecache writes to allow the filesystem to 23751da177e4SLinus Torvalds * deal with the hole. 23761da177e4SLinus Torvalds */ 237789e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size) 23781da177e4SLinus Torvalds { 23791da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 23801da177e4SLinus Torvalds struct page *page; 238189e10787SNick Piggin void *fsdata; 23821da177e4SLinus Torvalds int err; 23831da177e4SLinus Torvalds 2384c08d3b0eSnpiggin@suse.de err = inode_newsize_ok(inode, size); 2385c08d3b0eSnpiggin@suse.de if (err) 23861da177e4SLinus Torvalds goto out; 23871da177e4SLinus Torvalds 238889e10787SNick Piggin err = pagecache_write_begin(NULL, mapping, size, 0, 2389c718a975STetsuo Handa AOP_FLAG_CONT_EXPAND, &page, &fsdata); 239089e10787SNick Piggin if (err) 239105eb0b51SOGAWA Hirofumi goto out; 239205eb0b51SOGAWA Hirofumi 239389e10787SNick Piggin err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); 239489e10787SNick Piggin BUG_ON(err > 0); 239505eb0b51SOGAWA Hirofumi 239605eb0b51SOGAWA Hirofumi out: 239705eb0b51SOGAWA Hirofumi return err; 239805eb0b51SOGAWA Hirofumi } 23991fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple); 240005eb0b51SOGAWA Hirofumi 2401f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping, 240289e10787SNick Piggin loff_t pos, loff_t *bytes) 240305eb0b51SOGAWA Hirofumi { 240489e10787SNick Piggin struct inode *inode = mapping->host; 240593407472SFabian Frederick unsigned int blocksize = i_blocksize(inode); 240689e10787SNick Piggin struct page *page; 240789e10787SNick Piggin void *fsdata; 240889e10787SNick Piggin pgoff_t index, curidx; 240989e10787SNick Piggin loff_t curpos; 241089e10787SNick Piggin unsigned zerofrom, offset, len; 241189e10787SNick Piggin int err = 0; 241205eb0b51SOGAWA Hirofumi 241309cbfeafSKirill A. Shutemov index = pos >> PAGE_SHIFT; 241409cbfeafSKirill A. Shutemov offset = pos & ~PAGE_MASK; 241589e10787SNick Piggin 241609cbfeafSKirill A. Shutemov while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { 241709cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK; 241889e10787SNick Piggin if (zerofrom & (blocksize-1)) { 241989e10787SNick Piggin *bytes |= (blocksize-1); 242089e10787SNick Piggin (*bytes)++; 242189e10787SNick Piggin } 242209cbfeafSKirill A. Shutemov len = PAGE_SIZE - zerofrom; 242389e10787SNick Piggin 2424c718a975STetsuo Handa err = pagecache_write_begin(file, mapping, curpos, len, 0, 242589e10787SNick Piggin &page, &fsdata); 242689e10787SNick Piggin if (err) 242789e10787SNick Piggin goto out; 2428eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 242989e10787SNick Piggin err = pagecache_write_end(file, mapping, curpos, len, len, 243089e10787SNick Piggin page, fsdata); 243189e10787SNick Piggin if (err < 0) 243289e10787SNick Piggin goto out; 243389e10787SNick Piggin BUG_ON(err != len); 243489e10787SNick Piggin err = 0; 2435061e9746SOGAWA Hirofumi 2436061e9746SOGAWA Hirofumi balance_dirty_pages_ratelimited(mapping); 2437c2ca0fcdSMikulas Patocka 2438c2ca0fcdSMikulas Patocka if (unlikely(fatal_signal_pending(current))) { 2439c2ca0fcdSMikulas Patocka err = -EINTR; 2440c2ca0fcdSMikulas Patocka goto out; 2441c2ca0fcdSMikulas Patocka } 244289e10787SNick Piggin } 244389e10787SNick Piggin 244489e10787SNick Piggin /* page covers the boundary, find the boundary offset */ 244589e10787SNick Piggin if (index == curidx) { 244609cbfeafSKirill A. 
Shutemov zerofrom = curpos & ~PAGE_MASK; 244789e10787SNick Piggin /* if we will expand the file, the last block will be filled */ 244889e10787SNick Piggin if (offset <= zerofrom) { 244989e10787SNick Piggin goto out; 245089e10787SNick Piggin } 245189e10787SNick Piggin if (zerofrom & (blocksize-1)) { 245289e10787SNick Piggin *bytes |= (blocksize-1); 245389e10787SNick Piggin (*bytes)++; 245489e10787SNick Piggin } 245589e10787SNick Piggin len = offset - zerofrom; 245689e10787SNick Piggin 2457c718a975STetsuo Handa err = pagecache_write_begin(file, mapping, curpos, len, 0, 245889e10787SNick Piggin &page, &fsdata); 245989e10787SNick Piggin if (err) 246089e10787SNick Piggin goto out; 2461eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 246289e10787SNick Piggin err = pagecache_write_end(file, mapping, curpos, len, len, 246389e10787SNick Piggin page, fsdata); 246489e10787SNick Piggin if (err < 0) 246589e10787SNick Piggin goto out; 246689e10787SNick Piggin BUG_ON(err != len); 246789e10787SNick Piggin err = 0; 246889e10787SNick Piggin } 246989e10787SNick Piggin out: 247089e10787SNick Piggin return err; 24711da177e4SLinus Torvalds } 24721da177e4SLinus Torvalds 24731da177e4SLinus Torvalds /* 24741da177e4SLinus Torvalds * For moronic filesystems that do not allow holes in files. 24751da177e4SLinus Torvalds * We may have to extend the file. 24761da177e4SLinus Torvalds */ 2477282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping, 247889e10787SNick Piggin loff_t pos, unsigned len, unsigned flags, 247989e10787SNick Piggin struct page **pagep, void **fsdata, 248089e10787SNick Piggin get_block_t *get_block, loff_t *bytes) 24811da177e4SLinus Torvalds { 24821da177e4SLinus Torvalds struct inode *inode = mapping->host; 248393407472SFabian Frederick unsigned int blocksize = i_blocksize(inode); 248493407472SFabian Frederick unsigned int zerofrom; 248589e10787SNick Piggin int err; 24861da177e4SLinus Torvalds 248789e10787SNick Piggin err = cont_expand_zero(file, mapping, pos, bytes); 248889e10787SNick Piggin if (err) 2489155130a4SChristoph Hellwig return err; 24901da177e4SLinus Torvalds 249109cbfeafSKirill A. Shutemov zerofrom = *bytes & ~PAGE_MASK; 249289e10787SNick Piggin if (pos+len > *bytes && zerofrom & (blocksize-1)) { 24931da177e4SLinus Torvalds *bytes |= (blocksize-1); 24941da177e4SLinus Torvalds (*bytes)++; 24951da177e4SLinus Torvalds } 24961da177e4SLinus Torvalds 2497155130a4SChristoph Hellwig return block_write_begin(mapping, pos, len, flags, pagep, get_block); 24981da177e4SLinus Torvalds } 24991fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin); 25001da177e4SLinus Torvalds 25011da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to) 25021da177e4SLinus Torvalds { 25031da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 25041da177e4SLinus Torvalds __block_commit_write(inode, page, from, to); 25051da177e4SLinus Torvalds return 0; 25061da177e4SLinus Torvalds } 25071fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write); 25081da177e4SLinus Torvalds 250954171690SDavid Chinner /* 251054171690SDavid Chinner * block_page_mkwrite() is not allowed to change the file size as it gets 251154171690SDavid Chinner * called from a page fault handler when a page is first dirtied. Hence we must 251254171690SDavid Chinner * be careful to check for EOF conditions here.
We set the page up correctly 251354171690SDavid Chinner * for a written page which means we get ENOSPC checking when writing into 251454171690SDavid Chinner * holes and correct delalloc and unwritten extent mapping on filesystems that 251554171690SDavid Chinner * support these features. 251654171690SDavid Chinner * 251754171690SDavid Chinner * We are not allowed to take the i_mutex here so we have to play games to 251854171690SDavid Chinner * protect against truncate races as the page could now be beyond EOF. Because 25197bb46a67Snpiggin@suse.de * truncate writes the inode size before removing pages, once we have the 252054171690SDavid Chinner * page lock we can determine safely if the page is beyond EOF. If it is not 252154171690SDavid Chinner * beyond EOF, then the page is guaranteed safe against truncation until we 252254171690SDavid Chinner * unlock the page. 2523ea13a864SJan Kara * 252414da9200SJan Kara * Direct callers of this function should protect against filesystem freezing 25255c500029SRoss Zwisler * using sb_start_pagefault() - sb_end_pagefault() functions. 252654171690SDavid Chinner */ 25275c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 252854171690SDavid Chinner get_block_t get_block) 252954171690SDavid Chinner { 2530c2ec175cSNick Piggin struct page *page = vmf->page; 2531496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 253254171690SDavid Chinner unsigned long end; 253354171690SDavid Chinner loff_t size; 253424da4fabSJan Kara int ret; 253554171690SDavid Chinner 253654171690SDavid Chinner lock_page(page); 253754171690SDavid Chinner size = i_size_read(inode); 253854171690SDavid Chinner if ((page->mapping != inode->i_mapping) || 253918336338SNick Piggin (page_offset(page) > size)) { 254024da4fabSJan Kara /* We overload EFAULT to mean page got truncated */ 254124da4fabSJan Kara ret = -EFAULT; 254224da4fabSJan Kara goto out_unlock; 254354171690SDavid Chinner } 254454171690SDavid Chinner 254554171690SDavid Chinner /* page is wholly or partially inside EOF */ 254609cbfeafSKirill A. Shutemov if (((page->index + 1) << PAGE_SHIFT) > size) 254709cbfeafSKirill A. Shutemov end = size & ~PAGE_MASK; 254854171690SDavid Chinner else 254909cbfeafSKirill A. Shutemov end = PAGE_SIZE; 255054171690SDavid Chinner 2551ebdec241SChristoph Hellwig ret = __block_write_begin(page, 0, end, get_block); 255254171690SDavid Chinner if (!ret) 255354171690SDavid Chinner ret = block_commit_write(page, 0, end); 255454171690SDavid Chinner 255524da4fabSJan Kara if (unlikely(ret < 0)) 255624da4fabSJan Kara goto out_unlock; 2557ea13a864SJan Kara set_page_dirty(page); 25581d1d1a76SDarrick J. Wong wait_for_stable_page(page); 255924da4fabSJan Kara return 0; 256024da4fabSJan Kara out_unlock: 2561b827e496SNick Piggin unlock_page(page); 256254171690SDavid Chinner return ret; 256354171690SDavid Chinner } 25641fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite); 25651da177e4SLinus Torvalds 25661da177e4SLinus Torvalds /* 256703158cd7SNick Piggin * nobh_write_begin()'s prereads are special: the buffer_heads are freed 25681da177e4SLinus Torvalds * immediately, while under the page lock. So it needs a special end_io 25691da177e4SLinus Torvalds * handler which does not touch the bh after unlocking it. 
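 *
 * A minimal sketch of such a handler (the name is illustrative),
 * mirroring what __end_buffer_read_notouch() does: propagate the IO
 * result into the buffer state, unlock, and never dereference the bh
 * again, since the submitter may already have freed it:
 *
 *	static void sketch_end_read_notouch(struct buffer_head *bh, int uptodate)
 *	{
 *		if (uptodate)
 *			set_buffer_uptodate(bh);
 *		else
 *			clear_buffer_uptodate(bh);
 *		unlock_buffer(bh);	<- no bh access past this point
 *	}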
25701da177e4SLinus Torvalds */ 25711da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) 25721da177e4SLinus Torvalds { 257368671f35SDmitry Monakhov __end_buffer_read_notouch(bh, uptodate); 25741da177e4SLinus Torvalds } 25751da177e4SLinus Torvalds 25761da177e4SLinus Torvalds /* 257703158cd7SNick Piggin * Attach the singly-linked list of buffers created by nobh_write_begin, to 257803158cd7SNick Piggin * the page (converting it to circular linked list and taking care of page 257903158cd7SNick Piggin * dirty races). 258003158cd7SNick Piggin */ 258103158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head) 258203158cd7SNick Piggin { 258303158cd7SNick Piggin struct buffer_head *bh; 258403158cd7SNick Piggin 258503158cd7SNick Piggin BUG_ON(!PageLocked(page)); 258603158cd7SNick Piggin 258703158cd7SNick Piggin spin_lock(&page->mapping->private_lock); 258803158cd7SNick Piggin bh = head; 258903158cd7SNick Piggin do { 259003158cd7SNick Piggin if (PageDirty(page)) 259103158cd7SNick Piggin set_buffer_dirty(bh); 259203158cd7SNick Piggin if (!bh->b_this_page) 259303158cd7SNick Piggin bh->b_this_page = head; 259403158cd7SNick Piggin bh = bh->b_this_page; 259503158cd7SNick Piggin } while (bh != head); 259603158cd7SNick Piggin attach_page_buffers(page, head); 259703158cd7SNick Piggin spin_unlock(&page->mapping->private_lock); 259803158cd7SNick Piggin } 259903158cd7SNick Piggin 260003158cd7SNick Piggin /* 2601ea0f04e5SChristoph Hellwig * On entry, the page is fully not uptodate. 2602ea0f04e5SChristoph Hellwig * On exit the page is fully uptodate in the areas outside (from,to) 26037bb46a67Snpiggin@suse.de * The filesystem needs to handle block truncation upon failure. 26041da177e4SLinus Torvalds */ 2605ea0f04e5SChristoph Hellwig int nobh_write_begin(struct address_space *mapping, 260603158cd7SNick Piggin loff_t pos, unsigned len, unsigned flags, 260703158cd7SNick Piggin struct page **pagep, void **fsdata, 26081da177e4SLinus Torvalds get_block_t *get_block) 26091da177e4SLinus Torvalds { 261003158cd7SNick Piggin struct inode *inode = mapping->host; 26111da177e4SLinus Torvalds const unsigned blkbits = inode->i_blkbits; 26121da177e4SLinus Torvalds const unsigned blocksize = 1 << blkbits; 2613a4b0672dSNick Piggin struct buffer_head *head, *bh; 261403158cd7SNick Piggin struct page *page; 261503158cd7SNick Piggin pgoff_t index; 261603158cd7SNick Piggin unsigned from, to; 26171da177e4SLinus Torvalds unsigned block_in_page; 2618a4b0672dSNick Piggin unsigned block_start, block_end; 26191da177e4SLinus Torvalds sector_t block_in_file; 26201da177e4SLinus Torvalds int nr_reads = 0; 26211da177e4SLinus Torvalds int ret = 0; 26221da177e4SLinus Torvalds int is_mapped_to_disk = 1; 26231da177e4SLinus Torvalds 262409cbfeafSKirill A. Shutemov index = pos >> PAGE_SHIFT; 262509cbfeafSKirill A. 
Shutemov from = pos & (PAGE_SIZE - 1); 262603158cd7SNick Piggin to = from + len; 262703158cd7SNick Piggin 262854566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 262903158cd7SNick Piggin if (!page) 263003158cd7SNick Piggin return -ENOMEM; 263103158cd7SNick Piggin *pagep = page; 263203158cd7SNick Piggin *fsdata = NULL; 263303158cd7SNick Piggin 263403158cd7SNick Piggin if (page_has_buffers(page)) { 2635309f77adSNamhyung Kim ret = __block_write_begin(page, pos, len, get_block); 2636309f77adSNamhyung Kim if (unlikely(ret)) 2637309f77adSNamhyung Kim goto out_release; 2638309f77adSNamhyung Kim return ret; 263903158cd7SNick Piggin } 2640a4b0672dSNick Piggin 26411da177e4SLinus Torvalds if (PageMappedToDisk(page)) 26421da177e4SLinus Torvalds return 0; 26431da177e4SLinus Torvalds 2644a4b0672dSNick Piggin /* 2645a4b0672dSNick Piggin * Allocate buffers so that we can keep track of state, and potentially 2646a4b0672dSNick Piggin * attach them to the page if an error occurs. In the common case of 2647a4b0672dSNick Piggin * no error, they will just be freed again without ever being attached 2648a4b0672dSNick Piggin * to the page (which is all OK, because we're under the page lock). 2649a4b0672dSNick Piggin * 2650a4b0672dSNick Piggin * Be careful: the buffer linked list is a NULL terminated one, rather 2651a4b0672dSNick Piggin * than the circular one we're used to. 2652a4b0672dSNick Piggin */ 2653a4b0672dSNick Piggin head = alloc_page_buffers(page, blocksize, 0); 265403158cd7SNick Piggin if (!head) { 265503158cd7SNick Piggin ret = -ENOMEM; 265603158cd7SNick Piggin goto out_release; 265703158cd7SNick Piggin } 2658a4b0672dSNick Piggin 265909cbfeafSKirill A. Shutemov block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); 26601da177e4SLinus Torvalds 26611da177e4SLinus Torvalds /* 26621da177e4SLinus Torvalds * We loop across all blocks in the page, whether or not they are 26631da177e4SLinus Torvalds * part of the affected region. This is so we can discover if the 26641da177e4SLinus Torvalds * page is fully mapped-to-disk. 26651da177e4SLinus Torvalds */ 2666a4b0672dSNick Piggin for (block_start = 0, block_in_page = 0, bh = head; 266709cbfeafSKirill A. 
Shutemov block_start < PAGE_SIZE; 2668a4b0672dSNick Piggin block_in_page++, block_start += blocksize, bh = bh->b_this_page) { 26691da177e4SLinus Torvalds int create; 26701da177e4SLinus Torvalds 2671a4b0672dSNick Piggin block_end = block_start + blocksize; 2672a4b0672dSNick Piggin bh->b_state = 0; 26731da177e4SLinus Torvalds create = 1; 26741da177e4SLinus Torvalds if (block_start >= to) 26751da177e4SLinus Torvalds create = 0; 26761da177e4SLinus Torvalds ret = get_block(inode, block_in_file + block_in_page, 2677a4b0672dSNick Piggin bh, create); 26781da177e4SLinus Torvalds if (ret) 26791da177e4SLinus Torvalds goto failed; 2680a4b0672dSNick Piggin if (!buffer_mapped(bh)) 26811da177e4SLinus Torvalds is_mapped_to_disk = 0; 2682a4b0672dSNick Piggin if (buffer_new(bh)) 2683e64855c6SJan Kara clean_bdev_bh_alias(bh); 2684a4b0672dSNick Piggin if (PageUptodate(page)) { 2685a4b0672dSNick Piggin set_buffer_uptodate(bh); 26861da177e4SLinus Torvalds continue; 2687a4b0672dSNick Piggin } 2688a4b0672dSNick Piggin if (buffer_new(bh) || !buffer_mapped(bh)) { 2689eebd2aa3SChristoph Lameter zero_user_segments(page, block_start, from, 2690eebd2aa3SChristoph Lameter to, block_end); 26911da177e4SLinus Torvalds continue; 26921da177e4SLinus Torvalds } 2693a4b0672dSNick Piggin if (buffer_uptodate(bh)) 26941da177e4SLinus Torvalds continue; /* reiserfs does this */ 26951da177e4SLinus Torvalds if (block_start < from || block_end > to) { 2696a4b0672dSNick Piggin lock_buffer(bh); 2697a4b0672dSNick Piggin bh->b_end_io = end_buffer_read_nobh; 26982a222ca9SMike Christie submit_bh(REQ_OP_READ, 0, bh); 2699a4b0672dSNick Piggin nr_reads++; 27001da177e4SLinus Torvalds } 27011da177e4SLinus Torvalds } 27021da177e4SLinus Torvalds 27031da177e4SLinus Torvalds if (nr_reads) { 27041da177e4SLinus Torvalds /* 27051da177e4SLinus Torvalds * The page is locked, so these buffers are protected from 27061da177e4SLinus Torvalds * any VM or truncate activity. Hence we don't need to care 27071da177e4SLinus Torvalds * for the buffer_head refcounts. 27081da177e4SLinus Torvalds */ 2709a4b0672dSNick Piggin for (bh = head; bh; bh = bh->b_this_page) { 27101da177e4SLinus Torvalds wait_on_buffer(bh); 27111da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 27121da177e4SLinus Torvalds ret = -EIO; 27131da177e4SLinus Torvalds } 27141da177e4SLinus Torvalds if (ret) 27151da177e4SLinus Torvalds goto failed; 27161da177e4SLinus Torvalds } 27171da177e4SLinus Torvalds 27181da177e4SLinus Torvalds if (is_mapped_to_disk) 27191da177e4SLinus Torvalds SetPageMappedToDisk(page); 27201da177e4SLinus Torvalds 272103158cd7SNick Piggin *fsdata = head; /* to be released by nobh_write_end */ 2722a4b0672dSNick Piggin 27231da177e4SLinus Torvalds return 0; 27241da177e4SLinus Torvalds 27251da177e4SLinus Torvalds failed: 272603158cd7SNick Piggin BUG_ON(!ret); 27271da177e4SLinus Torvalds /* 2728a4b0672dSNick Piggin * Error recovery is a bit difficult. We need to zero out blocks that 2729a4b0672dSNick Piggin * were newly allocated, and dirty them to ensure they get written out. 2730a4b0672dSNick Piggin * Buffers need to be attached to the page at this point, otherwise 2731a4b0672dSNick Piggin * the handling of potential IO errors during writeout would be hard 2732a4b0672dSNick Piggin * (could try doing synchronous writeout, but what if that fails too?) 
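 *
 * So instead: attach the buffers to the page so the regular writeout
 * and error paths can see them, and zero the new buffers via
 * page_zero_new_buffers() so stale disk contents are never exposed.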
27331da177e4SLinus Torvalds */ 273403158cd7SNick Piggin attach_nobh_buffers(page, head); 273503158cd7SNick Piggin page_zero_new_buffers(page, from, to); 2736a4b0672dSNick Piggin 273703158cd7SNick Piggin out_release: 273803158cd7SNick Piggin unlock_page(page); 273909cbfeafSKirill A. Shutemov put_page(page); 274003158cd7SNick Piggin *pagep = NULL; 2741a4b0672dSNick Piggin 27427bb46a67Snpiggin@suse.de return ret; 27437bb46a67Snpiggin@suse.de } 274403158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin); 27451da177e4SLinus Torvalds 274603158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping, 274703158cd7SNick Piggin loff_t pos, unsigned len, unsigned copied, 274803158cd7SNick Piggin struct page *page, void *fsdata) 27491da177e4SLinus Torvalds { 27501da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 2751efdc3131SNick Piggin struct buffer_head *head = fsdata; 275203158cd7SNick Piggin struct buffer_head *bh; 27535b41e74aSDmitri Monakhov BUG_ON(fsdata != NULL && page_has_buffers(page)); 27541da177e4SLinus Torvalds 2755d4cf109fSDave Kleikamp if (unlikely(copied < len) && head) 275603158cd7SNick Piggin attach_nobh_buffers(page, head); 2757a4b0672dSNick Piggin if (page_has_buffers(page)) 275803158cd7SNick Piggin return generic_write_end(file, mapping, pos, len, 275903158cd7SNick Piggin copied, page, fsdata); 2760a4b0672dSNick Piggin 276122c8ca78SNick Piggin SetPageUptodate(page); 27621da177e4SLinus Torvalds set_page_dirty(page); 276303158cd7SNick Piggin if (pos+copied > inode->i_size) { 276403158cd7SNick Piggin i_size_write(inode, pos+copied); 27651da177e4SLinus Torvalds mark_inode_dirty(inode); 27661da177e4SLinus Torvalds } 276703158cd7SNick Piggin 276803158cd7SNick Piggin unlock_page(page); 276909cbfeafSKirill A. Shutemov put_page(page); 277003158cd7SNick Piggin 277103158cd7SNick Piggin while (head) { 277203158cd7SNick Piggin bh = head; 277303158cd7SNick Piggin head = head->b_this_page; 277403158cd7SNick Piggin free_buffer_head(bh); 27751da177e4SLinus Torvalds } 277603158cd7SNick Piggin 277703158cd7SNick Piggin return copied; 277803158cd7SNick Piggin } 277903158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end); 27801da177e4SLinus Torvalds 27811da177e4SLinus Torvalds /* 27821da177e4SLinus Torvalds * nobh_writepage() - based on block_write_full_page() except 27831da177e4SLinus Torvalds * that it tries to operate without attaching bufferheads to 27841da177e4SLinus Torvalds * the page. 27851da177e4SLinus Torvalds */ 27861da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block, 27871da177e4SLinus Torvalds struct writeback_control *wbc) 27881da177e4SLinus Torvalds { 27891da177e4SLinus Torvalds struct inode * const inode = page->mapping->host; 27901da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 279109cbfeafSKirill A. Shutemov const pgoff_t end_index = i_size >> PAGE_SHIFT; 27921da177e4SLinus Torvalds unsigned offset; 27931da177e4SLinus Torvalds int ret; 27941da177e4SLinus Torvalds 27951da177e4SLinus Torvalds /* Is the page fully inside i_size? */ 27961da177e4SLinus Torvalds if (page->index < end_index) 27971da177e4SLinus Torvalds goto out; 27981da177e4SLinus Torvalds 27991da177e4SLinus Torvalds /* Is the page fully outside i_size? (truncate in progress) */ 280009cbfeafSKirill A. Shutemov offset = i_size & (PAGE_SIZE-1); 28011da177e4SLinus Torvalds if (page->index >= end_index+1 || !offset) { 28021da177e4SLinus Torvalds /* 28031da177e4SLinus Torvalds * The page may have dirty, unmapped buffers.
For example, 28041da177e4SLinus Torvalds * they may have been added in ext3_writepage(). Make them 28051da177e4SLinus Torvalds * freeable here, so the page does not leak. 28061da177e4SLinus Torvalds */ 28071da177e4SLinus Torvalds #if 0 28081da177e4SLinus Torvalds /* Not really sure about this - do we need this ? */ 28091da177e4SLinus Torvalds if (page->mapping->a_ops->invalidatepage) 28101da177e4SLinus Torvalds page->mapping->a_ops->invalidatepage(page, offset); 28111da177e4SLinus Torvalds #endif 28121da177e4SLinus Torvalds unlock_page(page); 28131da177e4SLinus Torvalds return 0; /* don't care */ 28141da177e4SLinus Torvalds } 28151da177e4SLinus Torvalds 28161da177e4SLinus Torvalds /* 28171da177e4SLinus Torvalds * The page straddles i_size. It must be zeroed out on each and every 28181da177e4SLinus Torvalds * writepage invocation because it may be mmapped. "A file is mapped 28191da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 28201da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 28211da177e4SLinus Torvalds * writes to that region are not written out to the file." 28221da177e4SLinus Torvalds */ 282309cbfeafSKirill A. Shutemov zero_user_segment(page, offset, PAGE_SIZE); 28241da177e4SLinus Torvalds out: 28251da177e4SLinus Torvalds ret = mpage_writepage(page, get_block, wbc); 28261da177e4SLinus Torvalds if (ret == -EAGAIN) 282735c80d5fSChris Mason ret = __block_write_full_page(inode, page, get_block, wbc, 282835c80d5fSChris Mason end_buffer_async_write); 28291da177e4SLinus Torvalds return ret; 28301da177e4SLinus Torvalds } 28311da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage); 28321da177e4SLinus Torvalds 283303158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping, 283403158cd7SNick Piggin loff_t from, get_block_t *get_block) 28351da177e4SLinus Torvalds { 283609cbfeafSKirill A. Shutemov pgoff_t index = from >> PAGE_SHIFT; 283709cbfeafSKirill A. Shutemov unsigned offset = from & (PAGE_SIZE-1); 283803158cd7SNick Piggin unsigned blocksize; 283903158cd7SNick Piggin sector_t iblock; 284003158cd7SNick Piggin unsigned length, pos; 284103158cd7SNick Piggin struct inode *inode = mapping->host; 28421da177e4SLinus Torvalds struct page *page; 284303158cd7SNick Piggin struct buffer_head map_bh; 284403158cd7SNick Piggin int err; 28451da177e4SLinus Torvalds 284693407472SFabian Frederick blocksize = i_blocksize(inode); 284703158cd7SNick Piggin length = offset & (blocksize - 1); 28481da177e4SLinus Torvalds 284903158cd7SNick Piggin /* Block boundary? Nothing to do */ 285003158cd7SNick Piggin if (!length) 285103158cd7SNick Piggin return 0; 285203158cd7SNick Piggin 285303158cd7SNick Piggin length = blocksize - length; 285409cbfeafSKirill A. Shutemov iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); 285503158cd7SNick Piggin 28561da177e4SLinus Torvalds page = grab_cache_page(mapping, index); 285703158cd7SNick Piggin err = -ENOMEM; 28581da177e4SLinus Torvalds if (!page) 28591da177e4SLinus Torvalds goto out; 28601da177e4SLinus Torvalds 286103158cd7SNick Piggin if (page_has_buffers(page)) { 286203158cd7SNick Piggin has_buffers: 286303158cd7SNick Piggin unlock_page(page); 286409cbfeafSKirill A. 
Shutemov put_page(page); 286503158cd7SNick Piggin return block_truncate_page(mapping, from, get_block); 28661da177e4SLinus Torvalds } 286703158cd7SNick Piggin 286803158cd7SNick Piggin /* Find the buffer that contains "offset" */ 286903158cd7SNick Piggin pos = blocksize; 287003158cd7SNick Piggin while (offset >= pos) { 287103158cd7SNick Piggin iblock++; 287203158cd7SNick Piggin pos += blocksize; 287303158cd7SNick Piggin } 287403158cd7SNick Piggin 2875460bcf57STheodore Ts'o map_bh.b_size = blocksize; 2876460bcf57STheodore Ts'o map_bh.b_state = 0; 287703158cd7SNick Piggin err = get_block(inode, iblock, &map_bh, 0); 287803158cd7SNick Piggin if (err) 287903158cd7SNick Piggin goto unlock; 288003158cd7SNick Piggin /* unmapped? It's a hole - nothing to do */ 288103158cd7SNick Piggin if (!buffer_mapped(&map_bh)) 288203158cd7SNick Piggin goto unlock; 288303158cd7SNick Piggin 288403158cd7SNick Piggin /* Ok, it's mapped. Make sure it's up-to-date */ 288503158cd7SNick Piggin if (!PageUptodate(page)) { 288603158cd7SNick Piggin err = mapping->a_ops->readpage(NULL, page); 288703158cd7SNick Piggin if (err) { 288809cbfeafSKirill A. Shutemov put_page(page); 288903158cd7SNick Piggin goto out; 289003158cd7SNick Piggin } 289103158cd7SNick Piggin lock_page(page); 289203158cd7SNick Piggin if (!PageUptodate(page)) { 289303158cd7SNick Piggin err = -EIO; 289403158cd7SNick Piggin goto unlock; 289503158cd7SNick Piggin } 289603158cd7SNick Piggin if (page_has_buffers(page)) 289703158cd7SNick Piggin goto has_buffers; 289803158cd7SNick Piggin } 2899eebd2aa3SChristoph Lameter zero_user(page, offset, length); 290003158cd7SNick Piggin set_page_dirty(page); 290103158cd7SNick Piggin err = 0; 290203158cd7SNick Piggin 290303158cd7SNick Piggin unlock: 29041da177e4SLinus Torvalds unlock_page(page); 290509cbfeafSKirill A. Shutemov put_page(page); 29061da177e4SLinus Torvalds out: 290703158cd7SNick Piggin return err; 29081da177e4SLinus Torvalds } 29091da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page); 29101da177e4SLinus Torvalds 29111da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping, 29121da177e4SLinus Torvalds loff_t from, get_block_t *get_block) 29131da177e4SLinus Torvalds { 291409cbfeafSKirill A. Shutemov pgoff_t index = from >> PAGE_SHIFT; 291509cbfeafSKirill A. Shutemov unsigned offset = from & (PAGE_SIZE-1); 29161da177e4SLinus Torvalds unsigned blocksize; 291754b21a79SAndrew Morton sector_t iblock; 29181da177e4SLinus Torvalds unsigned length, pos; 29191da177e4SLinus Torvalds struct inode *inode = mapping->host; 29201da177e4SLinus Torvalds struct page *page; 29211da177e4SLinus Torvalds struct buffer_head *bh; 29221da177e4SLinus Torvalds int err; 29231da177e4SLinus Torvalds 292493407472SFabian Frederick blocksize = i_blocksize(inode); 29251da177e4SLinus Torvalds length = offset & (blocksize - 1); 29261da177e4SLinus Torvalds 29271da177e4SLinus Torvalds /* Block boundary? Nothing to do */ 29281da177e4SLinus Torvalds if (!length) 29291da177e4SLinus Torvalds return 0; 29301da177e4SLinus Torvalds 29311da177e4SLinus Torvalds length = blocksize - length; 293209cbfeafSKirill A. 
Shutemov iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); 29331da177e4SLinus Torvalds 29341da177e4SLinus Torvalds page = grab_cache_page(mapping, index); 29351da177e4SLinus Torvalds err = -ENOMEM; 29361da177e4SLinus Torvalds if (!page) 29371da177e4SLinus Torvalds goto out; 29381da177e4SLinus Torvalds 29391da177e4SLinus Torvalds if (!page_has_buffers(page)) 29401da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0); 29411da177e4SLinus Torvalds 29421da177e4SLinus Torvalds /* Find the buffer that contains "offset" */ 29431da177e4SLinus Torvalds bh = page_buffers(page); 29441da177e4SLinus Torvalds pos = blocksize; 29451da177e4SLinus Torvalds while (offset >= pos) { 29461da177e4SLinus Torvalds bh = bh->b_this_page; 29471da177e4SLinus Torvalds iblock++; 29481da177e4SLinus Torvalds pos += blocksize; 29491da177e4SLinus Torvalds } 29501da177e4SLinus Torvalds 29511da177e4SLinus Torvalds err = 0; 29521da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2953b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 29541da177e4SLinus Torvalds err = get_block(inode, iblock, bh, 0); 29551da177e4SLinus Torvalds if (err) 29561da177e4SLinus Torvalds goto unlock; 29571da177e4SLinus Torvalds /* unmapped? It's a hole - nothing to do */ 29581da177e4SLinus Torvalds if (!buffer_mapped(bh)) 29591da177e4SLinus Torvalds goto unlock; 29601da177e4SLinus Torvalds } 29611da177e4SLinus Torvalds 29621da177e4SLinus Torvalds /* Ok, it's mapped. Make sure it's up-to-date */ 29631da177e4SLinus Torvalds if (PageUptodate(page)) 29641da177e4SLinus Torvalds set_buffer_uptodate(bh); 29651da177e4SLinus Torvalds 296633a266ddSDavid Chinner if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 29671da177e4SLinus Torvalds err = -EIO; 2968dfec8a14SMike Christie ll_rw_block(REQ_OP_READ, 0, 1, &bh); 29691da177e4SLinus Torvalds wait_on_buffer(bh); 29701da177e4SLinus Torvalds /* Uhhuh. Read error. Complain and punt. */ 29711da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 29721da177e4SLinus Torvalds goto unlock; 29731da177e4SLinus Torvalds } 29741da177e4SLinus Torvalds 2975eebd2aa3SChristoph Lameter zero_user(page, offset, length); 29761da177e4SLinus Torvalds mark_buffer_dirty(bh); 29771da177e4SLinus Torvalds err = 0; 29781da177e4SLinus Torvalds 29791da177e4SLinus Torvalds unlock: 29801da177e4SLinus Torvalds unlock_page(page); 298109cbfeafSKirill A. Shutemov put_page(page); 29821da177e4SLinus Torvalds out: 29831da177e4SLinus Torvalds return err; 29841da177e4SLinus Torvalds } 29851fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page); 29861da177e4SLinus Torvalds 29871da177e4SLinus Torvalds /* 29881da177e4SLinus Torvalds * The generic ->writepage function for buffer-backed address_spaces 29891da177e4SLinus Torvalds */ 29901b938c08SMatthew Wilcox int block_write_full_page(struct page *page, get_block_t *get_block, 29911b938c08SMatthew Wilcox struct writeback_control *wbc) 29921da177e4SLinus Torvalds { 29931da177e4SLinus Torvalds struct inode * const inode = page->mapping->host; 29941da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 299509cbfeafSKirill A. Shutemov const pgoff_t end_index = i_size >> PAGE_SHIFT; 29961da177e4SLinus Torvalds unsigned offset; 29971da177e4SLinus Torvalds 29981da177e4SLinus Torvalds /* Is the page fully inside i_size? 
*/ 29991da177e4SLinus Torvalds if (page->index < end_index) 300035c80d5fSChris Mason return __block_write_full_page(inode, page, get_block, wbc, 30011b938c08SMatthew Wilcox end_buffer_async_write); 30021da177e4SLinus Torvalds 30031da177e4SLinus Torvalds /* Is the page fully outside i_size? (truncate in progress) */ 300409cbfeafSKirill A. Shutemov offset = i_size & (PAGE_SIZE-1); 30051da177e4SLinus Torvalds if (page->index >= end_index+1 || !offset) { 30061da177e4SLinus Torvalds /* 30071da177e4SLinus Torvalds * The page may have dirty, unmapped buffers. For example, 30081da177e4SLinus Torvalds * they may have been added in ext3_writepage(). Make them 30091da177e4SLinus Torvalds * freeable here, so the page does not leak. 30101da177e4SLinus Torvalds */ 301109cbfeafSKirill A. Shutemov do_invalidatepage(page, 0, PAGE_SIZE); 30121da177e4SLinus Torvalds unlock_page(page); 30131da177e4SLinus Torvalds return 0; /* don't care */ 30141da177e4SLinus Torvalds } 30151da177e4SLinus Torvalds 30161da177e4SLinus Torvalds /* 30171da177e4SLinus Torvalds * The page straddles i_size. It must be zeroed out on each and every 30182a61aa40SAdam Buchbinder * writepage invocation because it may be mmapped. "A file is mapped 30191da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 30201da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 30211da177e4SLinus Torvalds * writes to that region are not written out to the file." 30221da177e4SLinus Torvalds */ 302309cbfeafSKirill A. Shutemov zero_user_segment(page, offset, PAGE_SIZE); 30241b938c08SMatthew Wilcox return __block_write_full_page(inode, page, get_block, wbc, 302535c80d5fSChris Mason end_buffer_async_write); 302635c80d5fSChris Mason } 30271fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page); 302835c80d5fSChris Mason 30291da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 30301da177e4SLinus Torvalds get_block_t *get_block) 30311da177e4SLinus Torvalds { 30321da177e4SLinus Torvalds struct buffer_head tmp; 30331da177e4SLinus Torvalds struct inode *inode = mapping->host; 30341da177e4SLinus Torvalds tmp.b_state = 0; 30351da177e4SLinus Torvalds tmp.b_blocknr = 0; 303693407472SFabian Frederick tmp.b_size = i_blocksize(inode); 30371da177e4SLinus Torvalds get_block(inode, block, &tmp, 0); 30381da177e4SLinus Torvalds return tmp.b_blocknr; 30391da177e4SLinus Torvalds } 30401fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap); 30411da177e4SLinus Torvalds 30424246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio) 30431da177e4SLinus Torvalds { 30441da177e4SLinus Torvalds struct buffer_head *bh = bio->bi_private; 30451da177e4SLinus Torvalds 3046b7c44ed9SJens Axboe if (unlikely(bio_flagged(bio, BIO_QUIET))) 304708bafc03SKeith Mannthey set_bit(BH_Quiet, &bh->b_state); 304808bafc03SKeith Mannthey 30494246a0b6SChristoph Hellwig bh->b_end_io(bh, !bio->bi_error); 30501da177e4SLinus Torvalds bio_put(bio); 30511da177e4SLinus Torvalds } 30521da177e4SLinus Torvalds 305357302e0dSLinus Torvalds /* 305457302e0dSLinus Torvalds * This allows us to do IO even on the odd last sectors 305559d43914SAkinobu Mita * of a device, even if the block size is some multiple 305657302e0dSLinus Torvalds * of the physical sector size. 305757302e0dSLinus Torvalds * 305857302e0dSLinus Torvalds * We'll just truncate the bio to the size of the device, 305957302e0dSLinus Torvalds * and clear the end of the buffer head manually. 
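 *
 * A worked example with illustrative numbers: a 4096-byte buffer_head
 * whose first sector lies two sectors before the end of the device
 * arrives here as an 8-sector bio. maxsector - bi_sector leaves room
 * for only 2 sectors, so the bio is shrunk by 4096 - (2 << 9) = 3072
 * bytes, and for a read those trailing 3072 bytes of the segment are
 * zeroed by hand below instead of being transferred.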
306057302e0dSLinus Torvalds * 306157302e0dSLinus Torvalds * Truly out-of-range accesses will turn into actual IO 306257302e0dSLinus Torvalds * errors, this only handles the "we need to be able to 306357302e0dSLinus Torvalds * do IO at the final sector" case. 306457302e0dSLinus Torvalds */ 30652a222ca9SMike Christie void guard_bio_eod(int op, struct bio *bio) 306657302e0dSLinus Torvalds { 306757302e0dSLinus Torvalds sector_t maxsector; 306859d43914SAkinobu Mita struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; 306959d43914SAkinobu Mita unsigned truncated_bytes; 307057302e0dSLinus Torvalds 307157302e0dSLinus Torvalds maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 307257302e0dSLinus Torvalds if (!maxsector) 307357302e0dSLinus Torvalds return; 307457302e0dSLinus Torvalds 307557302e0dSLinus Torvalds /* 307657302e0dSLinus Torvalds * If the *whole* IO is past the end of the device, 307757302e0dSLinus Torvalds * let it through, and the IO layer will turn it into 307857302e0dSLinus Torvalds * an EIO. 307957302e0dSLinus Torvalds */ 30804f024f37SKent Overstreet if (unlikely(bio->bi_iter.bi_sector >= maxsector)) 308157302e0dSLinus Torvalds return; 308257302e0dSLinus Torvalds 30834f024f37SKent Overstreet maxsector -= bio->bi_iter.bi_sector; 308459d43914SAkinobu Mita if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) 308557302e0dSLinus Torvalds return; 308657302e0dSLinus Torvalds 308759d43914SAkinobu Mita /* Uhhuh. We've got a bio that straddles the device size! */ 308859d43914SAkinobu Mita truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); 308957302e0dSLinus Torvalds 309057302e0dSLinus Torvalds /* Truncate the bio.. */ 309159d43914SAkinobu Mita bio->bi_iter.bi_size -= truncated_bytes; 309259d43914SAkinobu Mita bvec->bv_len -= truncated_bytes; 309357302e0dSLinus Torvalds 309457302e0dSLinus Torvalds /* ..and clear the end of the buffer for reads */ 30952a222ca9SMike Christie if (op == REQ_OP_READ) { 309659d43914SAkinobu Mita zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, 309759d43914SAkinobu Mita truncated_bytes); 309857302e0dSLinus Torvalds } 309957302e0dSLinus Torvalds } 310057302e0dSLinus Torvalds 31012a222ca9SMike Christie static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, 3102020c2833SEric Biggers struct writeback_control *wbc) 31031da177e4SLinus Torvalds { 31041da177e4SLinus Torvalds struct bio *bio; 31051da177e4SLinus Torvalds 31061da177e4SLinus Torvalds BUG_ON(!buffer_locked(bh)); 31071da177e4SLinus Torvalds BUG_ON(!buffer_mapped(bh)); 31081da177e4SLinus Torvalds BUG_ON(!bh->b_end_io); 31098fb0e342SAneesh Kumar K.V BUG_ON(buffer_delay(bh)); 31108fb0e342SAneesh Kumar K.V BUG_ON(buffer_unwritten(bh)); 31111da177e4SLinus Torvalds 311248fd4f93SJens Axboe /* 311348fd4f93SJens Axboe * Only clear out a write error when rewriting 31141da177e4SLinus Torvalds */ 31152a222ca9SMike Christie if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) 31161da177e4SLinus Torvalds clear_buffer_write_io_error(bh); 31171da177e4SLinus Torvalds 31181da177e4SLinus Torvalds /* 31191da177e4SLinus Torvalds * from here on down, it's all bio -- do the initial mapping, 31201da177e4SLinus Torvalds * submit_bio -> generic_make_request may further map this bio around 31211da177e4SLinus Torvalds */ 31221da177e4SLinus Torvalds bio = bio_alloc(GFP_NOIO, 1); 31231da177e4SLinus Torvalds 31242a814908STejun Heo if (wbc) { 3125b16b1debSTejun Heo wbc_init_bio(wbc, bio); 31262a814908STejun Heo wbc_account_io(wbc, bh->b_page, bh->b_size); 31272a814908STejun Heo } 3128bafc0dbaSTejun Heo 
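	/*
	 * Map the single buffer into the one-segment bio: starting sector,
	 * target block device, and bh->b_size bytes at the buffer's offset
	 * within its page. The bio was allocated with room for exactly one
	 * vec, which the BUG_ON() below double-checks.
	 */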
31294f024f37SKent Overstreet bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 31301da177e4SLinus Torvalds bio->bi_bdev = bh->b_bdev; 31311da177e4SLinus Torvalds 31326cf66b4cSKent Overstreet bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 31336cf66b4cSKent Overstreet BUG_ON(bio->bi_iter.bi_size != bh->b_size); 31341da177e4SLinus Torvalds 31351da177e4SLinus Torvalds bio->bi_end_io = end_bio_bh_io_sync; 31361da177e4SLinus Torvalds bio->bi_private = bh; 31371da177e4SLinus Torvalds 313857302e0dSLinus Torvalds /* Take care of bh's that straddle the end of the device */ 31392a222ca9SMike Christie guard_bio_eod(op, bio); 314057302e0dSLinus Torvalds 3141877f962cSTheodore Ts'o if (buffer_meta(bh)) 31422a222ca9SMike Christie op_flags |= REQ_META; 3143877f962cSTheodore Ts'o if (buffer_prio(bh)) 31442a222ca9SMike Christie op_flags |= REQ_PRIO; 31452a222ca9SMike Christie bio_set_op_attrs(bio, op, op_flags); 3146877f962cSTheodore Ts'o 31474e49ea4aSMike Christie submit_bio(bio); 3148f6454b04SJulia Lawall return 0; 31491da177e4SLinus Torvalds } 3150bafc0dbaSTejun Heo 31512a222ca9SMike Christie int submit_bh(int op, int op_flags, struct buffer_head *bh) 315271368511SDarrick J. Wong { 3153020c2833SEric Biggers return submit_bh_wbc(op, op_flags, bh, NULL); 315471368511SDarrick J. Wong } 31551fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh); 31561da177e4SLinus Torvalds 31571da177e4SLinus Torvalds /** 31581da177e4SLinus Torvalds * ll_rw_block: low-level access to block devices (DEPRECATED) 3159dfec8a14SMike Christie * @op: whether to %READ or %WRITE 3160ef295ecfSChristoph Hellwig * @op_flags: req_flag_bits 31611da177e4SLinus Torvalds * @nr: number of &struct buffer_heads in the array 31621da177e4SLinus Torvalds * @bhs: array of pointers to &struct buffer_head 31631da177e4SLinus Torvalds * 3164a7662236SJan Kara * ll_rw_block() takes an array of pointers to &struct buffer_heads, and 316570246286SChristoph Hellwig * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE. 316670246286SChristoph Hellwig * @op_flags contains flags modifying the detailed I/O behavior, most notably 316770246286SChristoph Hellwig * %REQ_RAHEAD. 31681da177e4SLinus Torvalds * 31691da177e4SLinus Torvalds * This function drops any buffer that it cannot get a lock on (with the 31709cb569d6SChristoph Hellwig * BH_Lock state bit), any buffer that appears to be clean when doing a write 31719cb569d6SChristoph Hellwig * request, and any buffer that appears to be up-to-date when doing a read 31729cb569d6SChristoph Hellwig * request. Further, it marks as clean the buffers that are processed for 31739cb569d6SChristoph Hellwig * writing (the buffer cache won't assume that they are actually clean 31749cb569d6SChristoph Hellwig * until the buffer gets unlocked). 31751da177e4SLinus Torvalds * 31761da177e4SLinus Torvalds * ll_rw_block sets b_end_io to a simple completion handler that marks 3177e227867fSMasanari Iida * the buffer up-to-date (if appropriate), unlocks the buffer and wakes 31781da177e4SLinus Torvalds * any waiters. 31791da177e4SLinus Torvalds * 31801da177e4SLinus Torvalds * All of the buffers must be for the same device, and must also be a 31811da177e4SLinus Torvalds * multiple of the current approved size for the device.
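 *
 * An illustrative call pattern (not taken verbatim from any caller):
 * batch up read requests for several buffers, then wait only on the
 * one needed immediately:
 *
 *	ll_rw_block(REQ_OP_READ, 0, nr, bhs);
 *	wait_on_buffer(bhs[0]);
 *	if (!buffer_uptodate(bhs[0]))
 *		return -EIO;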
31821da177e4SLinus Torvalds */ 3183dfec8a14SMike Christie void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[]) 31841da177e4SLinus Torvalds { 31851da177e4SLinus Torvalds int i; 31861da177e4SLinus Torvalds 31871da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 31881da177e4SLinus Torvalds struct buffer_head *bh = bhs[i]; 31891da177e4SLinus Torvalds 31909cb569d6SChristoph Hellwig if (!trylock_buffer(bh)) 31911da177e4SLinus Torvalds continue; 3192dfec8a14SMike Christie if (op == WRITE) { 31931da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 319476c3073aSakpm@osdl.org bh->b_end_io = end_buffer_write_sync; 3195e60e5c50SOGAWA Hirofumi get_bh(bh); 3196dfec8a14SMike Christie submit_bh(op, op_flags, bh); 31971da177e4SLinus Torvalds continue; 31981da177e4SLinus Torvalds } 31991da177e4SLinus Torvalds } else { 32001da177e4SLinus Torvalds if (!buffer_uptodate(bh)) { 320176c3073aSakpm@osdl.org bh->b_end_io = end_buffer_read_sync; 3202e60e5c50SOGAWA Hirofumi get_bh(bh); 3203dfec8a14SMike Christie submit_bh(op, op_flags, bh); 32041da177e4SLinus Torvalds continue; 32051da177e4SLinus Torvalds } 32061da177e4SLinus Torvalds } 32071da177e4SLinus Torvalds unlock_buffer(bh); 32081da177e4SLinus Torvalds } 32091da177e4SLinus Torvalds } 32101fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(ll_rw_block); 32111da177e4SLinus Torvalds 32122a222ca9SMike Christie void write_dirty_buffer(struct buffer_head *bh, int op_flags) 32139cb569d6SChristoph Hellwig { 32149cb569d6SChristoph Hellwig lock_buffer(bh); 32159cb569d6SChristoph Hellwig if (!test_clear_buffer_dirty(bh)) { 32169cb569d6SChristoph Hellwig unlock_buffer(bh); 32179cb569d6SChristoph Hellwig return; 32189cb569d6SChristoph Hellwig } 32199cb569d6SChristoph Hellwig bh->b_end_io = end_buffer_write_sync; 32209cb569d6SChristoph Hellwig get_bh(bh); 32212a222ca9SMike Christie submit_bh(REQ_OP_WRITE, op_flags, bh); 32229cb569d6SChristoph Hellwig } 32239cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer); 32249cb569d6SChristoph Hellwig 32251da177e4SLinus Torvalds /* 32261da177e4SLinus Torvalds * For a data-integrity writeout, we need to wait upon any in-progress I/O 32271da177e4SLinus Torvalds * and then start new I/O and then wait upon it. The caller must have a ref on 32281da177e4SLinus Torvalds * the buffer_head. 
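 *
 * An illustrative caller, showing the reference this requires:
 *
 *	get_bh(bh);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);	(= __sync_dirty_buffer(bh, REQ_SYNC))
 *	brelse(bh);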
32291da177e4SLinus Torvalds */ 32302a222ca9SMike Christie int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) 32311da177e4SLinus Torvalds { 32321da177e4SLinus Torvalds int ret = 0; 32331da177e4SLinus Torvalds 32341da177e4SLinus Torvalds WARN_ON(atomic_read(&bh->b_count) < 1); 32351da177e4SLinus Torvalds lock_buffer(bh); 32361da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 32371da177e4SLinus Torvalds get_bh(bh); 32381da177e4SLinus Torvalds bh->b_end_io = end_buffer_write_sync; 32392a222ca9SMike Christie ret = submit_bh(REQ_OP_WRITE, op_flags, bh); 32401da177e4SLinus Torvalds wait_on_buffer(bh); 32411da177e4SLinus Torvalds if (!ret && !buffer_uptodate(bh)) 32421da177e4SLinus Torvalds ret = -EIO; 32431da177e4SLinus Torvalds } else { 32441da177e4SLinus Torvalds unlock_buffer(bh); 32451da177e4SLinus Torvalds } 32461da177e4SLinus Torvalds return ret; 32471da177e4SLinus Torvalds } 324887e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer); 324987e99511SChristoph Hellwig 325087e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh) 325187e99511SChristoph Hellwig { 325270fd7614SChristoph Hellwig return __sync_dirty_buffer(bh, REQ_SYNC); 325387e99511SChristoph Hellwig } 32541fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer); 32551da177e4SLinus Torvalds 32561da177e4SLinus Torvalds /* 32571da177e4SLinus Torvalds * try_to_free_buffers() checks if all the buffers on this particular page 32581da177e4SLinus Torvalds * are unused, and releases them if so. 32591da177e4SLinus Torvalds * 32601da177e4SLinus Torvalds * Exclusion against try_to_free_buffers may be obtained by either 32611da177e4SLinus Torvalds * locking the page or by holding its mapping's private_lock. 32621da177e4SLinus Torvalds * 32631da177e4SLinus Torvalds * If the page is dirty but all the buffers are clean then we need to 32641da177e4SLinus Torvalds * be sure to mark the page clean as well. This is because the page 32651da177e4SLinus Torvalds * may be against a block device, and a later reattachment of buffers 32661da177e4SLinus Torvalds * to a dirty page will set *all* buffers dirty. Which would corrupt 32671da177e4SLinus Torvalds * filesystem data on the same device. 32681da177e4SLinus Torvalds * 32691da177e4SLinus Torvalds * The same applies to regular filesystem pages: if all the buffers are 32701da177e4SLinus Torvalds * clean then we set the page clean and proceed. To do that, we require 32711da177e4SLinus Torvalds * total exclusion from __set_page_dirty_buffers(). That is obtained with 32721da177e4SLinus Torvalds * private_lock. 32731da177e4SLinus Torvalds * 32741da177e4SLinus Torvalds * try_to_free_buffers() is non-blocking. 
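 *
 * (It is typically reached via try_to_release_page() or a filesystem's
 * ->releasepage() when the VM wants to detach buffers before freeing or
 * migrating the page.)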
32751da177e4SLinus Torvalds */ 32761da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh) 32771da177e4SLinus Torvalds { 32781da177e4SLinus Torvalds return atomic_read(&bh->b_count) | 32791da177e4SLinus Torvalds (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 32801da177e4SLinus Torvalds } 32811da177e4SLinus Torvalds 32821da177e4SLinus Torvalds static int 32831da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free) 32841da177e4SLinus Torvalds { 32851da177e4SLinus Torvalds struct buffer_head *head = page_buffers(page); 32861da177e4SLinus Torvalds struct buffer_head *bh; 32871da177e4SLinus Torvalds 32881da177e4SLinus Torvalds bh = head; 32891da177e4SLinus Torvalds do { 32901da177e4SLinus Torvalds if (buffer_busy(bh)) 32911da177e4SLinus Torvalds goto failed; 32921da177e4SLinus Torvalds bh = bh->b_this_page; 32931da177e4SLinus Torvalds } while (bh != head); 32941da177e4SLinus Torvalds 32951da177e4SLinus Torvalds do { 32961da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 32971da177e4SLinus Torvalds 3298535ee2fbSJan Kara if (bh->b_assoc_map) 32991da177e4SLinus Torvalds __remove_assoc_queue(bh); 33001da177e4SLinus Torvalds bh = next; 33011da177e4SLinus Torvalds } while (bh != head); 33021da177e4SLinus Torvalds *buffers_to_free = head; 33031da177e4SLinus Torvalds __clear_page_buffers(page); 33041da177e4SLinus Torvalds return 1; 33051da177e4SLinus Torvalds failed: 33061da177e4SLinus Torvalds return 0; 33071da177e4SLinus Torvalds } 33081da177e4SLinus Torvalds 33091da177e4SLinus Torvalds int try_to_free_buffers(struct page *page) 33101da177e4SLinus Torvalds { 33111da177e4SLinus Torvalds struct address_space * const mapping = page->mapping; 33121da177e4SLinus Torvalds struct buffer_head *buffers_to_free = NULL; 33131da177e4SLinus Torvalds int ret = 0; 33141da177e4SLinus Torvalds 33151da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 3316ecdfc978SLinus Torvalds if (PageWriteback(page)) 33171da177e4SLinus Torvalds return 0; 33181da177e4SLinus Torvalds 33191da177e4SLinus Torvalds if (mapping == NULL) { /* can this still happen? */ 33201da177e4SLinus Torvalds ret = drop_buffers(page, &buffers_to_free); 33211da177e4SLinus Torvalds goto out; 33221da177e4SLinus Torvalds } 33231da177e4SLinus Torvalds 33241da177e4SLinus Torvalds spin_lock(&mapping->private_lock); 33251da177e4SLinus Torvalds ret = drop_buffers(page, &buffers_to_free); 3326ecdfc978SLinus Torvalds 3327ecdfc978SLinus Torvalds /* 3328ecdfc978SLinus Torvalds * If the filesystem writes its buffers by hand (eg ext3) 3329ecdfc978SLinus Torvalds * then we can have clean buffers against a dirty page. We 3330ecdfc978SLinus Torvalds * clean the page here; otherwise the VM will never notice 3331ecdfc978SLinus Torvalds * that the filesystem did any IO at all. 3332ecdfc978SLinus Torvalds * 3333ecdfc978SLinus Torvalds * Also, during truncate, discard_buffer will have marked all 3334ecdfc978SLinus Torvalds * the page's buffers clean. We discover that here and clean 3335ecdfc978SLinus Torvalds * the page also. 333687df7241SNick Piggin * 333787df7241SNick Piggin * private_lock must be held over this entire operation in order 333887df7241SNick Piggin * to synchronise against __set_page_dirty_buffers and prevent the 333987df7241SNick Piggin * dirty bit from being lost. 
3340ecdfc978SLinus Torvalds */ 334111f81becSTejun Heo if (ret) 334211f81becSTejun Heo cancel_dirty_page(page); 334387df7241SNick Piggin spin_unlock(&mapping->private_lock); 33441da177e4SLinus Torvalds out: 33451da177e4SLinus Torvalds if (buffers_to_free) { 33461da177e4SLinus Torvalds struct buffer_head *bh = buffers_to_free; 33471da177e4SLinus Torvalds 33481da177e4SLinus Torvalds do { 33491da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 33501da177e4SLinus Torvalds free_buffer_head(bh); 33511da177e4SLinus Torvalds bh = next; 33521da177e4SLinus Torvalds } while (bh != buffers_to_free); 33531da177e4SLinus Torvalds } 33541da177e4SLinus Torvalds return ret; 33551da177e4SLinus Torvalds } 33561da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers); 33571da177e4SLinus Torvalds 33581da177e4SLinus Torvalds /* 33591da177e4SLinus Torvalds * There are no bdflush tunables left. But distributions are 33601da177e4SLinus Torvalds * still running obsolete flush daemons, so we terminate them here. 33611da177e4SLinus Torvalds * 33621da177e4SLinus Torvalds * Use of bdflush() is deprecated and will be removed in a future kernel. 33635b0830cbSJens Axboe * The `flush-X' kernel threads fully replace bdflush daemons and this call. 33641da177e4SLinus Torvalds */ 3365bdc480e3SHeiko Carstens SYSCALL_DEFINE2(bdflush, int, func, long, data) 33661da177e4SLinus Torvalds { 33671da177e4SLinus Torvalds static int msg_count; 33681da177e4SLinus Torvalds 33691da177e4SLinus Torvalds if (!capable(CAP_SYS_ADMIN)) 33701da177e4SLinus Torvalds return -EPERM; 33711da177e4SLinus Torvalds 33721da177e4SLinus Torvalds if (msg_count < 5) { 33731da177e4SLinus Torvalds msg_count++; 33741da177e4SLinus Torvalds printk(KERN_INFO 33751da177e4SLinus Torvalds "warning: process `%s' used the obsolete bdflush" 33761da177e4SLinus Torvalds " system call\n", current->comm); 33771da177e4SLinus Torvalds printk(KERN_INFO "Fix your initscripts?\n"); 33781da177e4SLinus Torvalds } 33791da177e4SLinus Torvalds 33801da177e4SLinus Torvalds if (func == 1) 33811da177e4SLinus Torvalds do_exit(0); 33821da177e4SLinus Torvalds return 0; 33831da177e4SLinus Torvalds } 33841da177e4SLinus Torvalds 33851da177e4SLinus Torvalds /* 33861da177e4SLinus Torvalds * Buffer-head allocation 33871da177e4SLinus Torvalds */ 3388a0a9b043SShai Fultheim static struct kmem_cache *bh_cachep __read_mostly; 33891da177e4SLinus Torvalds 33901da177e4SLinus Torvalds /* 33911da177e4SLinus Torvalds * Once the number of bh's in the machine exceeds this level, we start 33921da177e4SLinus Torvalds * stripping them in writeback. 
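 *
 * (The limit is computed at boot in buffer_init(), sized so that
 * buffer_heads can consume roughly 10% of low memory at most.)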
static int buffer_exit_cpu_dead(unsigned int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
	per_cpu(bh_accounting, cpu).nr = 0;
	return 0;
}

/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date; otherwise return
 * false with the buffer locked.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);
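/*
 * Illustrative sketch, not part of the upstream file: the idiomatic
 * caller pairs bh_uptodate_or_lock() with bh_submit_read() (defined just
 * below) to get a synchronous read-once-if-needed access to a metadata
 * block.  example_read_bh() is a hypothetical name.
 */
static int example_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;	/* already uptodate, nothing to read */
	/* Here bh is locked and stale: read it and wait for completion. */
	return bh_submit_read(bh);	/* 0 on success, -EIO on error */
}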
/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, 0, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);

void __init buffer_init(void)
{
	unsigned long nrpages;
	int ret;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
					NULL, buffer_exit_cpu_dead);
	WARN_ON(ret < 0);
}
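/*
 * Worked example for the sizing above (illustrative; the concrete numbers
 * are assumptions, not measurements): with 4 KiB pages and
 * sizeof(struct buffer_head) == 104 bytes on a typical 64-bit build,
 * PAGE_SIZE / sizeof(struct buffer_head) == 39.  A machine where
 * nr_free_buffer_pages() returns 1,000,000 therefore gets
 *
 *	nrpages          = 1000000 * 10 / 100 =  100000
 *	max_buffer_heads = 100000 * 39        = 3900000
 *
 * so buffer_heads_over_limit trips once roughly 3.9 million bh's are
 * live, at which point writeback starts stripping buffers from pages.
 */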