/*
 * linux/fs/buffer.c
 *
 * Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}

static int sync_buffer(void *word)
{
        struct block_device *bd;
        struct buffer_head *bh
                = container_of(word, struct buffer_head, b_state);

        smp_mb();
        bd = bh->b_bdev;
        if (bd)
                blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
}
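/*
 * Note: sync_buffer() above is the wait_on_bit() "action" routine used by
 * __lock_buffer() and __wait_on_buffer() below: it kicks the backing block
 * device's request queue and then sleeps in io_schedule() until the bit
 * being waited on (BH_Lock) is cleared.
 */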
void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        smp_mb__before_clear_bit();
        clear_buffer_locked(bh);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        int ret = 0;

        if (bdev)
                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
        return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = fsync_super(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;

        down(&bdev->bd_mount_sem);
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
                smp_wmb();

                __fsync_super(sb);

                sb->s_frozen = SB_FREEZE_TRANS;
                smp_wmb();

                sync_blockdev(sb->s_bdev);

                if (sb->s_op->write_super_lockfs)
                        sb->s_op->write_super_lockfs(sb);
        }

        sync_blockdev(bdev);
        return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        if (sb) {
                BUG_ON(sb->s_bdev != bdev);

                if (sb->s_op->unlockfs)
                        sb->s_op->unlockfs(sb);
                sb->s_frozen = SB_UNFROZEN;
                smp_wmb();
                wake_up(&sb->s_wait_unfrozen);
                drop_super(sb);
        }

        up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to have been stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk even though
   there is still dirty data not synced on disk (due to a bug in the device
   driver or to an error by the user), not destroying the dirty buffers could
   also corrupt the next media inserted; thus a parameter is necessary to
   handle this case in the safest possible way (trying not to corrupt the
   newly inserted disk with data belonging to the old, now corrupted, disk).
   Also, for a ramdisk the natural way to release its memory is to destroy
   the dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read any pinned buffer from disk.
   NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        invalidate_mapping_pages(mapping, 0, -1);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zonelist *zonelist;
        int nid;

        wakeup_pdflush(1024);
        yield();

        for_each_online_node(nid) {
                zonelist = node_zonelist(nid, GFP_NOFS);
                if (zonelist->zones[0])
                        try_to_free_pages(zonelist, 0, GFP_NOFS);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (printk_ratelimit())
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_write;
        set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
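
/*
 * Illustrative sketch (not part of the original file): how the private_list
 * machinery documented above is typically driven by a simple buffer-backed
 * filesystem.  The names example_dirty_indirect()/example_fsync() are
 * hypothetical; the pattern is to mark dependent buffers (e.g. indirect
 * blocks) dirty against the owning inode, and then have the filesystem's
 * ->fsync() call sync_mapping_buffers() to write and wait on them:
 *
 *	static void example_dirty_indirect(struct inode *inode,
 *					   struct buffer_head *bh)
 *	{
 *		// queues bh on inode->i_mapping->private_list
 *		mark_buffer_dirty_inode(bh, inode);
 *	}
 *
 *	static int example_fsync(struct file *file, struct dentry *dentry,
 *				 int datasync)
 *	{
 *		return sync_mapping_buffers(dentry->d_inode->i_mapping);
 *	}
 */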

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
{
        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        if (TestSetPageDirty(page))
                return 0;

        write_lock_irq(&mapping->tree_lock);
        if (page->mapping) {	/* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));

                if (mapping_cap_account_dirty(mapping)) {
                        __inc_zone_page_state(page, NR_FILE_DIRTY);
                        __inc_bdi_stat(mapping->backing_dev_info,
                                        BDI_RECLAIMABLE);
                        task_io_account_write(PAGE_CACHE_SIZE);
                }
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        write_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        spin_unlock(&mapping->private_lock);

        return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;

        INIT_LIST_HEAD(&tmp);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * ll_rw_block() actually writes the current
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
                                ll_rw_block(SWRITE, 1, &bh);
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_private = NULL;
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                init_buffer(bh, NULL, NULL);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;

        page = find_or_create_page(inode->i_mapping, index,
                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
                return NULL;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        init_page_buffers(page, bdev, block, size);
                        return page;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        init_page_buffers(page, bdev, block, size);
        spin_unlock(&inode->i_mapping->private_lock);
        return page;

failed:
        BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
        struct page *page;
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index.  (this comparison is done using sector_t types).
(this comparison is done using sector_t types). 1095e5657933SAndrew Morton */ 1096e5657933SAndrew Morton if (unlikely(index != block >> sizebits)) { 1097e5657933SAndrew Morton char b[BDEVNAME_SIZE]; 1098e5657933SAndrew Morton 1099e5657933SAndrew Morton printk(KERN_ERR "%s: requested out-of-range block %llu for " 1100e5657933SAndrew Morton "device %s\n", 1101e5657933SAndrew Morton __FUNCTION__, (unsigned long long)block, 1102e5657933SAndrew Morton bdevname(bdev, b)); 1103e5657933SAndrew Morton return -EIO; 1104e5657933SAndrew Morton } 1105e5657933SAndrew Morton block = index << sizebits; 11061da177e4SLinus Torvalds /* Create a page with the proper size buffers.. */ 11071da177e4SLinus Torvalds page = grow_dev_page(bdev, block, index, size); 11081da177e4SLinus Torvalds if (!page) 11091da177e4SLinus Torvalds return 0; 11101da177e4SLinus Torvalds unlock_page(page); 11111da177e4SLinus Torvalds page_cache_release(page); 11121da177e4SLinus Torvalds return 1; 11131da177e4SLinus Torvalds } 11141da177e4SLinus Torvalds 111575c96f85SAdrian Bunk static struct buffer_head * 11161da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size) 11171da177e4SLinus Torvalds { 11181da177e4SLinus Torvalds /* Size must be multiple of hard sectorsize */ 11191da177e4SLinus Torvalds if (unlikely(size & (bdev_hardsect_size(bdev)-1) || 11201da177e4SLinus Torvalds (size < 512 || size > PAGE_SIZE))) { 11211da177e4SLinus Torvalds printk(KERN_ERR "getblk(): invalid block size %d requested\n", 11221da177e4SLinus Torvalds size); 11231da177e4SLinus Torvalds printk(KERN_ERR "hardsect size: %d\n", 11241da177e4SLinus Torvalds bdev_hardsect_size(bdev)); 11251da177e4SLinus Torvalds 11261da177e4SLinus Torvalds dump_stack(); 11271da177e4SLinus Torvalds return NULL; 11281da177e4SLinus Torvalds } 11291da177e4SLinus Torvalds 11301da177e4SLinus Torvalds for (;;) { 11311da177e4SLinus Torvalds struct buffer_head * bh; 1132e5657933SAndrew Morton int ret; 11331da177e4SLinus Torvalds 11341da177e4SLinus Torvalds bh = __find_get_block(bdev, block, size); 11351da177e4SLinus Torvalds if (bh) 11361da177e4SLinus Torvalds return bh; 11371da177e4SLinus Torvalds 1138e5657933SAndrew Morton ret = grow_buffers(bdev, block, size); 1139e5657933SAndrew Morton if (ret < 0) 1140e5657933SAndrew Morton return NULL; 1141e5657933SAndrew Morton if (ret == 0) 11421da177e4SLinus Torvalds free_more_memory(); 11431da177e4SLinus Torvalds } 11441da177e4SLinus Torvalds } 11451da177e4SLinus Torvalds 11461da177e4SLinus Torvalds /* 11471da177e4SLinus Torvalds * The relationship between dirty buffers and dirty pages: 11481da177e4SLinus Torvalds * 11491da177e4SLinus Torvalds * Whenever a page has any dirty buffers, the page's dirty bit is set, and 11501da177e4SLinus Torvalds * the page is tagged dirty in its radix tree. 11511da177e4SLinus Torvalds * 11521da177e4SLinus Torvalds * At all times, the dirtiness of the buffers represents the dirtiness of 11531da177e4SLinus Torvalds * subsections of the page. If the page has buffers, the page dirty bit is 11541da177e4SLinus Torvalds * merely a hint about the true dirty state. 11551da177e4SLinus Torvalds * 11561da177e4SLinus Torvalds * When a page is set dirty in its entirety, all its buffers are marked dirty 11571da177e4SLinus Torvalds * (if the page has buffers). 11581da177e4SLinus Torvalds * 11591da177e4SLinus Torvalds * When a buffer is marked dirty, its page is dirtied, but the page's other 11601da177e4SLinus Torvalds * buffers are not. 
11611da177e4SLinus Torvalds * 11621da177e4SLinus Torvalds * Also. When blockdev buffers are explicitly read with bread(), they 11631da177e4SLinus Torvalds * individually become uptodate. But their backing page remains not 11641da177e4SLinus Torvalds * uptodate - even if all of its buffers are uptodate. A subsequent 11651da177e4SLinus Torvalds * block_read_full_page() against that page will discover all the uptodate 11661da177e4SLinus Torvalds * buffers, will set the page uptodate and will perform no I/O. 11671da177e4SLinus Torvalds */ 11681da177e4SLinus Torvalds 11691da177e4SLinus Torvalds /** 11701da177e4SLinus Torvalds * mark_buffer_dirty - mark a buffer_head as needing writeout 117167be2dd1SMartin Waitz * @bh: the buffer_head to mark dirty 11721da177e4SLinus Torvalds * 11731da177e4SLinus Torvalds * mark_buffer_dirty() will set the dirty bit against the buffer, then set its 11741da177e4SLinus Torvalds * backing page dirty, then tag the page as dirty in its address_space's radix 11751da177e4SLinus Torvalds * tree and then attach the address_space's inode to its superblock's dirty 11761da177e4SLinus Torvalds * inode list. 11771da177e4SLinus Torvalds * 11781da177e4SLinus Torvalds * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, 11791da177e4SLinus Torvalds * mapping->tree_lock and the global inode_lock. 11801da177e4SLinus Torvalds */ 1181fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh) 11821da177e4SLinus Torvalds { 1183787d2214SNick Piggin WARN_ON_ONCE(!buffer_uptodate(bh)); 11841be62dc1SLinus Torvalds 11851be62dc1SLinus Torvalds /* 11861be62dc1SLinus Torvalds * Very *carefully* optimize the it-is-already-dirty case. 11871be62dc1SLinus Torvalds * 11881be62dc1SLinus Torvalds * Don't let the final "is it dirty" escape to before we 11891be62dc1SLinus Torvalds * perhaps modified the buffer. 11901be62dc1SLinus Torvalds */ 11911be62dc1SLinus Torvalds if (buffer_dirty(bh)) { 11921be62dc1SLinus Torvalds smp_mb(); 11931be62dc1SLinus Torvalds if (buffer_dirty(bh)) 11941be62dc1SLinus Torvalds return; 11951be62dc1SLinus Torvalds } 11961be62dc1SLinus Torvalds 11971be62dc1SLinus Torvalds if (!test_set_buffer_dirty(bh)) 1198787d2214SNick Piggin __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0); 11991da177e4SLinus Torvalds } 12001da177e4SLinus Torvalds 12011da177e4SLinus Torvalds /* 12021da177e4SLinus Torvalds * Decrement a buffer_head's reference count. If all buffers against a page 12031da177e4SLinus Torvalds * have zero reference count, are clean and unlocked, and if the page is clean 12041da177e4SLinus Torvalds * and unlocked then try_to_free_buffers() may strip the buffers from the page 12051da177e4SLinus Torvalds * in preparation for freeing it (sometimes, rarely, buffers are removed from 12061da177e4SLinus Torvalds * a page but it ends up not being freed, and buffers may later be reattached). 12071da177e4SLinus Torvalds */ 12081da177e4SLinus Torvalds void __brelse(struct buffer_head * buf) 12091da177e4SLinus Torvalds { 12101da177e4SLinus Torvalds if (atomic_read(&buf->b_count)) { 12111da177e4SLinus Torvalds put_bh(buf); 12121da177e4SLinus Torvalds return; 12131da177e4SLinus Torvalds } 12141da177e4SLinus Torvalds printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 12151da177e4SLinus Torvalds WARN_ON(1); 12161da177e4SLinus Torvalds } 12171da177e4SLinus Torvalds 12181da177e4SLinus Torvalds /* 12191da177e4SLinus Torvalds * bforget() is like brelse(), except it discards any 12201da177e4SLinus Torvalds * potentially dirty data. 
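/*
 * A minimal sketch of how a filesystem typically chooses between brelse()
 * and bforget() for the helpers below; the myfs_* name, the block number
 * and the update itself are hypothetical.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/string.h>

static void myfs_update_and_discard(struct super_block *sb, sector_t blk,
				    struct buffer_head *freed_bh)
{
	struct buffer_head *bh;

	/* normal path: modify a block and let writeback handle it later */
	bh = sb_bread(sb, blk);
	if (bh) {
		memset(bh->b_data, 0, bh->b_size);	/* some update */
		mark_buffer_dirty(bh);
		brelse(bh);	/* drop our reference, keep the dirty data */
	}

	/*
	 * A buffer whose on-disk block was just freed: its contents are now
	 * meaningless, so drop the reference and the dirty data together.
	 */
	bforget(freed_bh);
}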
12211da177e4SLinus Torvalds */ 12221da177e4SLinus Torvalds void __bforget(struct buffer_head *bh) 12231da177e4SLinus Torvalds { 12241da177e4SLinus Torvalds clear_buffer_dirty(bh); 1225535ee2fbSJan Kara if (bh->b_assoc_map) { 12261da177e4SLinus Torvalds struct address_space *buffer_mapping = bh->b_page->mapping; 12271da177e4SLinus Torvalds 12281da177e4SLinus Torvalds spin_lock(&buffer_mapping->private_lock); 12291da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers); 123058ff407bSJan Kara bh->b_assoc_map = NULL; 12311da177e4SLinus Torvalds spin_unlock(&buffer_mapping->private_lock); 12321da177e4SLinus Torvalds } 12331da177e4SLinus Torvalds __brelse(bh); 12341da177e4SLinus Torvalds } 12351da177e4SLinus Torvalds 12361da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh) 12371da177e4SLinus Torvalds { 12381da177e4SLinus Torvalds lock_buffer(bh); 12391da177e4SLinus Torvalds if (buffer_uptodate(bh)) { 12401da177e4SLinus Torvalds unlock_buffer(bh); 12411da177e4SLinus Torvalds return bh; 12421da177e4SLinus Torvalds } else { 12431da177e4SLinus Torvalds get_bh(bh); 12441da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_sync; 12451da177e4SLinus Torvalds submit_bh(READ, bh); 12461da177e4SLinus Torvalds wait_on_buffer(bh); 12471da177e4SLinus Torvalds if (buffer_uptodate(bh)) 12481da177e4SLinus Torvalds return bh; 12491da177e4SLinus Torvalds } 12501da177e4SLinus Torvalds brelse(bh); 12511da177e4SLinus Torvalds return NULL; 12521da177e4SLinus Torvalds } 12531da177e4SLinus Torvalds 12541da177e4SLinus Torvalds /* 12551da177e4SLinus Torvalds * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 12561da177e4SLinus Torvalds * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 12571da177e4SLinus Torvalds * refcount elevated by one when they're in an LRU. A buffer can only appear 12581da177e4SLinus Torvalds * once in a particular CPU's LRU. A single buffer can be present in multiple 12591da177e4SLinus Torvalds * CPU's LRUs at the same time. 12601da177e4SLinus Torvalds * 12611da177e4SLinus Torvalds * This is a transparent caching front-end to sb_bread(), sb_getblk() and 12621da177e4SLinus Torvalds * sb_find_get_block(). 12631da177e4SLinus Torvalds * 12641da177e4SLinus Torvalds * The LRUs themselves only need locking against invalidate_bh_lrus. We use 12651da177e4SLinus Torvalds * a local interrupt disable for that. 
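/*
 * A user-space model of the install side of this LRU: the new entry goes
 * into slot 0, existing entries shift down, a duplicate is dropped, and
 * whatever falls off the end is the evictee (whose reference the real code
 * releases with __brelse()).  Plain ints stand in for buffer_head pointers.
 */
#include <stdio.h>
#include <string.h>

#define BH_LRU_SIZE 8

static int lru[BH_LRU_SIZE];

static void lru_install(int blk)
{
	int tmp[BH_LRU_SIZE] = { 0 };
	int in, out = 0, evictee = 0;

	if (lru[0] == blk)
		return;				/* already most recently used */

	tmp[out++] = blk;		/* get_bh(bh) happens here in the kernel */
	for (in = 0; in < BH_LRU_SIZE; in++) {
		if (lru[in] == blk)
			continue;		/* duplicate entry: __brelse(bh2) */
		if (out < BH_LRU_SIZE)
			tmp[out++] = lru[in];
		else
			evictee = lru[in];	/* fell off the end */
	}
	memcpy(lru, tmp, sizeof(tmp));
	if (evictee)
		printf("evicted block %d\n", evictee);	/* __brelse(evictee) */
}

int main(void)
{
	int i;

	for (i = 1; i <= 10; i++)
		lru_install(i);
	lru_install(7);		/* a hit moves block 7 back to the front */
	for (i = 0; i < BH_LRU_SIZE; i++)
		printf("%d ", lru[i]);
	printf("\n");		/* prints: 7 10 9 8 6 5 4 3 */
	return 0;
}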
12661da177e4SLinus Torvalds */ 12671da177e4SLinus Torvalds 12681da177e4SLinus Torvalds #define BH_LRU_SIZE 8 12691da177e4SLinus Torvalds 12701da177e4SLinus Torvalds struct bh_lru { 12711da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE]; 12721da177e4SLinus Torvalds }; 12731da177e4SLinus Torvalds 12741da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; 12751da177e4SLinus Torvalds 12761da177e4SLinus Torvalds #ifdef CONFIG_SMP 12771da177e4SLinus Torvalds #define bh_lru_lock() local_irq_disable() 12781da177e4SLinus Torvalds #define bh_lru_unlock() local_irq_enable() 12791da177e4SLinus Torvalds #else 12801da177e4SLinus Torvalds #define bh_lru_lock() preempt_disable() 12811da177e4SLinus Torvalds #define bh_lru_unlock() preempt_enable() 12821da177e4SLinus Torvalds #endif 12831da177e4SLinus Torvalds 12841da177e4SLinus Torvalds static inline void check_irqs_on(void) 12851da177e4SLinus Torvalds { 12861da177e4SLinus Torvalds #ifdef irqs_disabled 12871da177e4SLinus Torvalds BUG_ON(irqs_disabled()); 12881da177e4SLinus Torvalds #endif 12891da177e4SLinus Torvalds } 12901da177e4SLinus Torvalds 12911da177e4SLinus Torvalds /* 12921da177e4SLinus Torvalds * The LRU management algorithm is dopey-but-simple. Sorry. 12931da177e4SLinus Torvalds */ 12941da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh) 12951da177e4SLinus Torvalds { 12961da177e4SLinus Torvalds struct buffer_head *evictee = NULL; 12971da177e4SLinus Torvalds struct bh_lru *lru; 12981da177e4SLinus Torvalds 12991da177e4SLinus Torvalds check_irqs_on(); 13001da177e4SLinus Torvalds bh_lru_lock(); 13011da177e4SLinus Torvalds lru = &__get_cpu_var(bh_lrus); 13021da177e4SLinus Torvalds if (lru->bhs[0] != bh) { 13031da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE]; 13041da177e4SLinus Torvalds int in; 13051da177e4SLinus Torvalds int out = 0; 13061da177e4SLinus Torvalds 13071da177e4SLinus Torvalds get_bh(bh); 13081da177e4SLinus Torvalds bhs[out++] = bh; 13091da177e4SLinus Torvalds for (in = 0; in < BH_LRU_SIZE; in++) { 13101da177e4SLinus Torvalds struct buffer_head *bh2 = lru->bhs[in]; 13111da177e4SLinus Torvalds 13121da177e4SLinus Torvalds if (bh2 == bh) { 13131da177e4SLinus Torvalds __brelse(bh2); 13141da177e4SLinus Torvalds } else { 13151da177e4SLinus Torvalds if (out >= BH_LRU_SIZE) { 13161da177e4SLinus Torvalds BUG_ON(evictee != NULL); 13171da177e4SLinus Torvalds evictee = bh2; 13181da177e4SLinus Torvalds } else { 13191da177e4SLinus Torvalds bhs[out++] = bh2; 13201da177e4SLinus Torvalds } 13211da177e4SLinus Torvalds } 13221da177e4SLinus Torvalds } 13231da177e4SLinus Torvalds while (out < BH_LRU_SIZE) 13241da177e4SLinus Torvalds bhs[out++] = NULL; 13251da177e4SLinus Torvalds memcpy(lru->bhs, bhs, sizeof(bhs)); 13261da177e4SLinus Torvalds } 13271da177e4SLinus Torvalds bh_lru_unlock(); 13281da177e4SLinus Torvalds 13291da177e4SLinus Torvalds if (evictee) 13301da177e4SLinus Torvalds __brelse(evictee); 13311da177e4SLinus Torvalds } 13321da177e4SLinus Torvalds 13331da177e4SLinus Torvalds /* 13341da177e4SLinus Torvalds * Look up the bh in this cpu's LRU. If it's there, move it to the head. 
13351da177e4SLinus Torvalds */ 1336858119e1SArjan van de Ven static struct buffer_head * 13373991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) 13381da177e4SLinus Torvalds { 13391da177e4SLinus Torvalds struct buffer_head *ret = NULL; 13401da177e4SLinus Torvalds struct bh_lru *lru; 13413991d3bdSTomasz Kvarsin unsigned int i; 13421da177e4SLinus Torvalds 13431da177e4SLinus Torvalds check_irqs_on(); 13441da177e4SLinus Torvalds bh_lru_lock(); 13451da177e4SLinus Torvalds lru = &__get_cpu_var(bh_lrus); 13461da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 13471da177e4SLinus Torvalds struct buffer_head *bh = lru->bhs[i]; 13481da177e4SLinus Torvalds 13491da177e4SLinus Torvalds if (bh && bh->b_bdev == bdev && 13501da177e4SLinus Torvalds bh->b_blocknr == block && bh->b_size == size) { 13511da177e4SLinus Torvalds if (i) { 13521da177e4SLinus Torvalds while (i) { 13531da177e4SLinus Torvalds lru->bhs[i] = lru->bhs[i - 1]; 13541da177e4SLinus Torvalds i--; 13551da177e4SLinus Torvalds } 13561da177e4SLinus Torvalds lru->bhs[0] = bh; 13571da177e4SLinus Torvalds } 13581da177e4SLinus Torvalds get_bh(bh); 13591da177e4SLinus Torvalds ret = bh; 13601da177e4SLinus Torvalds break; 13611da177e4SLinus Torvalds } 13621da177e4SLinus Torvalds } 13631da177e4SLinus Torvalds bh_lru_unlock(); 13641da177e4SLinus Torvalds return ret; 13651da177e4SLinus Torvalds } 13661da177e4SLinus Torvalds 13671da177e4SLinus Torvalds /* 13681da177e4SLinus Torvalds * Perform a pagecache lookup for the matching buffer. If it's there, refresh 13691da177e4SLinus Torvalds * it in the LRU and mark it as accessed. If it is not present then return 13701da177e4SLinus Torvalds * NULL 13711da177e4SLinus Torvalds */ 13721da177e4SLinus Torvalds struct buffer_head * 13733991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size) 13741da177e4SLinus Torvalds { 13751da177e4SLinus Torvalds struct buffer_head *bh = lookup_bh_lru(bdev, block, size); 13761da177e4SLinus Torvalds 13771da177e4SLinus Torvalds if (bh == NULL) { 1378385fd4c5SCoywolf Qi Hunt bh = __find_get_block_slow(bdev, block); 13791da177e4SLinus Torvalds if (bh) 13801da177e4SLinus Torvalds bh_lru_install(bh); 13811da177e4SLinus Torvalds } 13821da177e4SLinus Torvalds if (bh) 13831da177e4SLinus Torvalds touch_buffer(bh); 13841da177e4SLinus Torvalds return bh; 13851da177e4SLinus Torvalds } 13861da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block); 13871da177e4SLinus Torvalds 13881da177e4SLinus Torvalds /* 13891da177e4SLinus Torvalds * __getblk will locate (and, if necessary, create) the buffer_head 13901da177e4SLinus Torvalds * which corresponds to the passed block_device, block and size. The 13911da177e4SLinus Torvalds * returned buffer has its reference count incremented. 13921da177e4SLinus Torvalds * 13931da177e4SLinus Torvalds * __getblk() cannot fail - it just keeps trying. If you pass it an 13941da177e4SLinus Torvalds * illegal block number, __getblk() will happily return a buffer_head 13951da177e4SLinus Torvalds * which represents the non-existent block. Very weird. 13961da177e4SLinus Torvalds * 13971da177e4SLinus Torvalds * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() 13981da177e4SLinus Torvalds * attempt is failing. FIXME, perhaps? 
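/*
 * A minimal sketch of how kernel code typically uses the pair above:
 * __bread() returns NULL if the block could not be read, while __getblk()
 * always returns a buffer_head, even for a block that does not exist on
 * disk.  The block numbers and the 4k block size are hypothetical.
 */
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/string.h>

static int example_read_then_overwrite(struct block_device *bdev)
{
	struct buffer_head *bh;

	bh = __bread(bdev, 0, 4096);	/* read block 0 */
	if (!bh)
		return -EIO;		/* unreadable block */
	/* ... examine bh->b_data ... */
	brelse(bh);

	bh = __getblk(bdev, 1, 4096);	/* no read is implied here */
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);	/* the whole block was overwritten */
	mark_buffer_dirty(bh);		/* writeback will pick it up later */
	brelse(bh);
	return 0;
}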
13991da177e4SLinus Torvalds */ 14001da177e4SLinus Torvalds struct buffer_head * 14013991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size) 14021da177e4SLinus Torvalds { 14031da177e4SLinus Torvalds struct buffer_head *bh = __find_get_block(bdev, block, size); 14041da177e4SLinus Torvalds 14051da177e4SLinus Torvalds might_sleep(); 14061da177e4SLinus Torvalds if (bh == NULL) 14071da177e4SLinus Torvalds bh = __getblk_slow(bdev, block, size); 14081da177e4SLinus Torvalds return bh; 14091da177e4SLinus Torvalds } 14101da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk); 14111da177e4SLinus Torvalds 14121da177e4SLinus Torvalds /* 14131da177e4SLinus Torvalds * Do async read-ahead on a buffer.. 14141da177e4SLinus Torvalds */ 14153991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 14161da177e4SLinus Torvalds { 14171da177e4SLinus Torvalds struct buffer_head *bh = __getblk(bdev, block, size); 1418a3e713b5SAndrew Morton if (likely(bh)) { 14191da177e4SLinus Torvalds ll_rw_block(READA, 1, &bh); 14201da177e4SLinus Torvalds brelse(bh); 14211da177e4SLinus Torvalds } 1422a3e713b5SAndrew Morton } 14231da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead); 14241da177e4SLinus Torvalds 14251da177e4SLinus Torvalds /** 14261da177e4SLinus Torvalds * __bread() - reads a specified block and returns the bh 142767be2dd1SMartin Waitz * @bdev: the block_device to read from 14281da177e4SLinus Torvalds * @block: number of block 14291da177e4SLinus Torvalds * @size: size (in bytes) to read 14301da177e4SLinus Torvalds * 14311da177e4SLinus Torvalds * Reads a specified block, and returns buffer head that contains it. 14321da177e4SLinus Torvalds * It returns NULL if the block was unreadable. 14331da177e4SLinus Torvalds */ 14341da177e4SLinus Torvalds struct buffer_head * 14353991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size) 14361da177e4SLinus Torvalds { 14371da177e4SLinus Torvalds struct buffer_head *bh = __getblk(bdev, block, size); 14381da177e4SLinus Torvalds 1439a3e713b5SAndrew Morton if (likely(bh) && !buffer_uptodate(bh)) 14401da177e4SLinus Torvalds bh = __bread_slow(bh); 14411da177e4SLinus Torvalds return bh; 14421da177e4SLinus Torvalds } 14431da177e4SLinus Torvalds EXPORT_SYMBOL(__bread); 14441da177e4SLinus Torvalds 14451da177e4SLinus Torvalds /* 14461da177e4SLinus Torvalds * invalidate_bh_lrus() is called rarely - but not only at unmount. 14471da177e4SLinus Torvalds * This doesn't race because it runs in each cpu either in irq 14481da177e4SLinus Torvalds * or with preempt disabled. 
14491da177e4SLinus Torvalds */ 14501da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg) 14511da177e4SLinus Torvalds { 14521da177e4SLinus Torvalds struct bh_lru *b = &get_cpu_var(bh_lrus); 14531da177e4SLinus Torvalds int i; 14541da177e4SLinus Torvalds 14551da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 14561da177e4SLinus Torvalds brelse(b->bhs[i]); 14571da177e4SLinus Torvalds b->bhs[i] = NULL; 14581da177e4SLinus Torvalds } 14591da177e4SLinus Torvalds put_cpu_var(bh_lrus); 14601da177e4SLinus Torvalds } 14611da177e4SLinus Torvalds 1462f9a14399SPeter Zijlstra void invalidate_bh_lrus(void) 14631da177e4SLinus Torvalds { 14641da177e4SLinus Torvalds on_each_cpu(invalidate_bh_lru, NULL, 1, 1); 14651da177e4SLinus Torvalds } 14669db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus); 14671da177e4SLinus Torvalds 14681da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh, 14691da177e4SLinus Torvalds struct page *page, unsigned long offset) 14701da177e4SLinus Torvalds { 14711da177e4SLinus Torvalds bh->b_page = page; 1472e827f923SEric Sesterhenn BUG_ON(offset >= PAGE_SIZE); 14731da177e4SLinus Torvalds if (PageHighMem(page)) 14741da177e4SLinus Torvalds /* 14751da177e4SLinus Torvalds * This catches illegal uses and preserves the offset: 14761da177e4SLinus Torvalds */ 14771da177e4SLinus Torvalds bh->b_data = (char *)(0 + offset); 14781da177e4SLinus Torvalds else 14791da177e4SLinus Torvalds bh->b_data = page_address(page) + offset; 14801da177e4SLinus Torvalds } 14811da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page); 14821da177e4SLinus Torvalds 14831da177e4SLinus Torvalds /* 14841da177e4SLinus Torvalds * Called when truncating a buffer on a page completely. 14851da177e4SLinus Torvalds */ 1486858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh) 14871da177e4SLinus Torvalds { 14881da177e4SLinus Torvalds lock_buffer(bh); 14891da177e4SLinus Torvalds clear_buffer_dirty(bh); 14901da177e4SLinus Torvalds bh->b_bdev = NULL; 14911da177e4SLinus Torvalds clear_buffer_mapped(bh); 14921da177e4SLinus Torvalds clear_buffer_req(bh); 14931da177e4SLinus Torvalds clear_buffer_new(bh); 14941da177e4SLinus Torvalds clear_buffer_delay(bh); 149533a266ddSDavid Chinner clear_buffer_unwritten(bh); 14961da177e4SLinus Torvalds unlock_buffer(bh); 14971da177e4SLinus Torvalds } 14981da177e4SLinus Torvalds 14991da177e4SLinus Torvalds /** 15001da177e4SLinus Torvalds * block_invalidatepage - invalidate part of all of a buffer-backed page 15011da177e4SLinus Torvalds * 15021da177e4SLinus Torvalds * @page: the page which is affected 15031da177e4SLinus Torvalds * @offset: the index of the truncation point 15041da177e4SLinus Torvalds * 15051da177e4SLinus Torvalds * block_invalidatepage() is called when all or part of the page has become 15061da177e4SLinus Torvalds * invalidatedby a truncate operation. 15071da177e4SLinus Torvalds * 15081da177e4SLinus Torvalds * block_invalidatepage() does not have to release all buffers, but it must 15091da177e4SLinus Torvalds * ensure that no dirty buffer is left outside @offset and that no I/O 15101da177e4SLinus Torvalds * is underway against any of the blocks which are outside the truncation 15111da177e4SLinus Torvalds * point. Because the caller is about to free (and possibly reuse) those 15121da177e4SLinus Torvalds * blocks on-disk. 
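/*
 * A user-space model of the offset arithmetic used below: with 1k buffers
 * on a 4k page and a truncation point of 1536, the buffers at offsets 0 and
 * 1024 are kept (the second only partially valid), while the buffers at
 * 2048 and 3072 start at or beyond the truncation point and are discarded.
 */
#include <stdio.h>

int main(void)
{
	unsigned int blocksize = 1024, page_size = 4096;
	unsigned int offset = 1536;	/* truncation point within the page */
	unsigned int curr_off = 0;

	while (curr_off < page_size) {
		unsigned int next_off = curr_off + blocksize;

		if (offset <= curr_off)
			printf("buffer at %4u: discard\n", curr_off);
		else
			printf("buffer at %4u: keep\n", curr_off);
		curr_off = next_off;
	}
	return 0;
}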
15131da177e4SLinus Torvalds */ 15142ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset) 15151da177e4SLinus Torvalds { 15161da177e4SLinus Torvalds struct buffer_head *head, *bh, *next; 15171da177e4SLinus Torvalds unsigned int curr_off = 0; 15181da177e4SLinus Torvalds 15191da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 15201da177e4SLinus Torvalds if (!page_has_buffers(page)) 15211da177e4SLinus Torvalds goto out; 15221da177e4SLinus Torvalds 15231da177e4SLinus Torvalds head = page_buffers(page); 15241da177e4SLinus Torvalds bh = head; 15251da177e4SLinus Torvalds do { 15261da177e4SLinus Torvalds unsigned int next_off = curr_off + bh->b_size; 15271da177e4SLinus Torvalds next = bh->b_this_page; 15281da177e4SLinus Torvalds 15291da177e4SLinus Torvalds /* 15301da177e4SLinus Torvalds * is this block fully invalidated? 15311da177e4SLinus Torvalds */ 15321da177e4SLinus Torvalds if (offset <= curr_off) 15331da177e4SLinus Torvalds discard_buffer(bh); 15341da177e4SLinus Torvalds curr_off = next_off; 15351da177e4SLinus Torvalds bh = next; 15361da177e4SLinus Torvalds } while (bh != head); 15371da177e4SLinus Torvalds 15381da177e4SLinus Torvalds /* 15391da177e4SLinus Torvalds * We release buffers only if the entire page is being invalidated. 15401da177e4SLinus Torvalds * The get_block cached value has been unconditionally invalidated, 15411da177e4SLinus Torvalds * so real IO is not possible anymore. 15421da177e4SLinus Torvalds */ 15431da177e4SLinus Torvalds if (offset == 0) 15442ff28e22SNeilBrown try_to_release_page(page, 0); 15451da177e4SLinus Torvalds out: 15462ff28e22SNeilBrown return; 15471da177e4SLinus Torvalds } 15481da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage); 15491da177e4SLinus Torvalds 15501da177e4SLinus Torvalds /* 15511da177e4SLinus Torvalds * We attach and possibly dirty the buffers atomically wrt 15521da177e4SLinus Torvalds * __set_page_dirty_buffers() via private_lock. try_to_free_buffers 15531da177e4SLinus Torvalds * is already excluded via the page lock. 
15541da177e4SLinus Torvalds */ 15551da177e4SLinus Torvalds void create_empty_buffers(struct page *page, 15561da177e4SLinus Torvalds unsigned long blocksize, unsigned long b_state) 15571da177e4SLinus Torvalds { 15581da177e4SLinus Torvalds struct buffer_head *bh, *head, *tail; 15591da177e4SLinus Torvalds 15601da177e4SLinus Torvalds head = alloc_page_buffers(page, blocksize, 1); 15611da177e4SLinus Torvalds bh = head; 15621da177e4SLinus Torvalds do { 15631da177e4SLinus Torvalds bh->b_state |= b_state; 15641da177e4SLinus Torvalds tail = bh; 15651da177e4SLinus Torvalds bh = bh->b_this_page; 15661da177e4SLinus Torvalds } while (bh); 15671da177e4SLinus Torvalds tail->b_this_page = head; 15681da177e4SLinus Torvalds 15691da177e4SLinus Torvalds spin_lock(&page->mapping->private_lock); 15701da177e4SLinus Torvalds if (PageUptodate(page) || PageDirty(page)) { 15711da177e4SLinus Torvalds bh = head; 15721da177e4SLinus Torvalds do { 15731da177e4SLinus Torvalds if (PageDirty(page)) 15741da177e4SLinus Torvalds set_buffer_dirty(bh); 15751da177e4SLinus Torvalds if (PageUptodate(page)) 15761da177e4SLinus Torvalds set_buffer_uptodate(bh); 15771da177e4SLinus Torvalds bh = bh->b_this_page; 15781da177e4SLinus Torvalds } while (bh != head); 15791da177e4SLinus Torvalds } 15801da177e4SLinus Torvalds attach_page_buffers(page, head); 15811da177e4SLinus Torvalds spin_unlock(&page->mapping->private_lock); 15821da177e4SLinus Torvalds } 15831da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers); 15841da177e4SLinus Torvalds 15851da177e4SLinus Torvalds /* 15861da177e4SLinus Torvalds * We are taking a block for data and we don't want any output from any 15871da177e4SLinus Torvalds * buffer-cache aliases starting from return from that function and 15881da177e4SLinus Torvalds * until the moment when something will explicitly mark the buffer 15891da177e4SLinus Torvalds * dirty (hopefully that will not happen until we will free that block ;-) 15901da177e4SLinus Torvalds * We don't even need to mark it not-uptodate - nobody can expect 15911da177e4SLinus Torvalds * anything from a newly allocated buffer anyway. We used to used 15921da177e4SLinus Torvalds * unmap_buffer() for such invalidation, but that was wrong. We definitely 15931da177e4SLinus Torvalds * don't want to mark the alias unmapped, for example - it would confuse 15941da177e4SLinus Torvalds * anyone who might pick it with bread() afterwards... 15951da177e4SLinus Torvalds * 15961da177e4SLinus Torvalds * Also.. Note that bforget() doesn't lock the buffer. So there can 15971da177e4SLinus Torvalds * be writeout I/O going on against recently-freed buffers. We don't 15981da177e4SLinus Torvalds * wait on that I/O in bforget() - it's more efficient to wait on the I/O 15991da177e4SLinus Torvalds * only if we really need to. That happens here. 
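/*
 * A minimal sketch of the calling pattern for the function below, mirroring
 * what __block_prepare_write() and __block_write_full_page() do later in
 * this file: once get_block() reports a freshly allocated block, any stale
 * alias still held by the block device's own pagecache is discarded so a
 * later writeback of that alias cannot overwrite the new data.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static int example_map_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh, get_block_t *get_block)
{
	int err = get_block(inode, iblock, bh, 1);	/* 1 == create */

	if (err)
		return err;
	if (buffer_new(bh)) {
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		clear_buffer_new(bh);
	}
	return 0;
}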
16001da177e4SLinus Torvalds */ 16011da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block) 16021da177e4SLinus Torvalds { 16031da177e4SLinus Torvalds struct buffer_head *old_bh; 16041da177e4SLinus Torvalds 16051da177e4SLinus Torvalds might_sleep(); 16061da177e4SLinus Torvalds 1607385fd4c5SCoywolf Qi Hunt old_bh = __find_get_block_slow(bdev, block); 16081da177e4SLinus Torvalds if (old_bh) { 16091da177e4SLinus Torvalds clear_buffer_dirty(old_bh); 16101da177e4SLinus Torvalds wait_on_buffer(old_bh); 16111da177e4SLinus Torvalds clear_buffer_req(old_bh); 16121da177e4SLinus Torvalds __brelse(old_bh); 16131da177e4SLinus Torvalds } 16141da177e4SLinus Torvalds } 16151da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata); 16161da177e4SLinus Torvalds 16171da177e4SLinus Torvalds /* 16181da177e4SLinus Torvalds * NOTE! All mapped/uptodate combinations are valid: 16191da177e4SLinus Torvalds * 16201da177e4SLinus Torvalds * Mapped Uptodate Meaning 16211da177e4SLinus Torvalds * 16221da177e4SLinus Torvalds * No No "unknown" - must do get_block() 16231da177e4SLinus Torvalds * No Yes "hole" - zero-filled 16241da177e4SLinus Torvalds * Yes No "allocated" - allocated on disk, not read in 16251da177e4SLinus Torvalds * Yes Yes "valid" - allocated and up-to-date in memory. 16261da177e4SLinus Torvalds * 16271da177e4SLinus Torvalds * "Dirty" is valid only with the last case (mapped+uptodate). 16281da177e4SLinus Torvalds */ 16291da177e4SLinus Torvalds 16301da177e4SLinus Torvalds /* 16311da177e4SLinus Torvalds * While block_write_full_page is writing back the dirty buffers under 16321da177e4SLinus Torvalds * the page lock, whoever dirtied the buffers may decide to clean them 16331da177e4SLinus Torvalds * again at any time. We handle that by only looking at the buffer 16341da177e4SLinus Torvalds * state inside lock_buffer(). 16351da177e4SLinus Torvalds * 16361da177e4SLinus Torvalds * If block_write_full_page() is called for regular writeback 16371da177e4SLinus Torvalds * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a 16381da177e4SLinus Torvalds * locked buffer. This only can happen if someone has written the buffer 16391da177e4SLinus Torvalds * directly, with submit_bh(). At the address_space level PageWriteback 16401da177e4SLinus Torvalds * prevents this contention from occurring. 16411da177e4SLinus Torvalds */ 16421da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page, 16431da177e4SLinus Torvalds get_block_t *get_block, struct writeback_control *wbc) 16441da177e4SLinus Torvalds { 16451da177e4SLinus Torvalds int err; 16461da177e4SLinus Torvalds sector_t block; 16471da177e4SLinus Torvalds sector_t last_block; 1648f0fbd5fcSAndrew Morton struct buffer_head *bh, *head; 1649b0cf2321SBadari Pulavarty const unsigned blocksize = 1 << inode->i_blkbits; 16501da177e4SLinus Torvalds int nr_underway = 0; 16511da177e4SLinus Torvalds 16521da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 16531da177e4SLinus Torvalds 16541da177e4SLinus Torvalds last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; 16551da177e4SLinus Torvalds 16561da177e4SLinus Torvalds if (!page_has_buffers(page)) { 1657b0cf2321SBadari Pulavarty create_empty_buffers(page, blocksize, 16581da177e4SLinus Torvalds (1 << BH_Dirty)|(1 << BH_Uptodate)); 16591da177e4SLinus Torvalds } 16601da177e4SLinus Torvalds 16611da177e4SLinus Torvalds /* 16621da177e4SLinus Torvalds * Be very careful. 
We have no exclusion from __set_page_dirty_buffers 16631da177e4SLinus Torvalds * here, and the (potentially unmapped) buffers may become dirty at 16641da177e4SLinus Torvalds * any time. If a buffer becomes dirty here after we've inspected it 16651da177e4SLinus Torvalds * then we just miss that fact, and the page stays dirty. 16661da177e4SLinus Torvalds * 16671da177e4SLinus Torvalds * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; 16681da177e4SLinus Torvalds * handle that here by just cleaning them. 16691da177e4SLinus Torvalds */ 16701da177e4SLinus Torvalds 167154b21a79SAndrew Morton block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 16721da177e4SLinus Torvalds head = page_buffers(page); 16731da177e4SLinus Torvalds bh = head; 16741da177e4SLinus Torvalds 16751da177e4SLinus Torvalds /* 16761da177e4SLinus Torvalds * Get all the dirty buffers mapped to disk addresses and 16771da177e4SLinus Torvalds * handle any aliases from the underlying blockdev's mapping. 16781da177e4SLinus Torvalds */ 16791da177e4SLinus Torvalds do { 16801da177e4SLinus Torvalds if (block > last_block) { 16811da177e4SLinus Torvalds /* 16821da177e4SLinus Torvalds * mapped buffers outside i_size will occur, because 16831da177e4SLinus Torvalds * this page can be outside i_size when there is a 16841da177e4SLinus Torvalds * truncate in progress. 16851da177e4SLinus Torvalds */ 16861da177e4SLinus Torvalds /* 16871da177e4SLinus Torvalds * The buffer was zeroed by block_write_full_page() 16881da177e4SLinus Torvalds */ 16891da177e4SLinus Torvalds clear_buffer_dirty(bh); 16901da177e4SLinus Torvalds set_buffer_uptodate(bh); 16911da177e4SLinus Torvalds } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { 1692b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 16931da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 16941da177e4SLinus Torvalds if (err) 16951da177e4SLinus Torvalds goto recover; 16961da177e4SLinus Torvalds if (buffer_new(bh)) { 16971da177e4SLinus Torvalds /* blockdev mappings never come here */ 16981da177e4SLinus Torvalds clear_buffer_new(bh); 16991da177e4SLinus Torvalds unmap_underlying_metadata(bh->b_bdev, 17001da177e4SLinus Torvalds bh->b_blocknr); 17011da177e4SLinus Torvalds } 17021da177e4SLinus Torvalds } 17031da177e4SLinus Torvalds bh = bh->b_this_page; 17041da177e4SLinus Torvalds block++; 17051da177e4SLinus Torvalds } while (bh != head); 17061da177e4SLinus Torvalds 17071da177e4SLinus Torvalds do { 17081da177e4SLinus Torvalds if (!buffer_mapped(bh)) 17091da177e4SLinus Torvalds continue; 17101da177e4SLinus Torvalds /* 17111da177e4SLinus Torvalds * If it's a fully non-blocking write attempt and we cannot 17121da177e4SLinus Torvalds * lock the buffer then redirty the page. Note that this can 17131da177e4SLinus Torvalds * potentially cause a busy-wait loop from pdflush and kswapd 17141da177e4SLinus Torvalds * activity, but those code paths have their own higher-level 17151da177e4SLinus Torvalds * throttling. 
17161da177e4SLinus Torvalds */ 17171da177e4SLinus Torvalds if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { 17181da177e4SLinus Torvalds lock_buffer(bh); 17191da177e4SLinus Torvalds } else if (test_set_buffer_locked(bh)) { 17201da177e4SLinus Torvalds redirty_page_for_writepage(wbc, page); 17211da177e4SLinus Torvalds continue; 17221da177e4SLinus Torvalds } 17231da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 17241da177e4SLinus Torvalds mark_buffer_async_write(bh); 17251da177e4SLinus Torvalds } else { 17261da177e4SLinus Torvalds unlock_buffer(bh); 17271da177e4SLinus Torvalds } 17281da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 17291da177e4SLinus Torvalds 17301da177e4SLinus Torvalds /* 17311da177e4SLinus Torvalds * The page and its buffers are protected by PageWriteback(), so we can 17321da177e4SLinus Torvalds * drop the bh refcounts early. 17331da177e4SLinus Torvalds */ 17341da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 17351da177e4SLinus Torvalds set_page_writeback(page); 17361da177e4SLinus Torvalds 17371da177e4SLinus Torvalds do { 17381da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 17391da177e4SLinus Torvalds if (buffer_async_write(bh)) { 17401da177e4SLinus Torvalds submit_bh(WRITE, bh); 17411da177e4SLinus Torvalds nr_underway++; 1742ad576e63SNick Piggin } 17431da177e4SLinus Torvalds bh = next; 17441da177e4SLinus Torvalds } while (bh != head); 174505937baaSAndrew Morton unlock_page(page); 17461da177e4SLinus Torvalds 17471da177e4SLinus Torvalds err = 0; 17481da177e4SLinus Torvalds done: 17491da177e4SLinus Torvalds if (nr_underway == 0) { 17501da177e4SLinus Torvalds /* 17511da177e4SLinus Torvalds * The page was marked dirty, but the buffers were 17521da177e4SLinus Torvalds * clean. Someone wrote them back by hand with 17531da177e4SLinus Torvalds * ll_rw_block/submit_bh. A rare case. 17541da177e4SLinus Torvalds */ 17551da177e4SLinus Torvalds end_page_writeback(page); 17563d67f2d7SNick Piggin 17571da177e4SLinus Torvalds /* 17581da177e4SLinus Torvalds * The page and buffer_heads can be released at any time from 17591da177e4SLinus Torvalds * here on. 17601da177e4SLinus Torvalds */ 17611da177e4SLinus Torvalds } 17621da177e4SLinus Torvalds return err; 17631da177e4SLinus Torvalds 17641da177e4SLinus Torvalds recover: 17651da177e4SLinus Torvalds /* 17661da177e4SLinus Torvalds * ENOSPC, or some other error. We may already have added some 17671da177e4SLinus Torvalds * blocks to the file, so we need to write these out to avoid 17681da177e4SLinus Torvalds * exposing stale data. 17691da177e4SLinus Torvalds * The page is currently locked and not marked for writeback 17701da177e4SLinus Torvalds */ 17711da177e4SLinus Torvalds bh = head; 17721da177e4SLinus Torvalds /* Recovery: lock and submit the mapped buffers */ 17731da177e4SLinus Torvalds do { 17741da177e4SLinus Torvalds if (buffer_mapped(bh) && buffer_dirty(bh)) { 17751da177e4SLinus Torvalds lock_buffer(bh); 17761da177e4SLinus Torvalds mark_buffer_async_write(bh); 17771da177e4SLinus Torvalds } else { 17781da177e4SLinus Torvalds /* 17791da177e4SLinus Torvalds * The buffer may have been set dirty during 17801da177e4SLinus Torvalds * attachment to a dirty page. 
17811da177e4SLinus Torvalds */ 17821da177e4SLinus Torvalds clear_buffer_dirty(bh); 17831da177e4SLinus Torvalds } 17841da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 17851da177e4SLinus Torvalds SetPageError(page); 17861da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 17877e4c3690SAndrew Morton mapping_set_error(page->mapping, err); 17881da177e4SLinus Torvalds set_page_writeback(page); 17891da177e4SLinus Torvalds do { 17901da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 17911da177e4SLinus Torvalds if (buffer_async_write(bh)) { 17921da177e4SLinus Torvalds clear_buffer_dirty(bh); 17931da177e4SLinus Torvalds submit_bh(WRITE, bh); 17941da177e4SLinus Torvalds nr_underway++; 1795ad576e63SNick Piggin } 17961da177e4SLinus Torvalds bh = next; 17971da177e4SLinus Torvalds } while (bh != head); 1798ffda9d30SNick Piggin unlock_page(page); 17991da177e4SLinus Torvalds goto done; 18001da177e4SLinus Torvalds } 18011da177e4SLinus Torvalds 1802afddba49SNick Piggin /* 1803afddba49SNick Piggin * If a page has any new buffers, zero them out here, and mark them uptodate 1804afddba49SNick Piggin * and dirty so they'll be written out (in order to prevent uninitialised 1805afddba49SNick Piggin * block data from leaking). And clear the new bit. 1806afddba49SNick Piggin */ 1807afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to) 1808afddba49SNick Piggin { 1809afddba49SNick Piggin unsigned int block_start, block_end; 1810afddba49SNick Piggin struct buffer_head *head, *bh; 1811afddba49SNick Piggin 1812afddba49SNick Piggin BUG_ON(!PageLocked(page)); 1813afddba49SNick Piggin if (!page_has_buffers(page)) 1814afddba49SNick Piggin return; 1815afddba49SNick Piggin 1816afddba49SNick Piggin bh = head = page_buffers(page); 1817afddba49SNick Piggin block_start = 0; 1818afddba49SNick Piggin do { 1819afddba49SNick Piggin block_end = block_start + bh->b_size; 1820afddba49SNick Piggin 1821afddba49SNick Piggin if (buffer_new(bh)) { 1822afddba49SNick Piggin if (block_end > from && block_start < to) { 1823afddba49SNick Piggin if (!PageUptodate(page)) { 1824afddba49SNick Piggin unsigned start, size; 1825afddba49SNick Piggin 1826afddba49SNick Piggin start = max(from, block_start); 1827afddba49SNick Piggin size = min(to, block_end) - start; 1828afddba49SNick Piggin 1829eebd2aa3SChristoph Lameter zero_user(page, start, size); 1830afddba49SNick Piggin set_buffer_uptodate(bh); 1831afddba49SNick Piggin } 1832afddba49SNick Piggin 1833afddba49SNick Piggin clear_buffer_new(bh); 1834afddba49SNick Piggin mark_buffer_dirty(bh); 1835afddba49SNick Piggin } 1836afddba49SNick Piggin } 1837afddba49SNick Piggin 1838afddba49SNick Piggin block_start = block_end; 1839afddba49SNick Piggin bh = bh->b_this_page; 1840afddba49SNick Piggin } while (bh != head); 1841afddba49SNick Piggin } 1842afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers); 1843afddba49SNick Piggin 18441da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page, 18451da177e4SLinus Torvalds unsigned from, unsigned to, get_block_t *get_block) 18461da177e4SLinus Torvalds { 18471da177e4SLinus Torvalds unsigned block_start, block_end; 18481da177e4SLinus Torvalds sector_t block; 18491da177e4SLinus Torvalds int err = 0; 18501da177e4SLinus Torvalds unsigned blocksize, bbits; 18511da177e4SLinus Torvalds struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 18521da177e4SLinus Torvalds 18531da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 18541da177e4SLinus Torvalds BUG_ON(from > 
PAGE_CACHE_SIZE); 18551da177e4SLinus Torvalds BUG_ON(to > PAGE_CACHE_SIZE); 18561da177e4SLinus Torvalds BUG_ON(from > to); 18571da177e4SLinus Torvalds 18581da177e4SLinus Torvalds blocksize = 1 << inode->i_blkbits; 18591da177e4SLinus Torvalds if (!page_has_buffers(page)) 18601da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0); 18611da177e4SLinus Torvalds head = page_buffers(page); 18621da177e4SLinus Torvalds 18631da177e4SLinus Torvalds bbits = inode->i_blkbits; 18641da177e4SLinus Torvalds block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 18651da177e4SLinus Torvalds 18661da177e4SLinus Torvalds for(bh = head, block_start = 0; bh != head || !block_start; 18671da177e4SLinus Torvalds block++, block_start=block_end, bh = bh->b_this_page) { 18681da177e4SLinus Torvalds block_end = block_start + blocksize; 18691da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 18701da177e4SLinus Torvalds if (PageUptodate(page)) { 18711da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 18721da177e4SLinus Torvalds set_buffer_uptodate(bh); 18731da177e4SLinus Torvalds } 18741da177e4SLinus Torvalds continue; 18751da177e4SLinus Torvalds } 18761da177e4SLinus Torvalds if (buffer_new(bh)) 18771da177e4SLinus Torvalds clear_buffer_new(bh); 18781da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 1879b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 18801da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 18811da177e4SLinus Torvalds if (err) 1882f3ddbdc6SNick Piggin break; 18831da177e4SLinus Torvalds if (buffer_new(bh)) { 18841da177e4SLinus Torvalds unmap_underlying_metadata(bh->b_bdev, 18851da177e4SLinus Torvalds bh->b_blocknr); 18861da177e4SLinus Torvalds if (PageUptodate(page)) { 1887637aff46SNick Piggin clear_buffer_new(bh); 18881da177e4SLinus Torvalds set_buffer_uptodate(bh); 1889637aff46SNick Piggin mark_buffer_dirty(bh); 18901da177e4SLinus Torvalds continue; 18911da177e4SLinus Torvalds } 1892eebd2aa3SChristoph Lameter if (block_end > to || block_start < from) 1893eebd2aa3SChristoph Lameter zero_user_segments(page, 1894eebd2aa3SChristoph Lameter to, block_end, 1895eebd2aa3SChristoph Lameter block_start, from); 18961da177e4SLinus Torvalds continue; 18971da177e4SLinus Torvalds } 18981da177e4SLinus Torvalds } 18991da177e4SLinus Torvalds if (PageUptodate(page)) { 19001da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 19011da177e4SLinus Torvalds set_buffer_uptodate(bh); 19021da177e4SLinus Torvalds continue; 19031da177e4SLinus Torvalds } 19041da177e4SLinus Torvalds if (!buffer_uptodate(bh) && !buffer_delay(bh) && 190533a266ddSDavid Chinner !buffer_unwritten(bh) && 19061da177e4SLinus Torvalds (block_start < from || block_end > to)) { 19071da177e4SLinus Torvalds ll_rw_block(READ, 1, &bh); 19081da177e4SLinus Torvalds *wait_bh++=bh; 19091da177e4SLinus Torvalds } 19101da177e4SLinus Torvalds } 19111da177e4SLinus Torvalds /* 19121da177e4SLinus Torvalds * If we issued read requests - let them complete. 
19131da177e4SLinus Torvalds */ 19141da177e4SLinus Torvalds while(wait_bh > wait) { 19151da177e4SLinus Torvalds wait_on_buffer(*--wait_bh); 19161da177e4SLinus Torvalds if (!buffer_uptodate(*wait_bh)) 1917f3ddbdc6SNick Piggin err = -EIO; 19181da177e4SLinus Torvalds } 1919afddba49SNick Piggin if (unlikely(err)) 1920afddba49SNick Piggin page_zero_new_buffers(page, from, to); 19211da177e4SLinus Torvalds return err; 19221da177e4SLinus Torvalds } 19231da177e4SLinus Torvalds 19241da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page, 19251da177e4SLinus Torvalds unsigned from, unsigned to) 19261da177e4SLinus Torvalds { 19271da177e4SLinus Torvalds unsigned block_start, block_end; 19281da177e4SLinus Torvalds int partial = 0; 19291da177e4SLinus Torvalds unsigned blocksize; 19301da177e4SLinus Torvalds struct buffer_head *bh, *head; 19311da177e4SLinus Torvalds 19321da177e4SLinus Torvalds blocksize = 1 << inode->i_blkbits; 19331da177e4SLinus Torvalds 19341da177e4SLinus Torvalds for(bh = head = page_buffers(page), block_start = 0; 19351da177e4SLinus Torvalds bh != head || !block_start; 19361da177e4SLinus Torvalds block_start=block_end, bh = bh->b_this_page) { 19371da177e4SLinus Torvalds block_end = block_start + blocksize; 19381da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 19391da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 19401da177e4SLinus Torvalds partial = 1; 19411da177e4SLinus Torvalds } else { 19421da177e4SLinus Torvalds set_buffer_uptodate(bh); 19431da177e4SLinus Torvalds mark_buffer_dirty(bh); 19441da177e4SLinus Torvalds } 1945afddba49SNick Piggin clear_buffer_new(bh); 19461da177e4SLinus Torvalds } 19471da177e4SLinus Torvalds 19481da177e4SLinus Torvalds /* 19491da177e4SLinus Torvalds * If this is a partial write which happened to make all buffers 19501da177e4SLinus Torvalds * uptodate then we can optimize away a bogus readpage() for 19511da177e4SLinus Torvalds * the next read(). Here we 'discover' whether the page went 19521da177e4SLinus Torvalds * uptodate as a result of this (potentially partial) write. 19531da177e4SLinus Torvalds */ 19541da177e4SLinus Torvalds if (!partial) 19551da177e4SLinus Torvalds SetPageUptodate(page); 19561da177e4SLinus Torvalds return 0; 19571da177e4SLinus Torvalds } 19581da177e4SLinus Torvalds 19591da177e4SLinus Torvalds /* 1960afddba49SNick Piggin * block_write_begin takes care of the basic task of block allocation and 1961afddba49SNick Piggin * bringing partial write blocks uptodate first. 1962afddba49SNick Piggin * 1963afddba49SNick Piggin * If *pagep is not NULL, then block_write_begin uses the locked page 1964afddba49SNick Piggin * at *pagep rather than allocating its own. In this case, the page will 1965afddba49SNick Piggin * not be unlocked or deallocated on failure. 
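/*
 * A minimal sketch of how a filesystem would typically wire this up as its
 * ->write_begin() method, normally paired with generic_write_end() as the
 * ->write_end().  myfs_get_block is hypothetical; here it simply maps file
 * blocks 1:1 onto disk blocks.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);	/* hypothetical flat mapping */
	return 0;
}

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	*pagep = NULL;		/* let block_write_begin allocate the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}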
1966afddba49SNick Piggin */ 1967afddba49SNick Piggin int block_write_begin(struct file *file, struct address_space *mapping, 1968afddba49SNick Piggin loff_t pos, unsigned len, unsigned flags, 1969afddba49SNick Piggin struct page **pagep, void **fsdata, 1970afddba49SNick Piggin get_block_t *get_block) 1971afddba49SNick Piggin { 1972afddba49SNick Piggin struct inode *inode = mapping->host; 1973afddba49SNick Piggin int status = 0; 1974afddba49SNick Piggin struct page *page; 1975afddba49SNick Piggin pgoff_t index; 1976afddba49SNick Piggin unsigned start, end; 1977afddba49SNick Piggin int ownpage = 0; 1978afddba49SNick Piggin 1979afddba49SNick Piggin index = pos >> PAGE_CACHE_SHIFT; 1980afddba49SNick Piggin start = pos & (PAGE_CACHE_SIZE - 1); 1981afddba49SNick Piggin end = start + len; 1982afddba49SNick Piggin 1983afddba49SNick Piggin page = *pagep; 1984afddba49SNick Piggin if (page == NULL) { 1985afddba49SNick Piggin ownpage = 1; 1986afddba49SNick Piggin page = __grab_cache_page(mapping, index); 1987afddba49SNick Piggin if (!page) { 1988afddba49SNick Piggin status = -ENOMEM; 1989afddba49SNick Piggin goto out; 1990afddba49SNick Piggin } 1991afddba49SNick Piggin *pagep = page; 1992afddba49SNick Piggin } else 1993afddba49SNick Piggin BUG_ON(!PageLocked(page)); 1994afddba49SNick Piggin 1995afddba49SNick Piggin status = __block_prepare_write(inode, page, start, end, get_block); 1996afddba49SNick Piggin if (unlikely(status)) { 1997afddba49SNick Piggin ClearPageUptodate(page); 1998afddba49SNick Piggin 1999afddba49SNick Piggin if (ownpage) { 2000afddba49SNick Piggin unlock_page(page); 2001afddba49SNick Piggin page_cache_release(page); 2002afddba49SNick Piggin *pagep = NULL; 2003afddba49SNick Piggin 2004afddba49SNick Piggin /* 2005afddba49SNick Piggin * prepare_write() may have instantiated a few blocks 2006afddba49SNick Piggin * outside i_size. Trim these off again. Don't need 2007afddba49SNick Piggin * i_size_read because we hold i_mutex. 2008afddba49SNick Piggin */ 2009afddba49SNick Piggin if (pos + len > inode->i_size) 2010afddba49SNick Piggin vmtruncate(inode, inode->i_size); 2011afddba49SNick Piggin } 2012afddba49SNick Piggin goto out; 2013afddba49SNick Piggin } 2014afddba49SNick Piggin 2015afddba49SNick Piggin out: 2016afddba49SNick Piggin return status; 2017afddba49SNick Piggin } 2018afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin); 2019afddba49SNick Piggin 2020afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping, 2021afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2022afddba49SNick Piggin struct page *page, void *fsdata) 2023afddba49SNick Piggin { 2024afddba49SNick Piggin struct inode *inode = mapping->host; 2025afddba49SNick Piggin unsigned start; 2026afddba49SNick Piggin 2027afddba49SNick Piggin start = pos & (PAGE_CACHE_SIZE - 1); 2028afddba49SNick Piggin 2029afddba49SNick Piggin if (unlikely(copied < len)) { 2030afddba49SNick Piggin /* 2031afddba49SNick Piggin * The buffers that were written will now be uptodate, so we 2032afddba49SNick Piggin * don't have to worry about a readpage reading them and 2033afddba49SNick Piggin * overwriting a partial write. However if we have encountered 2034afddba49SNick Piggin * a short write and only partially written into a buffer, it 2035afddba49SNick Piggin * will not be marked uptodate, so a readpage might come in and 2036afddba49SNick Piggin * destroy our partial write. 
2037afddba49SNick Piggin * 2038afddba49SNick Piggin * Do the simplest thing, and just treat any short write to a 2039afddba49SNick Piggin * non uptodate page as a zero-length write, and force the 2040afddba49SNick Piggin * caller to redo the whole thing. 2041afddba49SNick Piggin */ 2042afddba49SNick Piggin if (!PageUptodate(page)) 2043afddba49SNick Piggin copied = 0; 2044afddba49SNick Piggin 2045afddba49SNick Piggin page_zero_new_buffers(page, start+copied, start+len); 2046afddba49SNick Piggin } 2047afddba49SNick Piggin flush_dcache_page(page); 2048afddba49SNick Piggin 2049afddba49SNick Piggin /* This could be a short (even 0-length) commit */ 2050afddba49SNick Piggin __block_commit_write(inode, page, start, start+copied); 2051afddba49SNick Piggin 2052afddba49SNick Piggin return copied; 2053afddba49SNick Piggin } 2054afddba49SNick Piggin EXPORT_SYMBOL(block_write_end); 2055afddba49SNick Piggin 2056afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping, 2057afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2058afddba49SNick Piggin struct page *page, void *fsdata) 2059afddba49SNick Piggin { 2060afddba49SNick Piggin struct inode *inode = mapping->host; 2061afddba49SNick Piggin 2062afddba49SNick Piggin copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 2063afddba49SNick Piggin 2064afddba49SNick Piggin /* 2065afddba49SNick Piggin * No need to use i_size_read() here, the i_size 2066afddba49SNick Piggin * cannot change under us because we hold i_mutex. 2067afddba49SNick Piggin * 2068afddba49SNick Piggin * But it's important to update i_size while still holding page lock: 2069afddba49SNick Piggin * page writeout could otherwise come in and zero beyond i_size. 2070afddba49SNick Piggin */ 2071afddba49SNick Piggin if (pos+copied > inode->i_size) { 2072afddba49SNick Piggin i_size_write(inode, pos+copied); 2073afddba49SNick Piggin mark_inode_dirty(inode); 2074afddba49SNick Piggin } 2075afddba49SNick Piggin 2076afddba49SNick Piggin unlock_page(page); 2077afddba49SNick Piggin page_cache_release(page); 2078afddba49SNick Piggin 2079afddba49SNick Piggin return copied; 2080afddba49SNick Piggin } 2081afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end); 2082afddba49SNick Piggin 2083afddba49SNick Piggin /* 20841da177e4SLinus Torvalds * Generic "read page" function for block devices that have the normal 20851da177e4SLinus Torvalds * get_block functionality. This is most of the block device filesystems. 20861da177e4SLinus Torvalds * Reads the page asynchronously --- the unlock_buffer() and 20871da177e4SLinus Torvalds * set/clear_buffer_uptodate() functions propagate buffer state into the 20881da177e4SLinus Torvalds * page struct once IO has completed. 
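/*
 * A minimal sketch of the usual way a block-based filesystem exposes this
 * as its ->readpage() method.  myfs_get_block is the same kind of
 * hypothetical get_block_t as in the write_begin sketch above.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);	/* hypothetical flat mapping */
	return 0;
}

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}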
20891da177e4SLinus Torvalds */ 20901da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block) 20911da177e4SLinus Torvalds { 20921da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 20931da177e4SLinus Torvalds sector_t iblock, lblock; 20941da177e4SLinus Torvalds struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 20951da177e4SLinus Torvalds unsigned int blocksize; 20961da177e4SLinus Torvalds int nr, i; 20971da177e4SLinus Torvalds int fully_mapped = 1; 20981da177e4SLinus Torvalds 2099cd7619d6SMatt Mackall BUG_ON(!PageLocked(page)); 21001da177e4SLinus Torvalds blocksize = 1 << inode->i_blkbits; 21011da177e4SLinus Torvalds if (!page_has_buffers(page)) 21021da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0); 21031da177e4SLinus Torvalds head = page_buffers(page); 21041da177e4SLinus Torvalds 21051da177e4SLinus Torvalds iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 21061da177e4SLinus Torvalds lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits; 21071da177e4SLinus Torvalds bh = head; 21081da177e4SLinus Torvalds nr = 0; 21091da177e4SLinus Torvalds i = 0; 21101da177e4SLinus Torvalds 21111da177e4SLinus Torvalds do { 21121da177e4SLinus Torvalds if (buffer_uptodate(bh)) 21131da177e4SLinus Torvalds continue; 21141da177e4SLinus Torvalds 21151da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2116c64610baSAndrew Morton int err = 0; 2117c64610baSAndrew Morton 21181da177e4SLinus Torvalds fully_mapped = 0; 21191da177e4SLinus Torvalds if (iblock < lblock) { 2120b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 2121c64610baSAndrew Morton err = get_block(inode, iblock, bh, 0); 2122c64610baSAndrew Morton if (err) 21231da177e4SLinus Torvalds SetPageError(page); 21241da177e4SLinus Torvalds } 21251da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2126eebd2aa3SChristoph Lameter zero_user(page, i * blocksize, blocksize); 2127c64610baSAndrew Morton if (!err) 21281da177e4SLinus Torvalds set_buffer_uptodate(bh); 21291da177e4SLinus Torvalds continue; 21301da177e4SLinus Torvalds } 21311da177e4SLinus Torvalds /* 21321da177e4SLinus Torvalds * get_block() might have updated the buffer 21331da177e4SLinus Torvalds * synchronously 21341da177e4SLinus Torvalds */ 21351da177e4SLinus Torvalds if (buffer_uptodate(bh)) 21361da177e4SLinus Torvalds continue; 21371da177e4SLinus Torvalds } 21381da177e4SLinus Torvalds arr[nr++] = bh; 21391da177e4SLinus Torvalds } while (i++, iblock++, (bh = bh->b_this_page) != head); 21401da177e4SLinus Torvalds 21411da177e4SLinus Torvalds if (fully_mapped) 21421da177e4SLinus Torvalds SetPageMappedToDisk(page); 21431da177e4SLinus Torvalds 21441da177e4SLinus Torvalds if (!nr) { 21451da177e4SLinus Torvalds /* 21461da177e4SLinus Torvalds * All buffers are uptodate - we can set the page uptodate 21471da177e4SLinus Torvalds * as well. But not if get_block() returned an error. 
21481da177e4SLinus Torvalds */ 21491da177e4SLinus Torvalds if (!PageError(page)) 21501da177e4SLinus Torvalds SetPageUptodate(page); 21511da177e4SLinus Torvalds unlock_page(page); 21521da177e4SLinus Torvalds return 0; 21531da177e4SLinus Torvalds } 21541da177e4SLinus Torvalds 21551da177e4SLinus Torvalds /* Stage two: lock the buffers */ 21561da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 21571da177e4SLinus Torvalds bh = arr[i]; 21581da177e4SLinus Torvalds lock_buffer(bh); 21591da177e4SLinus Torvalds mark_buffer_async_read(bh); 21601da177e4SLinus Torvalds } 21611da177e4SLinus Torvalds 21621da177e4SLinus Torvalds /* 21631da177e4SLinus Torvalds * Stage 3: start the IO. Check for uptodateness 21641da177e4SLinus Torvalds * inside the buffer lock in case another process reading 21651da177e4SLinus Torvalds * the underlying blockdev brought it uptodate (the sct fix). 21661da177e4SLinus Torvalds */ 21671da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 21681da177e4SLinus Torvalds bh = arr[i]; 21691da177e4SLinus Torvalds if (buffer_uptodate(bh)) 21701da177e4SLinus Torvalds end_buffer_async_read(bh, 1); 21711da177e4SLinus Torvalds else 21721da177e4SLinus Torvalds submit_bh(READ, bh); 21731da177e4SLinus Torvalds } 21741da177e4SLinus Torvalds return 0; 21751da177e4SLinus Torvalds } 21761da177e4SLinus Torvalds 21771da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding 217889e10787SNick Piggin * truncates. Uses filesystem pagecache writes to allow the filesystem to 21791da177e4SLinus Torvalds * deal with the hole. 21801da177e4SLinus Torvalds */ 218189e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size) 21821da177e4SLinus Torvalds { 21831da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 21841da177e4SLinus Torvalds struct page *page; 218589e10787SNick Piggin void *fsdata; 218605eb0b51SOGAWA Hirofumi unsigned long limit; 21871da177e4SLinus Torvalds int err; 21881da177e4SLinus Torvalds 21891da177e4SLinus Torvalds err = -EFBIG; 21901da177e4SLinus Torvalds limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 21911da177e4SLinus Torvalds if (limit != RLIM_INFINITY && size > (loff_t)limit) { 21921da177e4SLinus Torvalds send_sig(SIGXFSZ, current, 0); 21931da177e4SLinus Torvalds goto out; 21941da177e4SLinus Torvalds } 21951da177e4SLinus Torvalds if (size > inode->i_sb->s_maxbytes) 21961da177e4SLinus Torvalds goto out; 21971da177e4SLinus Torvalds 219889e10787SNick Piggin err = pagecache_write_begin(NULL, mapping, size, 0, 219989e10787SNick Piggin AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND, 220089e10787SNick Piggin &page, &fsdata); 220189e10787SNick Piggin if (err) 220205eb0b51SOGAWA Hirofumi goto out; 220305eb0b51SOGAWA Hirofumi 220489e10787SNick Piggin err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); 220589e10787SNick Piggin BUG_ON(err > 0); 220605eb0b51SOGAWA Hirofumi 220705eb0b51SOGAWA Hirofumi out: 220805eb0b51SOGAWA Hirofumi return err; 220905eb0b51SOGAWA Hirofumi } 221005eb0b51SOGAWA Hirofumi 221189e10787SNick Piggin int cont_expand_zero(struct file *file, struct address_space *mapping, 221289e10787SNick Piggin loff_t pos, loff_t *bytes) 221305eb0b51SOGAWA Hirofumi { 221489e10787SNick Piggin struct inode *inode = mapping->host; 221589e10787SNick Piggin unsigned blocksize = 1 << inode->i_blkbits; 221689e10787SNick Piggin struct page *page; 221789e10787SNick Piggin void *fsdata; 221889e10787SNick Piggin pgoff_t index, curidx; 221989e10787SNick Piggin loff_t curpos; 222089e10787SNick Piggin 
unsigned zerofrom, offset, len; 222189e10787SNick Piggin int err = 0; 222205eb0b51SOGAWA Hirofumi 222389e10787SNick Piggin index = pos >> PAGE_CACHE_SHIFT; 222489e10787SNick Piggin offset = pos & ~PAGE_CACHE_MASK; 222589e10787SNick Piggin 222689e10787SNick Piggin while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) { 222789e10787SNick Piggin zerofrom = curpos & ~PAGE_CACHE_MASK; 222889e10787SNick Piggin if (zerofrom & (blocksize-1)) { 222989e10787SNick Piggin *bytes |= (blocksize-1); 223089e10787SNick Piggin (*bytes)++; 223189e10787SNick Piggin } 223289e10787SNick Piggin len = PAGE_CACHE_SIZE - zerofrom; 223389e10787SNick Piggin 223489e10787SNick Piggin err = pagecache_write_begin(file, mapping, curpos, len, 223589e10787SNick Piggin AOP_FLAG_UNINTERRUPTIBLE, 223689e10787SNick Piggin &page, &fsdata); 223789e10787SNick Piggin if (err) 223889e10787SNick Piggin goto out; 2239eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 224089e10787SNick Piggin err = pagecache_write_end(file, mapping, curpos, len, len, 224189e10787SNick Piggin page, fsdata); 224289e10787SNick Piggin if (err < 0) 224389e10787SNick Piggin goto out; 224489e10787SNick Piggin BUG_ON(err != len); 224589e10787SNick Piggin err = 0; 224689e10787SNick Piggin } 224789e10787SNick Piggin 224889e10787SNick Piggin /* page covers the boundary, find the boundary offset */ 224989e10787SNick Piggin if (index == curidx) { 225089e10787SNick Piggin zerofrom = curpos & ~PAGE_CACHE_MASK; 225189e10787SNick Piggin /* if we will expand the thing last block will be filled */ 225289e10787SNick Piggin if (offset <= zerofrom) { 225389e10787SNick Piggin goto out; 225489e10787SNick Piggin } 225589e10787SNick Piggin if (zerofrom & (blocksize-1)) { 225689e10787SNick Piggin *bytes |= (blocksize-1); 225789e10787SNick Piggin (*bytes)++; 225889e10787SNick Piggin } 225989e10787SNick Piggin len = offset - zerofrom; 226089e10787SNick Piggin 226189e10787SNick Piggin err = pagecache_write_begin(file, mapping, curpos, len, 226289e10787SNick Piggin AOP_FLAG_UNINTERRUPTIBLE, 226389e10787SNick Piggin &page, &fsdata); 226489e10787SNick Piggin if (err) 226589e10787SNick Piggin goto out; 2266eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 226789e10787SNick Piggin err = pagecache_write_end(file, mapping, curpos, len, len, 226889e10787SNick Piggin page, fsdata); 226989e10787SNick Piggin if (err < 0) 227089e10787SNick Piggin goto out; 227189e10787SNick Piggin BUG_ON(err != len); 227289e10787SNick Piggin err = 0; 227389e10787SNick Piggin } 227489e10787SNick Piggin out: 227589e10787SNick Piggin return err; 22761da177e4SLinus Torvalds } 22771da177e4SLinus Torvalds 22781da177e4SLinus Torvalds /* 22791da177e4SLinus Torvalds * For moronic filesystems that do not allow holes in file. 22801da177e4SLinus Torvalds * We may have to extend the file. 
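 *
 * Illustrative sketch only (not part of this file): such a filesystem
 * would typically wire cont_write_begin() into its ->write_begin(),
 * passing its own get_block callback and a per-inode count of bytes
 * already allocated; "myfs", myfs_get_block() and the mmu_private
 * field below are hypothetical:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *				pagep, fsdata, myfs_get_block,
 *				&MYFS_I(mapping->host)->mmu_private);
 *	}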
22811da177e4SLinus Torvalds */ 228289e10787SNick Piggin int cont_write_begin(struct file *file, struct address_space *mapping, 228389e10787SNick Piggin loff_t pos, unsigned len, unsigned flags, 228489e10787SNick Piggin struct page **pagep, void **fsdata, 228589e10787SNick Piggin get_block_t *get_block, loff_t *bytes) 22861da177e4SLinus Torvalds { 22871da177e4SLinus Torvalds struct inode *inode = mapping->host; 22881da177e4SLinus Torvalds unsigned blocksize = 1 << inode->i_blkbits; 228989e10787SNick Piggin unsigned zerofrom; 229089e10787SNick Piggin int err; 22911da177e4SLinus Torvalds 229289e10787SNick Piggin err = cont_expand_zero(file, mapping, pos, bytes); 229389e10787SNick Piggin if (err) 22941da177e4SLinus Torvalds goto out; 22951da177e4SLinus Torvalds 22961da177e4SLinus Torvalds zerofrom = *bytes & ~PAGE_CACHE_MASK; 229789e10787SNick Piggin if (pos+len > *bytes && zerofrom & (blocksize-1)) { 22981da177e4SLinus Torvalds *bytes |= (blocksize-1); 22991da177e4SLinus Torvalds (*bytes)++; 23001da177e4SLinus Torvalds } 23011da177e4SLinus Torvalds 230289e10787SNick Piggin *pagep = NULL; 230389e10787SNick Piggin err = block_write_begin(file, mapping, pos, len, 230489e10787SNick Piggin flags, pagep, fsdata, get_block); 23051da177e4SLinus Torvalds out: 230689e10787SNick Piggin return err; 23071da177e4SLinus Torvalds } 23081da177e4SLinus Torvalds 23091da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to, 23101da177e4SLinus Torvalds get_block_t *get_block) 23111da177e4SLinus Torvalds { 23121da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 23131da177e4SLinus Torvalds int err = __block_prepare_write(inode, page, from, to, get_block); 23141da177e4SLinus Torvalds if (err) 23151da177e4SLinus Torvalds ClearPageUptodate(page); 23161da177e4SLinus Torvalds return err; 23171da177e4SLinus Torvalds } 23181da177e4SLinus Torvalds 23191da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to) 23201da177e4SLinus Torvalds { 23211da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 23221da177e4SLinus Torvalds __block_commit_write(inode,page,from,to); 23231da177e4SLinus Torvalds return 0; 23241da177e4SLinus Torvalds } 23251da177e4SLinus Torvalds 23261da177e4SLinus Torvalds int generic_commit_write(struct file *file, struct page *page, 23271da177e4SLinus Torvalds unsigned from, unsigned to) 23281da177e4SLinus Torvalds { 23291da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 23301da177e4SLinus Torvalds loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 23311da177e4SLinus Torvalds __block_commit_write(inode,page,from,to); 23321da177e4SLinus Torvalds /* 23331da177e4SLinus Torvalds * No need to use i_size_read() here, the i_size 23341b1dcc1bSJes Sorensen * cannot change under us because we hold i_mutex. 23351da177e4SLinus Torvalds */ 23361da177e4SLinus Torvalds if (pos > inode->i_size) { 23371da177e4SLinus Torvalds i_size_write(inode, pos); 23381da177e4SLinus Torvalds mark_inode_dirty(inode); 23391da177e4SLinus Torvalds } 23401da177e4SLinus Torvalds return 0; 23411da177e4SLinus Torvalds } 23421da177e4SLinus Torvalds 234354171690SDavid Chinner /* 234454171690SDavid Chinner * block_page_mkwrite() is not allowed to change the file size as it gets 234554171690SDavid Chinner * called from a page fault handler when a page is first dirtied. Hence we must 234654171690SDavid Chinner * be careful to check for EOF conditions here. 
We set the page up correctly 234754171690SDavid Chinner * for a written page which means we get ENOSPC checking when writing into 234854171690SDavid Chinner * holes and correct delalloc and unwritten extent mapping on filesystems that 234954171690SDavid Chinner * support these features. 235054171690SDavid Chinner * 235154171690SDavid Chinner * We are not allowed to take the i_mutex here so we have to play games to 235254171690SDavid Chinner * protect against truncate races as the page could now be beyond EOF. Because 235354171690SDavid Chinner * vmtruncate() writes the inode size before removing pages, once we have the 235454171690SDavid Chinner * page lock we can determine safely if the page is beyond EOF. If it is not 235554171690SDavid Chinner * beyond EOF, then the page is guaranteed safe against truncation until we 235654171690SDavid Chinner * unlock the page. 235754171690SDavid Chinner */ 235854171690SDavid Chinner int 235954171690SDavid Chinner block_page_mkwrite(struct vm_area_struct *vma, struct page *page, 236054171690SDavid Chinner get_block_t get_block) 236154171690SDavid Chinner { 236254171690SDavid Chinner struct inode *inode = vma->vm_file->f_path.dentry->d_inode; 236354171690SDavid Chinner unsigned long end; 236454171690SDavid Chinner loff_t size; 236554171690SDavid Chinner int ret = -EINVAL; 236654171690SDavid Chinner 236754171690SDavid Chinner lock_page(page); 236854171690SDavid Chinner size = i_size_read(inode); 236954171690SDavid Chinner if ((page->mapping != inode->i_mapping) || 237018336338SNick Piggin (page_offset(page) > size)) { 237154171690SDavid Chinner /* page got truncated out from underneath us */ 237254171690SDavid Chinner goto out_unlock; 237354171690SDavid Chinner } 237454171690SDavid Chinner 237554171690SDavid Chinner /* page is wholly or partially inside EOF */ 237654171690SDavid Chinner if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) 237754171690SDavid Chinner end = size & ~PAGE_CACHE_MASK; 237854171690SDavid Chinner else 237954171690SDavid Chinner end = PAGE_CACHE_SIZE; 238054171690SDavid Chinner 238154171690SDavid Chinner ret = block_prepare_write(page, 0, end, get_block); 238254171690SDavid Chinner if (!ret) 238354171690SDavid Chinner ret = block_commit_write(page, 0, end); 238454171690SDavid Chinner 238554171690SDavid Chinner out_unlock: 238654171690SDavid Chinner unlock_page(page); 238754171690SDavid Chinner return ret; 238854171690SDavid Chinner } 23891da177e4SLinus Torvalds 23901da177e4SLinus Torvalds /* 239103158cd7SNick Piggin * nobh_write_begin()'s prereads are special: the buffer_heads are freed 23921da177e4SLinus Torvalds * immediately, while under the page lock. So it needs a special end_io 23931da177e4SLinus Torvalds * handler which does not touch the bh after unlocking it. 23941da177e4SLinus Torvalds */ 23951da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) 23961da177e4SLinus Torvalds { 239768671f35SDmitry Monakhov __end_buffer_read_notouch(bh, uptodate); 23981da177e4SLinus Torvalds } 23991da177e4SLinus Torvalds 24001da177e4SLinus Torvalds /* 240103158cd7SNick Piggin * Attach the singly-linked list of buffers created by nobh_write_begin, to 240203158cd7SNick Piggin * the page (converting it to circular linked list and taking care of page 240303158cd7SNick Piggin * dirty races). 
240403158cd7SNick Piggin */ 240503158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head) 240603158cd7SNick Piggin { 240703158cd7SNick Piggin struct buffer_head *bh; 240803158cd7SNick Piggin 240903158cd7SNick Piggin BUG_ON(!PageLocked(page)); 241003158cd7SNick Piggin 241103158cd7SNick Piggin spin_lock(&page->mapping->private_lock); 241203158cd7SNick Piggin bh = head; 241303158cd7SNick Piggin do { 241403158cd7SNick Piggin if (PageDirty(page)) 241503158cd7SNick Piggin set_buffer_dirty(bh); 241603158cd7SNick Piggin if (!bh->b_this_page) 241703158cd7SNick Piggin bh->b_this_page = head; 241803158cd7SNick Piggin bh = bh->b_this_page; 241903158cd7SNick Piggin } while (bh != head); 242003158cd7SNick Piggin attach_page_buffers(page, head); 242103158cd7SNick Piggin spin_unlock(&page->mapping->private_lock); 242203158cd7SNick Piggin } 242303158cd7SNick Piggin 242403158cd7SNick Piggin /* 24251da177e4SLinus Torvalds * On entry, the page is fully not uptodate. 24261da177e4SLinus Torvalds * On exit the page is fully uptodate in the areas outside (from,to) 24271da177e4SLinus Torvalds */ 242803158cd7SNick Piggin int nobh_write_begin(struct file *file, struct address_space *mapping, 242903158cd7SNick Piggin loff_t pos, unsigned len, unsigned flags, 243003158cd7SNick Piggin struct page **pagep, void **fsdata, 24311da177e4SLinus Torvalds get_block_t *get_block) 24321da177e4SLinus Torvalds { 243303158cd7SNick Piggin struct inode *inode = mapping->host; 24341da177e4SLinus Torvalds const unsigned blkbits = inode->i_blkbits; 24351da177e4SLinus Torvalds const unsigned blocksize = 1 << blkbits; 2436a4b0672dSNick Piggin struct buffer_head *head, *bh; 243703158cd7SNick Piggin struct page *page; 243803158cd7SNick Piggin pgoff_t index; 243903158cd7SNick Piggin unsigned from, to; 24401da177e4SLinus Torvalds unsigned block_in_page; 2441a4b0672dSNick Piggin unsigned block_start, block_end; 24421da177e4SLinus Torvalds sector_t block_in_file; 24431da177e4SLinus Torvalds int nr_reads = 0; 24441da177e4SLinus Torvalds int ret = 0; 24451da177e4SLinus Torvalds int is_mapped_to_disk = 1; 24461da177e4SLinus Torvalds 244703158cd7SNick Piggin index = pos >> PAGE_CACHE_SHIFT; 244803158cd7SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 244903158cd7SNick Piggin to = from + len; 245003158cd7SNick Piggin 245103158cd7SNick Piggin page = __grab_cache_page(mapping, index); 245203158cd7SNick Piggin if (!page) 245303158cd7SNick Piggin return -ENOMEM; 245403158cd7SNick Piggin *pagep = page; 245503158cd7SNick Piggin *fsdata = NULL; 245603158cd7SNick Piggin 245703158cd7SNick Piggin if (page_has_buffers(page)) { 245803158cd7SNick Piggin unlock_page(page); 245903158cd7SNick Piggin page_cache_release(page); 246003158cd7SNick Piggin *pagep = NULL; 246103158cd7SNick Piggin return block_write_begin(file, mapping, pos, len, flags, pagep, 246203158cd7SNick Piggin fsdata, get_block); 246303158cd7SNick Piggin } 2464a4b0672dSNick Piggin 24651da177e4SLinus Torvalds if (PageMappedToDisk(page)) 24661da177e4SLinus Torvalds return 0; 24671da177e4SLinus Torvalds 2468a4b0672dSNick Piggin /* 2469a4b0672dSNick Piggin * Allocate buffers so that we can keep track of state, and potentially 2470a4b0672dSNick Piggin * attach them to the page if an error occurs. In the common case of 2471a4b0672dSNick Piggin * no error, they will just be freed again without ever being attached 2472a4b0672dSNick Piggin * to the page (which is all OK, because we're under the page lock). 
2473a4b0672dSNick Piggin * 2474a4b0672dSNick Piggin * Be careful: the buffer linked list is a NULL terminated one, rather 2475a4b0672dSNick Piggin * than the circular one we're used to. 2476a4b0672dSNick Piggin */ 2477a4b0672dSNick Piggin head = alloc_page_buffers(page, blocksize, 0); 247803158cd7SNick Piggin if (!head) { 247903158cd7SNick Piggin ret = -ENOMEM; 248003158cd7SNick Piggin goto out_release; 248103158cd7SNick Piggin } 2482a4b0672dSNick Piggin 24831da177e4SLinus Torvalds block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); 24841da177e4SLinus Torvalds 24851da177e4SLinus Torvalds /* 24861da177e4SLinus Torvalds * We loop across all blocks in the page, whether or not they are 24871da177e4SLinus Torvalds * part of the affected region. This is so we can discover if the 24881da177e4SLinus Torvalds * page is fully mapped-to-disk. 24891da177e4SLinus Torvalds */ 2490a4b0672dSNick Piggin for (block_start = 0, block_in_page = 0, bh = head; 24911da177e4SLinus Torvalds block_start < PAGE_CACHE_SIZE; 2492a4b0672dSNick Piggin block_in_page++, block_start += blocksize, bh = bh->b_this_page) { 24931da177e4SLinus Torvalds int create; 24941da177e4SLinus Torvalds 2495a4b0672dSNick Piggin block_end = block_start + blocksize; 2496a4b0672dSNick Piggin bh->b_state = 0; 24971da177e4SLinus Torvalds create = 1; 24981da177e4SLinus Torvalds if (block_start >= to) 24991da177e4SLinus Torvalds create = 0; 25001da177e4SLinus Torvalds ret = get_block(inode, block_in_file + block_in_page, 2501a4b0672dSNick Piggin bh, create); 25021da177e4SLinus Torvalds if (ret) 25031da177e4SLinus Torvalds goto failed; 2504a4b0672dSNick Piggin if (!buffer_mapped(bh)) 25051da177e4SLinus Torvalds is_mapped_to_disk = 0; 2506a4b0672dSNick Piggin if (buffer_new(bh)) 2507a4b0672dSNick Piggin unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); 2508a4b0672dSNick Piggin if (PageUptodate(page)) { 2509a4b0672dSNick Piggin set_buffer_uptodate(bh); 25101da177e4SLinus Torvalds continue; 2511a4b0672dSNick Piggin } 2512a4b0672dSNick Piggin if (buffer_new(bh) || !buffer_mapped(bh)) { 2513eebd2aa3SChristoph Lameter zero_user_segments(page, block_start, from, 2514eebd2aa3SChristoph Lameter to, block_end); 25151da177e4SLinus Torvalds continue; 25161da177e4SLinus Torvalds } 2517a4b0672dSNick Piggin if (buffer_uptodate(bh)) 25181da177e4SLinus Torvalds continue; /* reiserfs does this */ 25191da177e4SLinus Torvalds if (block_start < from || block_end > to) { 2520a4b0672dSNick Piggin lock_buffer(bh); 2521a4b0672dSNick Piggin bh->b_end_io = end_buffer_read_nobh; 2522a4b0672dSNick Piggin submit_bh(READ, bh); 2523a4b0672dSNick Piggin nr_reads++; 25241da177e4SLinus Torvalds } 25251da177e4SLinus Torvalds } 25261da177e4SLinus Torvalds 25271da177e4SLinus Torvalds if (nr_reads) { 25281da177e4SLinus Torvalds /* 25291da177e4SLinus Torvalds * The page is locked, so these buffers are protected from 25301da177e4SLinus Torvalds * any VM or truncate activity. Hence we don't need to care 25311da177e4SLinus Torvalds * for the buffer_head refcounts. 
25321da177e4SLinus Torvalds */ 2533a4b0672dSNick Piggin for (bh = head; bh; bh = bh->b_this_page) { 25341da177e4SLinus Torvalds wait_on_buffer(bh); 25351da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 25361da177e4SLinus Torvalds ret = -EIO; 25371da177e4SLinus Torvalds } 25381da177e4SLinus Torvalds if (ret) 25391da177e4SLinus Torvalds goto failed; 25401da177e4SLinus Torvalds } 25411da177e4SLinus Torvalds 25421da177e4SLinus Torvalds if (is_mapped_to_disk) 25431da177e4SLinus Torvalds SetPageMappedToDisk(page); 25441da177e4SLinus Torvalds 254503158cd7SNick Piggin *fsdata = head; /* to be released by nobh_write_end */ 2546a4b0672dSNick Piggin 25471da177e4SLinus Torvalds return 0; 25481da177e4SLinus Torvalds 25491da177e4SLinus Torvalds failed: 255003158cd7SNick Piggin BUG_ON(!ret); 25511da177e4SLinus Torvalds /* 2552a4b0672dSNick Piggin * Error recovery is a bit difficult. We need to zero out blocks that 2553a4b0672dSNick Piggin * were newly allocated, and dirty them to ensure they get written out. 2554a4b0672dSNick Piggin * Buffers need to be attached to the page at this point, otherwise 2555a4b0672dSNick Piggin * the handling of potential IO errors during writeout would be hard 2556a4b0672dSNick Piggin * (could try doing synchronous writeout, but what if that fails too?) 25571da177e4SLinus Torvalds */ 255803158cd7SNick Piggin attach_nobh_buffers(page, head); 255903158cd7SNick Piggin page_zero_new_buffers(page, from, to); 2560a4b0672dSNick Piggin 256103158cd7SNick Piggin out_release: 256203158cd7SNick Piggin unlock_page(page); 256303158cd7SNick Piggin page_cache_release(page); 256403158cd7SNick Piggin *pagep = NULL; 2565a4b0672dSNick Piggin 256603158cd7SNick Piggin if (pos + len > inode->i_size) 256703158cd7SNick Piggin vmtruncate(inode, inode->i_size); 2568a4b0672dSNick Piggin 25691da177e4SLinus Torvalds return ret; 25701da177e4SLinus Torvalds } 257103158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin); 25721da177e4SLinus Torvalds 257303158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping, 257403158cd7SNick Piggin loff_t pos, unsigned len, unsigned copied, 257503158cd7SNick Piggin struct page *page, void *fsdata) 25761da177e4SLinus Torvalds { 25771da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 2578efdc3131SNick Piggin struct buffer_head *head = fsdata; 257903158cd7SNick Piggin struct buffer_head *bh; 25805b41e74aSDmitri Monakhov BUG_ON(fsdata != NULL && page_has_buffers(page)); 25811da177e4SLinus Torvalds 258203158cd7SNick Piggin if (unlikely(copied < len) && !page_has_buffers(page)) 258303158cd7SNick Piggin attach_nobh_buffers(page, head); 2584a4b0672dSNick Piggin if (page_has_buffers(page)) 258503158cd7SNick Piggin return generic_write_end(file, mapping, pos, len, 258603158cd7SNick Piggin copied, page, fsdata); 2587a4b0672dSNick Piggin 258822c8ca78SNick Piggin SetPageUptodate(page); 25891da177e4SLinus Torvalds set_page_dirty(page); 259003158cd7SNick Piggin if (pos+copied > inode->i_size) { 259103158cd7SNick Piggin i_size_write(inode, pos+copied); 25921da177e4SLinus Torvalds mark_inode_dirty(inode); 25931da177e4SLinus Torvalds } 259403158cd7SNick Piggin 259503158cd7SNick Piggin unlock_page(page); 259603158cd7SNick Piggin page_cache_release(page); 259703158cd7SNick Piggin 259803158cd7SNick Piggin while (head) { 259903158cd7SNick Piggin bh = head; 260003158cd7SNick Piggin head = head->b_this_page; 260103158cd7SNick Piggin free_buffer_head(bh); 26021da177e4SLinus Torvalds } 260303158cd7SNick Piggin 260403158cd7SNick Piggin return copied; 
260503158cd7SNick Piggin } 260603158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end); 26071da177e4SLinus Torvalds 26081da177e4SLinus Torvalds /* 26091da177e4SLinus Torvalds * nobh_writepage() - based on block_full_write_page() except 26101da177e4SLinus Torvalds * that it tries to operate without attaching bufferheads to 26111da177e4SLinus Torvalds * the page. 26121da177e4SLinus Torvalds */ 26131da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block, 26141da177e4SLinus Torvalds struct writeback_control *wbc) 26151da177e4SLinus Torvalds { 26161da177e4SLinus Torvalds struct inode * const inode = page->mapping->host; 26171da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 26181da177e4SLinus Torvalds const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 26191da177e4SLinus Torvalds unsigned offset; 26201da177e4SLinus Torvalds int ret; 26211da177e4SLinus Torvalds 26221da177e4SLinus Torvalds /* Is the page fully inside i_size? */ 26231da177e4SLinus Torvalds if (page->index < end_index) 26241da177e4SLinus Torvalds goto out; 26251da177e4SLinus Torvalds 26261da177e4SLinus Torvalds /* Is the page fully outside i_size? (truncate in progress) */ 26271da177e4SLinus Torvalds offset = i_size & (PAGE_CACHE_SIZE-1); 26281da177e4SLinus Torvalds if (page->index >= end_index+1 || !offset) { 26291da177e4SLinus Torvalds /* 26301da177e4SLinus Torvalds * The page may have dirty, unmapped buffers. For example, 26311da177e4SLinus Torvalds * they may have been added in ext3_writepage(). Make them 26321da177e4SLinus Torvalds * freeable here, so the page does not leak. 26331da177e4SLinus Torvalds */ 26341da177e4SLinus Torvalds #if 0 26351da177e4SLinus Torvalds /* Not really sure about this - do we need this ? */ 26361da177e4SLinus Torvalds if (page->mapping->a_ops->invalidatepage) 26371da177e4SLinus Torvalds page->mapping->a_ops->invalidatepage(page, offset); 26381da177e4SLinus Torvalds #endif 26391da177e4SLinus Torvalds unlock_page(page); 26401da177e4SLinus Torvalds return 0; /* don't care */ 26411da177e4SLinus Torvalds } 26421da177e4SLinus Torvalds 26431da177e4SLinus Torvalds /* 26441da177e4SLinus Torvalds * The page straddles i_size. It must be zeroed out on each and every 26451da177e4SLinus Torvalds * writepage invocation because it may be mmapped. "A file is mapped 26461da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 26471da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 26481da177e4SLinus Torvalds * writes to that region are not written out to the file." 
26491da177e4SLinus Torvalds */ 2650eebd2aa3SChristoph Lameter zero_user_segment(page, offset, PAGE_CACHE_SIZE); 26511da177e4SLinus Torvalds out: 26521da177e4SLinus Torvalds ret = mpage_writepage(page, get_block, wbc); 26531da177e4SLinus Torvalds if (ret == -EAGAIN) 26541da177e4SLinus Torvalds ret = __block_write_full_page(inode, page, get_block, wbc); 26551da177e4SLinus Torvalds return ret; 26561da177e4SLinus Torvalds } 26571da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage); 26581da177e4SLinus Torvalds 265903158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping, 266003158cd7SNick Piggin loff_t from, get_block_t *get_block) 26611da177e4SLinus Torvalds { 26621da177e4SLinus Torvalds pgoff_t index = from >> PAGE_CACHE_SHIFT; 26631da177e4SLinus Torvalds unsigned offset = from & (PAGE_CACHE_SIZE-1); 266403158cd7SNick Piggin unsigned blocksize; 266503158cd7SNick Piggin sector_t iblock; 266603158cd7SNick Piggin unsigned length, pos; 266703158cd7SNick Piggin struct inode *inode = mapping->host; 26681da177e4SLinus Torvalds struct page *page; 266903158cd7SNick Piggin struct buffer_head map_bh; 267003158cd7SNick Piggin int err; 26711da177e4SLinus Torvalds 267203158cd7SNick Piggin blocksize = 1 << inode->i_blkbits; 267303158cd7SNick Piggin length = offset & (blocksize - 1); 26741da177e4SLinus Torvalds 267503158cd7SNick Piggin /* Block boundary? Nothing to do */ 267603158cd7SNick Piggin if (!length) 267703158cd7SNick Piggin return 0; 267803158cd7SNick Piggin 267903158cd7SNick Piggin length = blocksize - length; 268003158cd7SNick Piggin iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 268103158cd7SNick Piggin 26821da177e4SLinus Torvalds page = grab_cache_page(mapping, index); 268303158cd7SNick Piggin err = -ENOMEM; 26841da177e4SLinus Torvalds if (!page) 26851da177e4SLinus Torvalds goto out; 26861da177e4SLinus Torvalds 268703158cd7SNick Piggin if (page_has_buffers(page)) { 268803158cd7SNick Piggin has_buffers: 268903158cd7SNick Piggin unlock_page(page); 269003158cd7SNick Piggin page_cache_release(page); 269103158cd7SNick Piggin return block_truncate_page(mapping, from, get_block); 26921da177e4SLinus Torvalds } 269303158cd7SNick Piggin 269403158cd7SNick Piggin /* Find the buffer that contains "offset" */ 269503158cd7SNick Piggin pos = blocksize; 269603158cd7SNick Piggin while (offset >= pos) { 269703158cd7SNick Piggin iblock++; 269803158cd7SNick Piggin pos += blocksize; 269903158cd7SNick Piggin } 270003158cd7SNick Piggin 270103158cd7SNick Piggin err = get_block(inode, iblock, &map_bh, 0); 270203158cd7SNick Piggin if (err) 270303158cd7SNick Piggin goto unlock; 270403158cd7SNick Piggin /* unmapped? It's a hole - nothing to do */ 270503158cd7SNick Piggin if (!buffer_mapped(&map_bh)) 270603158cd7SNick Piggin goto unlock; 270703158cd7SNick Piggin 270803158cd7SNick Piggin /* Ok, it's mapped. 
Make sure it's up-to-date */ 270903158cd7SNick Piggin if (!PageUptodate(page)) { 271003158cd7SNick Piggin err = mapping->a_ops->readpage(NULL, page); 271103158cd7SNick Piggin if (err) { 271203158cd7SNick Piggin page_cache_release(page); 271303158cd7SNick Piggin goto out; 271403158cd7SNick Piggin } 271503158cd7SNick Piggin lock_page(page); 271603158cd7SNick Piggin if (!PageUptodate(page)) { 271703158cd7SNick Piggin err = -EIO; 271803158cd7SNick Piggin goto unlock; 271903158cd7SNick Piggin } 272003158cd7SNick Piggin if (page_has_buffers(page)) 272103158cd7SNick Piggin goto has_buffers; 272203158cd7SNick Piggin } 2723eebd2aa3SChristoph Lameter zero_user(page, offset, length); 272403158cd7SNick Piggin set_page_dirty(page); 272503158cd7SNick Piggin err = 0; 272603158cd7SNick Piggin 272703158cd7SNick Piggin unlock: 27281da177e4SLinus Torvalds unlock_page(page); 27291da177e4SLinus Torvalds page_cache_release(page); 27301da177e4SLinus Torvalds out: 273103158cd7SNick Piggin return err; 27321da177e4SLinus Torvalds } 27331da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page); 27341da177e4SLinus Torvalds 27351da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping, 27361da177e4SLinus Torvalds loff_t from, get_block_t *get_block) 27371da177e4SLinus Torvalds { 27381da177e4SLinus Torvalds pgoff_t index = from >> PAGE_CACHE_SHIFT; 27391da177e4SLinus Torvalds unsigned offset = from & (PAGE_CACHE_SIZE-1); 27401da177e4SLinus Torvalds unsigned blocksize; 274154b21a79SAndrew Morton sector_t iblock; 27421da177e4SLinus Torvalds unsigned length, pos; 27431da177e4SLinus Torvalds struct inode *inode = mapping->host; 27441da177e4SLinus Torvalds struct page *page; 27451da177e4SLinus Torvalds struct buffer_head *bh; 27461da177e4SLinus Torvalds int err; 27471da177e4SLinus Torvalds 27481da177e4SLinus Torvalds blocksize = 1 << inode->i_blkbits; 27491da177e4SLinus Torvalds length = offset & (blocksize - 1); 27501da177e4SLinus Torvalds 27511da177e4SLinus Torvalds /* Block boundary? Nothing to do */ 27521da177e4SLinus Torvalds if (!length) 27531da177e4SLinus Torvalds return 0; 27541da177e4SLinus Torvalds 27551da177e4SLinus Torvalds length = blocksize - length; 275654b21a79SAndrew Morton iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 27571da177e4SLinus Torvalds 27581da177e4SLinus Torvalds page = grab_cache_page(mapping, index); 27591da177e4SLinus Torvalds err = -ENOMEM; 27601da177e4SLinus Torvalds if (!page) 27611da177e4SLinus Torvalds goto out; 27621da177e4SLinus Torvalds 27631da177e4SLinus Torvalds if (!page_has_buffers(page)) 27641da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0); 27651da177e4SLinus Torvalds 27661da177e4SLinus Torvalds /* Find the buffer that contains "offset" */ 27671da177e4SLinus Torvalds bh = page_buffers(page); 27681da177e4SLinus Torvalds pos = blocksize; 27691da177e4SLinus Torvalds while (offset >= pos) { 27701da177e4SLinus Torvalds bh = bh->b_this_page; 27711da177e4SLinus Torvalds iblock++; 27721da177e4SLinus Torvalds pos += blocksize; 27731da177e4SLinus Torvalds } 27741da177e4SLinus Torvalds 27751da177e4SLinus Torvalds err = 0; 27761da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2777b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 27781da177e4SLinus Torvalds err = get_block(inode, iblock, bh, 0); 27791da177e4SLinus Torvalds if (err) 27801da177e4SLinus Torvalds goto unlock; 27811da177e4SLinus Torvalds /* unmapped? 
It's a hole - nothing to do */ 27821da177e4SLinus Torvalds if (!buffer_mapped(bh)) 27831da177e4SLinus Torvalds goto unlock; 27841da177e4SLinus Torvalds } 27851da177e4SLinus Torvalds 27861da177e4SLinus Torvalds /* Ok, it's mapped. Make sure it's up-to-date */ 27871da177e4SLinus Torvalds if (PageUptodate(page)) 27881da177e4SLinus Torvalds set_buffer_uptodate(bh); 27891da177e4SLinus Torvalds 279033a266ddSDavid Chinner if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 27911da177e4SLinus Torvalds err = -EIO; 27921da177e4SLinus Torvalds ll_rw_block(READ, 1, &bh); 27931da177e4SLinus Torvalds wait_on_buffer(bh); 27941da177e4SLinus Torvalds /* Uhhuh. Read error. Complain and punt. */ 27951da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 27961da177e4SLinus Torvalds goto unlock; 27971da177e4SLinus Torvalds } 27981da177e4SLinus Torvalds 2799eebd2aa3SChristoph Lameter zero_user(page, offset, length); 28001da177e4SLinus Torvalds mark_buffer_dirty(bh); 28011da177e4SLinus Torvalds err = 0; 28021da177e4SLinus Torvalds 28031da177e4SLinus Torvalds unlock: 28041da177e4SLinus Torvalds unlock_page(page); 28051da177e4SLinus Torvalds page_cache_release(page); 28061da177e4SLinus Torvalds out: 28071da177e4SLinus Torvalds return err; 28081da177e4SLinus Torvalds } 28091da177e4SLinus Torvalds 28101da177e4SLinus Torvalds /* 28111da177e4SLinus Torvalds * The generic ->writepage function for buffer-backed address_spaces 28121da177e4SLinus Torvalds */ 28131da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block, 28141da177e4SLinus Torvalds struct writeback_control *wbc) 28151da177e4SLinus Torvalds { 28161da177e4SLinus Torvalds struct inode * const inode = page->mapping->host; 28171da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 28181da177e4SLinus Torvalds const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 28191da177e4SLinus Torvalds unsigned offset; 28201da177e4SLinus Torvalds 28211da177e4SLinus Torvalds /* Is the page fully inside i_size? */ 28221da177e4SLinus Torvalds if (page->index < end_index) 28231da177e4SLinus Torvalds return __block_write_full_page(inode, page, get_block, wbc); 28241da177e4SLinus Torvalds 28251da177e4SLinus Torvalds /* Is the page fully outside i_size? (truncate in progress) */ 28261da177e4SLinus Torvalds offset = i_size & (PAGE_CACHE_SIZE-1); 28271da177e4SLinus Torvalds if (page->index >= end_index+1 || !offset) { 28281da177e4SLinus Torvalds /* 28291da177e4SLinus Torvalds * The page may have dirty, unmapped buffers. For example, 28301da177e4SLinus Torvalds * they may have been added in ext3_writepage(). Make them 28311da177e4SLinus Torvalds * freeable here, so the page does not leak. 28321da177e4SLinus Torvalds */ 2833aaa4059bSJan Kara do_invalidatepage(page, 0); 28341da177e4SLinus Torvalds unlock_page(page); 28351da177e4SLinus Torvalds return 0; /* don't care */ 28361da177e4SLinus Torvalds } 28371da177e4SLinus Torvalds 28381da177e4SLinus Torvalds /* 28391da177e4SLinus Torvalds * The page straddles i_size. It must be zeroed out on each and every 28401da177e4SLinus Torvalds * writepage invokation because it may be mmapped. "A file is mapped 28411da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 28421da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 28431da177e4SLinus Torvalds * writes to that region are not written out to the file." 
28441da177e4SLinus Torvalds */ 2845eebd2aa3SChristoph Lameter zero_user_segment(page, offset, PAGE_CACHE_SIZE); 28461da177e4SLinus Torvalds return __block_write_full_page(inode, page, get_block, wbc); 28471da177e4SLinus Torvalds } 28481da177e4SLinus Torvalds 28491da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 28501da177e4SLinus Torvalds get_block_t *get_block) 28511da177e4SLinus Torvalds { 28521da177e4SLinus Torvalds struct buffer_head tmp; 28531da177e4SLinus Torvalds struct inode *inode = mapping->host; 28541da177e4SLinus Torvalds tmp.b_state = 0; 28551da177e4SLinus Torvalds tmp.b_blocknr = 0; 2856b0cf2321SBadari Pulavarty tmp.b_size = 1 << inode->i_blkbits; 28571da177e4SLinus Torvalds get_block(inode, block, &tmp, 0); 28581da177e4SLinus Torvalds return tmp.b_blocknr; 28591da177e4SLinus Torvalds } 28601da177e4SLinus Torvalds 28616712ecf8SNeilBrown static void end_bio_bh_io_sync(struct bio *bio, int err) 28621da177e4SLinus Torvalds { 28631da177e4SLinus Torvalds struct buffer_head *bh = bio->bi_private; 28641da177e4SLinus Torvalds 28651da177e4SLinus Torvalds if (err == -EOPNOTSUPP) { 28661da177e4SLinus Torvalds set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 28671da177e4SLinus Torvalds set_bit(BH_Eopnotsupp, &bh->b_state); 28681da177e4SLinus Torvalds } 28691da177e4SLinus Torvalds 28701da177e4SLinus Torvalds bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); 28711da177e4SLinus Torvalds bio_put(bio); 28721da177e4SLinus Torvalds } 28731da177e4SLinus Torvalds 28741da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh) 28751da177e4SLinus Torvalds { 28761da177e4SLinus Torvalds struct bio *bio; 28771da177e4SLinus Torvalds int ret = 0; 28781da177e4SLinus Torvalds 28791da177e4SLinus Torvalds BUG_ON(!buffer_locked(bh)); 28801da177e4SLinus Torvalds BUG_ON(!buffer_mapped(bh)); 28811da177e4SLinus Torvalds BUG_ON(!bh->b_end_io); 28821da177e4SLinus Torvalds 28831da177e4SLinus Torvalds if (buffer_ordered(bh) && (rw == WRITE)) 28841da177e4SLinus Torvalds rw = WRITE_BARRIER; 28851da177e4SLinus Torvalds 28861da177e4SLinus Torvalds /* 28871da177e4SLinus Torvalds * Only clear out a write error when rewriting, should this 28881da177e4SLinus Torvalds * include WRITE_SYNC as well? 
28891da177e4SLinus Torvalds */ 28901da177e4SLinus Torvalds if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER)) 28911da177e4SLinus Torvalds clear_buffer_write_io_error(bh); 28921da177e4SLinus Torvalds 28931da177e4SLinus Torvalds /* 28941da177e4SLinus Torvalds * from here on down, it's all bio -- do the initial mapping, 28951da177e4SLinus Torvalds * submit_bio -> generic_make_request may further map this bio around 28961da177e4SLinus Torvalds */ 28971da177e4SLinus Torvalds bio = bio_alloc(GFP_NOIO, 1); 28981da177e4SLinus Torvalds 28991da177e4SLinus Torvalds bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 29001da177e4SLinus Torvalds bio->bi_bdev = bh->b_bdev; 29011da177e4SLinus Torvalds bio->bi_io_vec[0].bv_page = bh->b_page; 29021da177e4SLinus Torvalds bio->bi_io_vec[0].bv_len = bh->b_size; 29031da177e4SLinus Torvalds bio->bi_io_vec[0].bv_offset = bh_offset(bh); 29041da177e4SLinus Torvalds 29051da177e4SLinus Torvalds bio->bi_vcnt = 1; 29061da177e4SLinus Torvalds bio->bi_idx = 0; 29071da177e4SLinus Torvalds bio->bi_size = bh->b_size; 29081da177e4SLinus Torvalds 29091da177e4SLinus Torvalds bio->bi_end_io = end_bio_bh_io_sync; 29101da177e4SLinus Torvalds bio->bi_private = bh; 29111da177e4SLinus Torvalds 29121da177e4SLinus Torvalds bio_get(bio); 29131da177e4SLinus Torvalds submit_bio(rw, bio); 29141da177e4SLinus Torvalds 29151da177e4SLinus Torvalds if (bio_flagged(bio, BIO_EOPNOTSUPP)) 29161da177e4SLinus Torvalds ret = -EOPNOTSUPP; 29171da177e4SLinus Torvalds 29181da177e4SLinus Torvalds bio_put(bio); 29191da177e4SLinus Torvalds return ret; 29201da177e4SLinus Torvalds } 29211da177e4SLinus Torvalds 29221da177e4SLinus Torvalds /** 29231da177e4SLinus Torvalds * ll_rw_block: low-level access to block devices (DEPRECATED) 2924a7662236SJan Kara * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead) 29251da177e4SLinus Torvalds * @nr: number of &struct buffer_heads in the array 29261da177e4SLinus Torvalds * @bhs: array of pointers to &struct buffer_head 29271da177e4SLinus Torvalds * 2928a7662236SJan Kara * ll_rw_block() takes an array of pointers to &struct buffer_heads, and 2929a7662236SJan Kara * requests an I/O operation on them, either a %READ or a %WRITE. The third 2930a7662236SJan Kara * %SWRITE is like %WRITE only we make sure that the *current* data in buffers 2931a7662236SJan Kara * are sent to disk. The fourth %READA option is described in the documentation 2932a7662236SJan Kara * for generic_make_request() which ll_rw_block() calls. 29331da177e4SLinus Torvalds * 29341da177e4SLinus Torvalds * This function drops any buffer that it cannot get a lock on (with the 2935a7662236SJan Kara * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be 2936a7662236SJan Kara * clean when doing a write request, and any buffer that appears to be 2937a7662236SJan Kara * up-to-date when doing a read request. Further it marks as clean buffers that 2938a7662236SJan Kara * are processed for writing (the buffer cache won't assume that they are 2939a7662236SJan Kara * actually clean until the buffer gets unlocked). 29401da177e4SLinus Torvalds * 29411da177e4SLinus Torvalds * ll_rw_block sets b_end_io to a simple completion handler that marks 29421da177e4SLinus Torvalds * the buffer up-to-date (if appropriate), unlocks the buffer and wakes 29431da177e4SLinus Torvalds * any waiters.
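 *
 * Illustrative example only (not part of this file): a typical caller
 * submits reads for a batch of already-mapped buffers and then waits
 * for each one; "bhs", "nr", "i" and "err" are assumed to be set up by
 * the caller:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}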
29441da177e4SLinus Torvalds * 29451da177e4SLinus Torvalds * All of the buffers must be for the same device, and must also be a 29461da177e4SLinus Torvalds * multiple of the current approved size for the device. 29471da177e4SLinus Torvalds */ 29481da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) 29491da177e4SLinus Torvalds { 29501da177e4SLinus Torvalds int i; 29511da177e4SLinus Torvalds 29521da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 29531da177e4SLinus Torvalds struct buffer_head *bh = bhs[i]; 29541da177e4SLinus Torvalds 2955a7662236SJan Kara if (rw == SWRITE) 2956a7662236SJan Kara lock_buffer(bh); 2957a7662236SJan Kara else if (test_set_buffer_locked(bh)) 29581da177e4SLinus Torvalds continue; 29591da177e4SLinus Torvalds 2960a7662236SJan Kara if (rw == WRITE || rw == SWRITE) { 29611da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 296276c3073aSakpm@osdl.org bh->b_end_io = end_buffer_write_sync; 2963e60e5c50SOGAWA Hirofumi get_bh(bh); 29641da177e4SLinus Torvalds submit_bh(WRITE, bh); 29651da177e4SLinus Torvalds continue; 29661da177e4SLinus Torvalds } 29671da177e4SLinus Torvalds } else { 29681da177e4SLinus Torvalds if (!buffer_uptodate(bh)) { 296976c3073aSakpm@osdl.org bh->b_end_io = end_buffer_read_sync; 2970e60e5c50SOGAWA Hirofumi get_bh(bh); 29711da177e4SLinus Torvalds submit_bh(rw, bh); 29721da177e4SLinus Torvalds continue; 29731da177e4SLinus Torvalds } 29741da177e4SLinus Torvalds } 29751da177e4SLinus Torvalds unlock_buffer(bh); 29761da177e4SLinus Torvalds } 29771da177e4SLinus Torvalds } 29781da177e4SLinus Torvalds 29791da177e4SLinus Torvalds /* 29801da177e4SLinus Torvalds * For a data-integrity writeout, we need to wait upon any in-progress I/O 29811da177e4SLinus Torvalds * and then start new I/O and then wait upon it. The caller must have a ref on 29821da177e4SLinus Torvalds * the buffer_head. 29831da177e4SLinus Torvalds */ 29841da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh) 29851da177e4SLinus Torvalds { 29861da177e4SLinus Torvalds int ret = 0; 29871da177e4SLinus Torvalds 29881da177e4SLinus Torvalds WARN_ON(atomic_read(&bh->b_count) < 1); 29891da177e4SLinus Torvalds lock_buffer(bh); 29901da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 29911da177e4SLinus Torvalds get_bh(bh); 29921da177e4SLinus Torvalds bh->b_end_io = end_buffer_write_sync; 29931da177e4SLinus Torvalds ret = submit_bh(WRITE, bh); 29941da177e4SLinus Torvalds wait_on_buffer(bh); 29951da177e4SLinus Torvalds if (buffer_eopnotsupp(bh)) { 29961da177e4SLinus Torvalds clear_buffer_eopnotsupp(bh); 29971da177e4SLinus Torvalds ret = -EOPNOTSUPP; 29981da177e4SLinus Torvalds } 29991da177e4SLinus Torvalds if (!ret && !buffer_uptodate(bh)) 30001da177e4SLinus Torvalds ret = -EIO; 30011da177e4SLinus Torvalds } else { 30021da177e4SLinus Torvalds unlock_buffer(bh); 30031da177e4SLinus Torvalds } 30041da177e4SLinus Torvalds return ret; 30051da177e4SLinus Torvalds } 30061da177e4SLinus Torvalds 30071da177e4SLinus Torvalds /* 30081da177e4SLinus Torvalds * try_to_free_buffers() checks if all the buffers on this particular page 30091da177e4SLinus Torvalds * are unused, and releases them if so. 30101da177e4SLinus Torvalds * 30111da177e4SLinus Torvalds * Exclusion against try_to_free_buffers may be obtained by either 30121da177e4SLinus Torvalds * locking the page or by holding its mapping's private_lock. 
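 *
 * Illustrative example only (not part of this file): a buffer-backed
 * filesystem's ->releasepage() can simply forward to this helper (the
 * VM's try_to_release_page() already falls back to it when no
 * ->releasepage() is provided); "myfs" is hypothetical:
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}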
30131da177e4SLinus Torvalds * 30141da177e4SLinus Torvalds * If the page is dirty but all the buffers are clean then we need to 30151da177e4SLinus Torvalds * be sure to mark the page clean as well. This is because the page 30161da177e4SLinus Torvalds * may be against a block device, and a later reattachment of buffers 30171da177e4SLinus Torvalds * to a dirty page will set *all* buffers dirty. Which would corrupt 30181da177e4SLinus Torvalds * filesystem data on the same device. 30191da177e4SLinus Torvalds * 30201da177e4SLinus Torvalds * The same applies to regular filesystem pages: if all the buffers are 30211da177e4SLinus Torvalds * clean then we set the page clean and proceed. To do that, we require 30221da177e4SLinus Torvalds * total exclusion from __set_page_dirty_buffers(). That is obtained with 30231da177e4SLinus Torvalds * private_lock. 30241da177e4SLinus Torvalds * 30251da177e4SLinus Torvalds * try_to_free_buffers() is non-blocking. 30261da177e4SLinus Torvalds */ 30271da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh) 30281da177e4SLinus Torvalds { 30291da177e4SLinus Torvalds return atomic_read(&bh->b_count) | 30301da177e4SLinus Torvalds (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 30311da177e4SLinus Torvalds } 30321da177e4SLinus Torvalds 30331da177e4SLinus Torvalds static int 30341da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free) 30351da177e4SLinus Torvalds { 30361da177e4SLinus Torvalds struct buffer_head *head = page_buffers(page); 30371da177e4SLinus Torvalds struct buffer_head *bh; 30381da177e4SLinus Torvalds 30391da177e4SLinus Torvalds bh = head; 30401da177e4SLinus Torvalds do { 3041de7d5a3bSakpm@osdl.org if (buffer_write_io_error(bh) && page->mapping) 30421da177e4SLinus Torvalds set_bit(AS_EIO, &page->mapping->flags); 30431da177e4SLinus Torvalds if (buffer_busy(bh)) 30441da177e4SLinus Torvalds goto failed; 30451da177e4SLinus Torvalds bh = bh->b_this_page; 30461da177e4SLinus Torvalds } while (bh != head); 30471da177e4SLinus Torvalds 30481da177e4SLinus Torvalds do { 30491da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 30501da177e4SLinus Torvalds 3051535ee2fbSJan Kara if (bh->b_assoc_map) 30521da177e4SLinus Torvalds __remove_assoc_queue(bh); 30531da177e4SLinus Torvalds bh = next; 30541da177e4SLinus Torvalds } while (bh != head); 30551da177e4SLinus Torvalds *buffers_to_free = head; 30561da177e4SLinus Torvalds __clear_page_buffers(page); 30571da177e4SLinus Torvalds return 1; 30581da177e4SLinus Torvalds failed: 30591da177e4SLinus Torvalds return 0; 30601da177e4SLinus Torvalds } 30611da177e4SLinus Torvalds 30621da177e4SLinus Torvalds int try_to_free_buffers(struct page *page) 30631da177e4SLinus Torvalds { 30641da177e4SLinus Torvalds struct address_space * const mapping = page->mapping; 30651da177e4SLinus Torvalds struct buffer_head *buffers_to_free = NULL; 30661da177e4SLinus Torvalds int ret = 0; 30671da177e4SLinus Torvalds 30681da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 3069ecdfc978SLinus Torvalds if (PageWriteback(page)) 30701da177e4SLinus Torvalds return 0; 30711da177e4SLinus Torvalds 30721da177e4SLinus Torvalds if (mapping == NULL) { /* can this still happen? 
*/ 30731da177e4SLinus Torvalds ret = drop_buffers(page, &buffers_to_free); 30741da177e4SLinus Torvalds goto out; 30751da177e4SLinus Torvalds } 30761da177e4SLinus Torvalds 30771da177e4SLinus Torvalds spin_lock(&mapping->private_lock); 30781da177e4SLinus Torvalds ret = drop_buffers(page, &buffers_to_free); 3079ecdfc978SLinus Torvalds 3080ecdfc978SLinus Torvalds /* 3081ecdfc978SLinus Torvalds * If the filesystem writes its buffers by hand (eg ext3) 3082ecdfc978SLinus Torvalds * then we can have clean buffers against a dirty page. We 3083ecdfc978SLinus Torvalds * clean the page here; otherwise the VM will never notice 3084ecdfc978SLinus Torvalds * that the filesystem did any IO at all. 3085ecdfc978SLinus Torvalds * 3086ecdfc978SLinus Torvalds * Also, during truncate, discard_buffer will have marked all 3087ecdfc978SLinus Torvalds * the page's buffers clean. We discover that here and clean 3088ecdfc978SLinus Torvalds * the page also. 308987df7241SNick Piggin * 309087df7241SNick Piggin * private_lock must be held over this entire operation in order 309187df7241SNick Piggin * to synchronise against __set_page_dirty_buffers and prevent the 309287df7241SNick Piggin * dirty bit from being lost. 3093ecdfc978SLinus Torvalds */ 3094ecdfc978SLinus Torvalds if (ret) 3095ecdfc978SLinus Torvalds cancel_dirty_page(page, PAGE_CACHE_SIZE); 309687df7241SNick Piggin spin_unlock(&mapping->private_lock); 30971da177e4SLinus Torvalds out: 30981da177e4SLinus Torvalds if (buffers_to_free) { 30991da177e4SLinus Torvalds struct buffer_head *bh = buffers_to_free; 31001da177e4SLinus Torvalds 31011da177e4SLinus Torvalds do { 31021da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 31031da177e4SLinus Torvalds free_buffer_head(bh); 31041da177e4SLinus Torvalds bh = next; 31051da177e4SLinus Torvalds } while (bh != buffers_to_free); 31061da177e4SLinus Torvalds } 31071da177e4SLinus Torvalds return ret; 31081da177e4SLinus Torvalds } 31091da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers); 31101da177e4SLinus Torvalds 31113978d717SNeilBrown void block_sync_page(struct page *page) 31121da177e4SLinus Torvalds { 31131da177e4SLinus Torvalds struct address_space *mapping; 31141da177e4SLinus Torvalds 31151da177e4SLinus Torvalds smp_mb(); 31161da177e4SLinus Torvalds mapping = page_mapping(page); 31171da177e4SLinus Torvalds if (mapping) 31181da177e4SLinus Torvalds blk_run_backing_dev(mapping->backing_dev_info, page); 31191da177e4SLinus Torvalds } 31201da177e4SLinus Torvalds 31211da177e4SLinus Torvalds /* 31221da177e4SLinus Torvalds * There are no bdflush tunables left. But distributions are 31231da177e4SLinus Torvalds * still running obsolete flush daemons, so we terminate them here. 31241da177e4SLinus Torvalds * 31251da177e4SLinus Torvalds * Use of bdflush() is deprecated and will be removed in a future kernel. 31261da177e4SLinus Torvalds * The `pdflush' kernel threads fully replace bdflush daemons and this call. 
31271da177e4SLinus Torvalds */ 31281da177e4SLinus Torvalds asmlinkage long sys_bdflush(int func, long data) 31291da177e4SLinus Torvalds { 31301da177e4SLinus Torvalds static int msg_count; 31311da177e4SLinus Torvalds 31321da177e4SLinus Torvalds if (!capable(CAP_SYS_ADMIN)) 31331da177e4SLinus Torvalds return -EPERM; 31341da177e4SLinus Torvalds 31351da177e4SLinus Torvalds if (msg_count < 5) { 31361da177e4SLinus Torvalds msg_count++; 31371da177e4SLinus Torvalds printk(KERN_INFO 31381da177e4SLinus Torvalds "warning: process `%s' used the obsolete bdflush" 31391da177e4SLinus Torvalds " system call\n", current->comm); 31401da177e4SLinus Torvalds printk(KERN_INFO "Fix your initscripts?\n"); 31411da177e4SLinus Torvalds } 31421da177e4SLinus Torvalds 31431da177e4SLinus Torvalds if (func == 1) 31441da177e4SLinus Torvalds do_exit(0); 31451da177e4SLinus Torvalds return 0; 31461da177e4SLinus Torvalds } 31471da177e4SLinus Torvalds 31481da177e4SLinus Torvalds /* 31491da177e4SLinus Torvalds * Buffer-head allocation 31501da177e4SLinus Torvalds */ 3151e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep; 31521da177e4SLinus Torvalds 31531da177e4SLinus Torvalds /* 31541da177e4SLinus Torvalds * Once the number of bh's in the machine exceeds this level, we start 31551da177e4SLinus Torvalds * stripping them in writeback. 31561da177e4SLinus Torvalds */ 31571da177e4SLinus Torvalds static int max_buffer_heads; 31581da177e4SLinus Torvalds 31591da177e4SLinus Torvalds int buffer_heads_over_limit; 31601da177e4SLinus Torvalds 31611da177e4SLinus Torvalds struct bh_accounting { 31621da177e4SLinus Torvalds int nr; /* Number of live bh's */ 31631da177e4SLinus Torvalds int ratelimit; /* Limit cacheline bouncing */ 31641da177e4SLinus Torvalds }; 31651da177e4SLinus Torvalds 31661da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 31671da177e4SLinus Torvalds 31681da177e4SLinus Torvalds static void recalc_bh_state(void) 31691da177e4SLinus Torvalds { 31701da177e4SLinus Torvalds int i; 31711da177e4SLinus Torvalds int tot = 0; 31721da177e4SLinus Torvalds 31731da177e4SLinus Torvalds if (__get_cpu_var(bh_accounting).ratelimit++ < 4096) 31741da177e4SLinus Torvalds return; 31751da177e4SLinus Torvalds __get_cpu_var(bh_accounting).ratelimit = 0; 31768a143426SEric Dumazet for_each_online_cpu(i) 31771da177e4SLinus Torvalds tot += per_cpu(bh_accounting, i).nr; 31781da177e4SLinus Torvalds buffer_heads_over_limit = (tot > max_buffer_heads); 31791da177e4SLinus Torvalds } 31801da177e4SLinus Torvalds 3181dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 31821da177e4SLinus Torvalds { 3183488514d1SChristoph Lameter struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); 31841da177e4SLinus Torvalds if (ret) { 3185a35afb83SChristoph Lameter INIT_LIST_HEAD(&ret->b_assoc_buffers); 3186736c7b80SCoywolf Qi Hunt get_cpu_var(bh_accounting).nr++; 31871da177e4SLinus Torvalds recalc_bh_state(); 3188736c7b80SCoywolf Qi Hunt put_cpu_var(bh_accounting); 31891da177e4SLinus Torvalds } 31901da177e4SLinus Torvalds return ret; 31911da177e4SLinus Torvalds } 31921da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head); 31931da177e4SLinus Torvalds 31941da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh) 31951da177e4SLinus Torvalds { 31961da177e4SLinus Torvalds BUG_ON(!list_empty(&bh->b_assoc_buffers)); 31971da177e4SLinus Torvalds kmem_cache_free(bh_cachep, bh); 3198736c7b80SCoywolf Qi Hunt get_cpu_var(bh_accounting).nr--; 31991da177e4SLinus Torvalds recalc_bh_state(); 
3200736c7b80SCoywolf Qi Hunt put_cpu_var(bh_accounting); 32011da177e4SLinus Torvalds } 32021da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head); 32031da177e4SLinus Torvalds 32041da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu) 32051da177e4SLinus Torvalds { 32061da177e4SLinus Torvalds int i; 32071da177e4SLinus Torvalds struct bh_lru *b = &per_cpu(bh_lrus, cpu); 32081da177e4SLinus Torvalds 32091da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 32101da177e4SLinus Torvalds brelse(b->bhs[i]); 32111da177e4SLinus Torvalds b->bhs[i] = NULL; 32121da177e4SLinus Torvalds } 32138a143426SEric Dumazet get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr; 32148a143426SEric Dumazet per_cpu(bh_accounting, cpu).nr = 0; 32158a143426SEric Dumazet put_cpu_var(bh_accounting); 32161da177e4SLinus Torvalds } 32171da177e4SLinus Torvalds 32181da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self, 32191da177e4SLinus Torvalds unsigned long action, void *hcpu) 32201da177e4SLinus Torvalds { 32218bb78442SRafael J. Wysocki if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) 32221da177e4SLinus Torvalds buffer_exit_cpu((unsigned long)hcpu); 32231da177e4SLinus Torvalds return NOTIFY_OK; 32241da177e4SLinus Torvalds } 32251da177e4SLinus Torvalds 3226389d1b08SAneesh Kumar K.V /** 3227a6b91919SRandy Dunlap * bh_uptodate_or_lock - Test whether the buffer is uptodate 3228389d1b08SAneesh Kumar K.V * @bh: struct buffer_head 3229389d1b08SAneesh Kumar K.V * 3230389d1b08SAneesh Kumar K.V * Return true if the buffer is up-to-date and false, 3231389d1b08SAneesh Kumar K.V * with the buffer locked, if not. 3232389d1b08SAneesh Kumar K.V */ 3233389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh) 3234389d1b08SAneesh Kumar K.V { 3235389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh)) { 3236389d1b08SAneesh Kumar K.V lock_buffer(bh); 3237389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh)) 3238389d1b08SAneesh Kumar K.V return 0; 3239389d1b08SAneesh Kumar K.V unlock_buffer(bh); 3240389d1b08SAneesh Kumar K.V } 3241389d1b08SAneesh Kumar K.V return 1; 3242389d1b08SAneesh Kumar K.V } 3243389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock); 3244389d1b08SAneesh Kumar K.V 3245389d1b08SAneesh Kumar K.V /** 3246a6b91919SRandy Dunlap * bh_submit_read - Submit a locked buffer for reading 3247389d1b08SAneesh Kumar K.V * @bh: struct buffer_head 3248389d1b08SAneesh Kumar K.V * 3249389d1b08SAneesh Kumar K.V * Returns zero on success and -EIO on error. 
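 *
 * Illustrative example only (not part of this file): the usual pattern
 * is to pair this with bh_uptodate_or_lock() above, so the read is only
 * submitted when the buffer is not already up to date:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = bh_submit_read(bh);
 *		if (err)
 *			return err;
 *	}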
3250389d1b08SAneesh Kumar K.V */ 3251389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh) 3252389d1b08SAneesh Kumar K.V { 3253389d1b08SAneesh Kumar K.V BUG_ON(!buffer_locked(bh)); 3254389d1b08SAneesh Kumar K.V 3255389d1b08SAneesh Kumar K.V if (buffer_uptodate(bh)) { 3256389d1b08SAneesh Kumar K.V unlock_buffer(bh); 3257389d1b08SAneesh Kumar K.V return 0; 3258389d1b08SAneesh Kumar K.V } 3259389d1b08SAneesh Kumar K.V 3260389d1b08SAneesh Kumar K.V get_bh(bh); 3261389d1b08SAneesh Kumar K.V bh->b_end_io = end_buffer_read_sync; 3262389d1b08SAneesh Kumar K.V submit_bh(READ, bh); 3263389d1b08SAneesh Kumar K.V wait_on_buffer(bh); 3264389d1b08SAneesh Kumar K.V if (buffer_uptodate(bh)) 3265389d1b08SAneesh Kumar K.V return 0; 3266389d1b08SAneesh Kumar K.V return -EIO; 3267389d1b08SAneesh Kumar K.V } 3268389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read); 3269389d1b08SAneesh Kumar K.V 3270b98938c3SChristoph Lameter static void 3271b98938c3SChristoph Lameter init_buffer_head(struct kmem_cache *cachep, void *data) 3272b98938c3SChristoph Lameter { 3273b98938c3SChristoph Lameter struct buffer_head *bh = data; 3274b98938c3SChristoph Lameter 3275b98938c3SChristoph Lameter memset(bh, 0, sizeof(*bh)); 3276b98938c3SChristoph Lameter INIT_LIST_HEAD(&bh->b_assoc_buffers); 3277b98938c3SChristoph Lameter } 3278b98938c3SChristoph Lameter 32791da177e4SLinus Torvalds void __init buffer_init(void) 32801da177e4SLinus Torvalds { 32811da177e4SLinus Torvalds int nrpages; 32821da177e4SLinus Torvalds 3283b98938c3SChristoph Lameter bh_cachep = kmem_cache_create("buffer_head", 3284b98938c3SChristoph Lameter sizeof(struct buffer_head), 0, 3285b98938c3SChristoph Lameter (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 3286b98938c3SChristoph Lameter SLAB_MEM_SPREAD), 3287b98938c3SChristoph Lameter init_buffer_head); 32881da177e4SLinus Torvalds 32891da177e4SLinus Torvalds /* 32901da177e4SLinus Torvalds * Limit the bh occupancy to 10% of ZONE_NORMAL 32911da177e4SLinus Torvalds */ 32921da177e4SLinus Torvalds nrpages = (nr_free_buffer_pages() * 10) / 100; 32931da177e4SLinus Torvalds max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 32941da177e4SLinus Torvalds hotcpu_notifier(buffer_cpu_notify, 0); 32951da177e4SLinus Torvalds } 32961da177e4SLinus Torvalds 32971da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget); 32981da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse); 32991da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer); 33001da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write); 33011da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write); 330254171690SDavid Chinner EXPORT_SYMBOL(block_page_mkwrite); 33031da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page); 33041da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page); 33051da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page); 33061da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page); 330789e10787SNick Piggin EXPORT_SYMBOL(cont_write_begin); 33081da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync); 33091da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync); 33101da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync); 33111da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev); 33121da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap); 33131da177e4SLinus Torvalds EXPORT_SYMBOL(generic_commit_write); 331405eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple); 33151da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer); 33161da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev); 33171da177e4SLinus Torvalds 
EXPORT_SYMBOL(ll_rw_block); 33181da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty); 33191da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh); 33201da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer); 33211da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
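/*
 * Illustrative sketch only (not part of fs/buffer.c): a minimal,
 * hypothetical buffer-backed filesystem could glue the generic helpers
 * exported above into its address_space_operations roughly as follows;
 * "myfs", myfs_get_block() and the myfs_aops name are assumptions:
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.sync_page	= block_sync_page,
 *		.bmap		= myfs_bmap,
 *	};
 */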