/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads.  SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}

static int sync_buffer(void *word)
{
        struct block_device *bd;
        struct buffer_head *bh
                = container_of(word, struct buffer_head, b_state);

        smp_mb();
        bd = bh->b_bdev;
        if (bd)
                blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
        smp_mb__before_clear_bit();
        clear_buffer_locked(bh);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

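/*
 * Illustrative sketch (not part of this file): __wait_on_buffer() does not
 * stop the buffer from being locked again, so a caller that needs a stable
 * view must take the lock itself, as below.  The function name is
 * hypothetical; "bh" is assumed to come from e.g. sb_bread().
 */
#if 0
static void example_touch_buffer(struct buffer_head *bh)
{
        lock_buffer(bh);        /* sleeps in __lock_buffer() if contended */
        if (buffer_uptodate(bh)) {
                /* bh->b_data may be inspected or modified here */
        }
        unlock_buffer(bh);      /* wakes waiters via wake_up_bit() */
}
#endif
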
static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        int ret = 0;

        if (bdev)
                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
        return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = fsync_super(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}

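/*
 * Illustrative sketch (not part of this file) contrasting the two helpers
 * above: sync_blockdev() flushes only the blockdev pagecache, while
 * fsync_bdev() also syncs the mounted filesystem, if any.  The function
 * name is hypothetical.
 */
#if 0
static void example_flush_bdev(struct block_device *bdev)
{
        sync_blockdev(bdev);    /* blockdev pagecache only, no superblock lock */
        fsync_bdev(bdev);       /* filesystem data too, via fsync_super() */
}
#endif
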
/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;

        down(&bdev->bd_mount_sem);
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
                smp_wmb();

                __fsync_super(sb);

                sb->s_frozen = SB_FREEZE_TRANS;
                smp_wmb();

                sync_blockdev(sb->s_bdev);

                if (sb->s_op->write_super_lockfs)
                        sb->s_op->write_super_lockfs(sb);
        }

        sync_blockdev(bdev);
        return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        if (sb) {
                BUG_ON(sb->s_bdev != bdev);

                if (sb->s_op->unlockfs)
                        sb->s_op->unlockfs(sb);
                sb->s_frozen = SB_UNFROZEN;
                smp_wmb();
                wake_up(&sb->s_wait_unfrozen);
                drop_super(sb);
        }

        up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);

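/*
 * Illustrative sketch (not part of this file): the freeze/thaw pairing a
 * snapshot driver would use.  take_snapshot() is a hypothetical stand-in
 * for the caller's own copy-out step.
 */
#if 0
static void example_snapshot(struct block_device *bdev)
{
        struct super_block *sb;

        sb = freeze_bdev(bdev); /* device is now consistent and write-frozen */
        take_snapshot(bdev);    /* hypothetical: copy the stable image */
        thaw_bdev(bdev, sb);    /* releases bd_mount_sem, wakes writers */
}
#endif
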
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.  (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);

        /*
         * We might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between file I/O on
         * the block device and getblk.  It gets dealt with elsewhere;
         * don't buffer_error if we had some unmapped buffers.
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/*
 * If invalidate_buffers() will trash dirty buffers, it means some kind
 * of fs corruption is going on.  Trashing dirty data always implies losing
 * information that was supposed to be just stored on the physical layer
 * by the user.
 *
 * Thus invalidate_buffers in general usage is not allowed to trash
 * dirty buffers.  For example, ioctl(BLKFLSBUF) expects dirty data to
 * be preserved.  These buffers are simply skipped.
 *
 * We also skip buffers which are still in use.  For example this can
 * happen if a userspace program is reading the block device.
 *
 * NOTE: in the case where the user removed a removable-media disk even
 * though there is still dirty data not synced on disk (due to a bug in
 * the device driver or to an error of the user), by not destroying the
 * dirty buffers we could generate corruption also on the next media
 * inserted.  Thus a parameter is necessary to handle this case in the
 * safest way possible (trying not to corrupt the newly inserted disk
 * with the data belonging to the old, now corrupted, disk).  Also for
 * the ramdisk, the natural way to release the ramdisk memory is to
 * destroy dirty buffers.
 *
 * These are two special cases.  Normal usage implies that the device
 * driver issues a sync on the device (without waiting for I/O completion)
 * and then an invalidate_buffers call that doesn't trash dirty buffers.
 *
 * For handling cache coherency with the blkdev pagecache the 'update' case
 * has been introduced.  It is needed to re-read from disk any pinned
 * buffer.  NOTE: re-reading from disk is destructive so we can do it only
 * when we assume nobody is changing the buffercache under our I/O and when
 * we think the disk contains more recent information than the buffercache.
 * The update == 1 pass marks the buffers we need to update; the update == 2
 * pass does the actual I/O.
 */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        invalidate_mapping_pages(mapping, 0, -1);
}

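/*
 * Illustrative sketch (not part of this file): the "normal usage" pattern
 * described above, as a driver might apply it on a media change.  The
 * function name is hypothetical.
 */
#if 0
static void example_media_change(struct block_device *bdev)
{
        sync_blockdev(bdev);    /* push dirty pages toward the old media */
        invalidate_bdev(bdev);  /* drop clean cached pages and bh LRUs */
}
#endif
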
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone **zones;
        pg_data_t *pgdat;

        wakeup_pdflush(1024);
        yield();

        for_each_online_pgdat(pgdat) {
                zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
                if (*zones)
                        try_to_free_pages(zones, GFP_NOFS);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (printk_ratelimit())
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on.  Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_write;
        set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);

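/*
 * Illustrative sketch (not part of this file): the marking step roughly as
 * block_write_full_page() performs it - every buffer on the page is marked
 * async_write while the page is locked, and only then is the I/O submitted,
 * so end_buffer_async_write() can tell when the last one finishes.  This is
 * simplified (no dirty test, no error handling); the function name is
 * hypothetical.
 */
#if 0
static void example_write_page_buffers(struct page *page)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;

        do {                    /* phase 1: mark, under the page lock */
                lock_buffer(bh);
                mark_buffer_async_write(bh);
                bh = bh->b_this_page;
        } while (bh != head);

        set_page_writeback(page);
        unlock_page(page);

        do {                    /* phase 2: submit the I/O */
                struct buffer_head *next = bh->b_this_page;
                submit_bh(WRITE, bh);
                bh = next;
        } while (bh != head);
}
#endif
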
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (list_empty(&bh->b_assoc_buffers)) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

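/*
 * Illustrative sketch (not part of this file): how an ext2-like filesystem
 * ties an indirect block to the file that depends on it, so that a later
 * fsync() can find it via ->private_list.  The function name and the
 * origin of "bh" are hypothetical.
 */
#if 0
static int example_dirty_indirect(struct inode *inode,
                                  struct buffer_head *bh)
{
        /* Queue the blockdev buffer on inode->i_mapping->private_list. */
        mark_buffer_dirty_inode(bh, inode);

        /* Later, in ->fsync(): write and wait on everything queued above. */
        return sync_mapping_buffers(inode->i_mapping);
}
#endif
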
/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        struct address_space * const mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        spin_unlock(&mapping->private_lock);

        if (TestSetPageDirty(page))
                return 0;

        write_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                if (mapping_cap_account_dirty(mapping)) {
                        __inc_zone_page_state(page, NR_FILE_DIRTY);
                        task_io_account_write(PAGE_CACHE_SIZE);
                }
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        write_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        return 1;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        int err = 0, err2;

        INIT_LIST_HEAD(&tmp);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                __remove_assoc_queue(bh);
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * ll_rw_block() actually writes the current
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
                                ll_rw_block(SWRITE, 1, &bh);
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                list_del_init(&bh->b_assoc_buffers);
                get_bh(bh);
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_private = NULL;
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                init_buffer(bh, NULL, NULL);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /*
         * We're _really_ low on memory.  Now we just wait for old
         * buffer heads to become free due to finishing IO.  Since this
         * is an async request and the reserve list is empty, we're sure
         * there are async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

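/*
 * Illustrative sketch (not part of this file): the retry flag in use.  A
 * caller on the paging/swapping path must not fail, so it passes retry=1
 * and alloc_page_buffers() loops until it succeeds; an ordinary caller
 * passes retry=0 and handles NULL itself.  The function name is
 * hypothetical.
 */
#if 0
static struct buffer_head *example_make_buffers(struct page *page,
                                                unsigned long blocksize,
                                                int for_paging_io)
{
        /* retry=1 may sleep in free_more_memory() but never returns NULL */
        return alloc_page_buffers(page, blocksize, for_paging_io ? 1 : 0);
}
#endif
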
static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page)
                return NULL;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        init_page_buffers(page, bdev, block, size);
                        return page;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        init_page_buffers(page, bdev, block, size);
        spin_unlock(&inode->i_mapping->private_lock);
        return page;

failed:
        BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 *
 * Except that's a bug.  Attaching dirty buffers to a dirty
 * blockdev's page can result in filesystem corruption, because
 * some of those buffers may be aliases of filesystem data.
 * grow_dev_page() will go BUG() if this happens.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
        struct page *page;
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index.  (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
                        __FUNCTION__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }
        block = index << sizebits;
        /* Create a page with the proper size buffers. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
                return 0;
        unlock_page(page);
        page_cache_release(page);
        return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
        /* Size must be a multiple of the hard sector size */
        if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "hardsect size: %d\n",
                                        bdev_hardsect_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head *bh;
                int ret;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                ret = grow_buffers(bdev, block, size);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}

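/*
 * Worked example (illustrative, not part of this file) of the block-to-page
 * arithmetic in grow_buffers(): with PAGE_SIZE = 4096 and size = 512, the
 * sizebits loop yields sizebits = 3 (512 << 3 == 4096), so block 1000 lives
 * on page index 1000 >> 3 == 125, and that page's first block is
 * 125 << 3 == 1000 - i.e. eight 512-byte buffers per page, starting at a
 * block number rounded down to a multiple of eight.
 */
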
set, and 11061da177e4SLinus Torvalds * the page is tagged dirty in its radix tree. 11071da177e4SLinus Torvalds * 11081da177e4SLinus Torvalds * At all times, the dirtiness of the buffers represents the dirtiness of 11091da177e4SLinus Torvalds * subsections of the page. If the page has buffers, the page dirty bit is 11101da177e4SLinus Torvalds * merely a hint about the true dirty state. 11111da177e4SLinus Torvalds * 11121da177e4SLinus Torvalds * When a page is set dirty in its entirety, all its buffers are marked dirty 11131da177e4SLinus Torvalds * (if the page has buffers). 11141da177e4SLinus Torvalds * 11151da177e4SLinus Torvalds * When a buffer is marked dirty, its page is dirtied, but the page's other 11161da177e4SLinus Torvalds * buffers are not. 11171da177e4SLinus Torvalds * 11181da177e4SLinus Torvalds * Also. When blockdev buffers are explicitly read with bread(), they 11191da177e4SLinus Torvalds * individually become uptodate. But their backing page remains not 11201da177e4SLinus Torvalds * uptodate - even if all of its buffers are uptodate. A subsequent 11211da177e4SLinus Torvalds * block_read_full_page() against that page will discover all the uptodate 11221da177e4SLinus Torvalds * buffers, will set the page uptodate and will perform no I/O. 11231da177e4SLinus Torvalds */ 11241da177e4SLinus Torvalds 11251da177e4SLinus Torvalds /** 11261da177e4SLinus Torvalds * mark_buffer_dirty - mark a buffer_head as needing writeout 112767be2dd1SMartin Waitz * @bh: the buffer_head to mark dirty 11281da177e4SLinus Torvalds * 11291da177e4SLinus Torvalds * mark_buffer_dirty() will set the dirty bit against the buffer, then set its 11301da177e4SLinus Torvalds * backing page dirty, then tag the page as dirty in its address_space's radix 11311da177e4SLinus Torvalds * tree and then attach the address_space's inode to its superblock's dirty 11321da177e4SLinus Torvalds * inode list. 11331da177e4SLinus Torvalds * 11341da177e4SLinus Torvalds * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, 11351da177e4SLinus Torvalds * mapping->tree_lock and the global inode_lock. 11361da177e4SLinus Torvalds */ 11371da177e4SLinus Torvalds void fastcall mark_buffer_dirty(struct buffer_head *bh) 11381da177e4SLinus Torvalds { 11391da177e4SLinus Torvalds if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh)) 11401da177e4SLinus Torvalds __set_page_dirty_nobuffers(bh->b_page); 11411da177e4SLinus Torvalds } 11421da177e4SLinus Torvalds 11431da177e4SLinus Torvalds /* 11441da177e4SLinus Torvalds * Decrement a buffer_head's reference count. If all buffers against a page 11451da177e4SLinus Torvalds * have zero reference count, are clean and unlocked, and if the page is clean 11461da177e4SLinus Torvalds * and unlocked then try_to_free_buffers() may strip the buffers from the page 11471da177e4SLinus Torvalds * in preparation for freeing it (sometimes, rarely, buffers are removed from 11481da177e4SLinus Torvalds * a page but it ends up not being freed, and buffers may later be reattached). 
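 *
 * A minimal sketch of the refcounting contract this describes (an
 * illustration, not code from this file; "bdev" and "blocknr" are
 * assumed to be valid):
 *
 *	struct buffer_head *bh = __getblk(bdev, blocknr, 512);
 *
 *	get_bh(bh);	/* extra reference for a longer-lived user */
 *	...
 *	put_bh(bh);	/* drop the extra reference */
 *	brelse(bh);	/* drop the lookup reference; the page's buffers
 *			   become candidates for try_to_free_buffers() */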
11491da177e4SLinus Torvalds */ 11501da177e4SLinus Torvalds void __brelse(struct buffer_head * buf) 11511da177e4SLinus Torvalds { 11521da177e4SLinus Torvalds if (atomic_read(&buf->b_count)) { 11531da177e4SLinus Torvalds put_bh(buf); 11541da177e4SLinus Torvalds return; 11551da177e4SLinus Torvalds } 11561da177e4SLinus Torvalds printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 11571da177e4SLinus Torvalds WARN_ON(1); 11581da177e4SLinus Torvalds } 11591da177e4SLinus Torvalds 11601da177e4SLinus Torvalds /* 11611da177e4SLinus Torvalds * bforget() is like brelse(), except it discards any 11621da177e4SLinus Torvalds * potentially dirty data. 11631da177e4SLinus Torvalds */ 11641da177e4SLinus Torvalds void __bforget(struct buffer_head *bh) 11651da177e4SLinus Torvalds { 11661da177e4SLinus Torvalds clear_buffer_dirty(bh); 11671da177e4SLinus Torvalds if (!list_empty(&bh->b_assoc_buffers)) { 11681da177e4SLinus Torvalds struct address_space *buffer_mapping = bh->b_page->mapping; 11691da177e4SLinus Torvalds 11701da177e4SLinus Torvalds spin_lock(&buffer_mapping->private_lock); 11711da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers); 117258ff407bSJan Kara bh->b_assoc_map = NULL; 11731da177e4SLinus Torvalds spin_unlock(&buffer_mapping->private_lock); 11741da177e4SLinus Torvalds } 11751da177e4SLinus Torvalds __brelse(bh); 11761da177e4SLinus Torvalds } 11771da177e4SLinus Torvalds 11781da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh) 11791da177e4SLinus Torvalds { 11801da177e4SLinus Torvalds lock_buffer(bh); 11811da177e4SLinus Torvalds if (buffer_uptodate(bh)) { 11821da177e4SLinus Torvalds unlock_buffer(bh); 11831da177e4SLinus Torvalds return bh; 11841da177e4SLinus Torvalds } else { 11851da177e4SLinus Torvalds get_bh(bh); 11861da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_sync; 11871da177e4SLinus Torvalds submit_bh(READ, bh); 11881da177e4SLinus Torvalds wait_on_buffer(bh); 11891da177e4SLinus Torvalds if (buffer_uptodate(bh)) 11901da177e4SLinus Torvalds return bh; 11911da177e4SLinus Torvalds } 11921da177e4SLinus Torvalds brelse(bh); 11931da177e4SLinus Torvalds return NULL; 11941da177e4SLinus Torvalds } 11951da177e4SLinus Torvalds 11961da177e4SLinus Torvalds /* 11971da177e4SLinus Torvalds * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 11981da177e4SLinus Torvalds * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 11991da177e4SLinus Torvalds * refcount elevated by one when they're in an LRU. A buffer can only appear 12001da177e4SLinus Torvalds * once in a particular CPU's LRU. A single buffer can be present in multiple 12011da177e4SLinus Torvalds * CPU's LRUs at the same time. 12021da177e4SLinus Torvalds * 12031da177e4SLinus Torvalds * This is a transparent caching front-end to sb_bread(), sb_getblk() and 12041da177e4SLinus Torvalds * sb_find_get_block(). 12051da177e4SLinus Torvalds * 12061da177e4SLinus Torvalds * The LRUs themselves only need locking against invalidate_bh_lrus. We use 12071da177e4SLinus Torvalds * a local interrupt disable for that. 
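 *
 * The practical effect, as an illustrative sketch (assuming "sb" and
 * "blocknr" are valid): repeated lookups of the same block from one CPU
 * are expected to hit that CPU's LRU and skip the pagecache lookup:
 *
 *	bh = sb_bread(sb, blocknr);	/* slow path; installs bh in LRU */
 *	brelse(bh);
 *	bh = sb_bread(sb, blocknr);	/* likely served by lookup_bh_lru() */
 *	brelse(bh);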
12081da177e4SLinus Torvalds */ 12091da177e4SLinus Torvalds 12101da177e4SLinus Torvalds #define BH_LRU_SIZE 8 12111da177e4SLinus Torvalds 12121da177e4SLinus Torvalds struct bh_lru { 12131da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE]; 12141da177e4SLinus Torvalds }; 12151da177e4SLinus Torvalds 12161da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; 12171da177e4SLinus Torvalds 12181da177e4SLinus Torvalds #ifdef CONFIG_SMP 12191da177e4SLinus Torvalds #define bh_lru_lock() local_irq_disable() 12201da177e4SLinus Torvalds #define bh_lru_unlock() local_irq_enable() 12211da177e4SLinus Torvalds #else 12221da177e4SLinus Torvalds #define bh_lru_lock() preempt_disable() 12231da177e4SLinus Torvalds #define bh_lru_unlock() preempt_enable() 12241da177e4SLinus Torvalds #endif 12251da177e4SLinus Torvalds 12261da177e4SLinus Torvalds static inline void check_irqs_on(void) 12271da177e4SLinus Torvalds { 12281da177e4SLinus Torvalds #ifdef irqs_disabled 12291da177e4SLinus Torvalds BUG_ON(irqs_disabled()); 12301da177e4SLinus Torvalds #endif 12311da177e4SLinus Torvalds } 12321da177e4SLinus Torvalds 12331da177e4SLinus Torvalds /* 12341da177e4SLinus Torvalds * The LRU management algorithm is dopey-but-simple. Sorry. 12351da177e4SLinus Torvalds */ 12361da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh) 12371da177e4SLinus Torvalds { 12381da177e4SLinus Torvalds struct buffer_head *evictee = NULL; 12391da177e4SLinus Torvalds struct bh_lru *lru; 12401da177e4SLinus Torvalds 12411da177e4SLinus Torvalds check_irqs_on(); 12421da177e4SLinus Torvalds bh_lru_lock(); 12431da177e4SLinus Torvalds lru = &__get_cpu_var(bh_lrus); 12441da177e4SLinus Torvalds if (lru->bhs[0] != bh) { 12451da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE]; 12461da177e4SLinus Torvalds int in; 12471da177e4SLinus Torvalds int out = 0; 12481da177e4SLinus Torvalds 12491da177e4SLinus Torvalds get_bh(bh); 12501da177e4SLinus Torvalds bhs[out++] = bh; 12511da177e4SLinus Torvalds for (in = 0; in < BH_LRU_SIZE; in++) { 12521da177e4SLinus Torvalds struct buffer_head *bh2 = lru->bhs[in]; 12531da177e4SLinus Torvalds 12541da177e4SLinus Torvalds if (bh2 == bh) { 12551da177e4SLinus Torvalds __brelse(bh2); 12561da177e4SLinus Torvalds } else { 12571da177e4SLinus Torvalds if (out >= BH_LRU_SIZE) { 12581da177e4SLinus Torvalds BUG_ON(evictee != NULL); 12591da177e4SLinus Torvalds evictee = bh2; 12601da177e4SLinus Torvalds } else { 12611da177e4SLinus Torvalds bhs[out++] = bh2; 12621da177e4SLinus Torvalds } 12631da177e4SLinus Torvalds } 12641da177e4SLinus Torvalds } 12651da177e4SLinus Torvalds while (out < BH_LRU_SIZE) 12661da177e4SLinus Torvalds bhs[out++] = NULL; 12671da177e4SLinus Torvalds memcpy(lru->bhs, bhs, sizeof(bhs)); 12681da177e4SLinus Torvalds } 12691da177e4SLinus Torvalds bh_lru_unlock(); 12701da177e4SLinus Torvalds 12711da177e4SLinus Torvalds if (evictee) 12721da177e4SLinus Torvalds __brelse(evictee); 12731da177e4SLinus Torvalds } 12741da177e4SLinus Torvalds 12751da177e4SLinus Torvalds /* 12761da177e4SLinus Torvalds * Look up the bh in this cpu's LRU. If it's there, move it to the head. 
12771da177e4SLinus Torvalds */ 1278858119e1SArjan van de Ven static struct buffer_head * 12793991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) 12801da177e4SLinus Torvalds { 12811da177e4SLinus Torvalds struct buffer_head *ret = NULL; 12821da177e4SLinus Torvalds struct bh_lru *lru; 12833991d3bdSTomasz Kvarsin unsigned int i; 12841da177e4SLinus Torvalds 12851da177e4SLinus Torvalds check_irqs_on(); 12861da177e4SLinus Torvalds bh_lru_lock(); 12871da177e4SLinus Torvalds lru = &__get_cpu_var(bh_lrus); 12881da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 12891da177e4SLinus Torvalds struct buffer_head *bh = lru->bhs[i]; 12901da177e4SLinus Torvalds 12911da177e4SLinus Torvalds if (bh && bh->b_bdev == bdev && 12921da177e4SLinus Torvalds bh->b_blocknr == block && bh->b_size == size) { 12931da177e4SLinus Torvalds if (i) { 12941da177e4SLinus Torvalds while (i) { 12951da177e4SLinus Torvalds lru->bhs[i] = lru->bhs[i - 1]; 12961da177e4SLinus Torvalds i--; 12971da177e4SLinus Torvalds } 12981da177e4SLinus Torvalds lru->bhs[0] = bh; 12991da177e4SLinus Torvalds } 13001da177e4SLinus Torvalds get_bh(bh); 13011da177e4SLinus Torvalds ret = bh; 13021da177e4SLinus Torvalds break; 13031da177e4SLinus Torvalds } 13041da177e4SLinus Torvalds } 13051da177e4SLinus Torvalds bh_lru_unlock(); 13061da177e4SLinus Torvalds return ret; 13071da177e4SLinus Torvalds } 13081da177e4SLinus Torvalds 13091da177e4SLinus Torvalds /* 13101da177e4SLinus Torvalds * Perform a pagecache lookup for the matching buffer. If it's there, refresh 13111da177e4SLinus Torvalds * it in the LRU and mark it as accessed. If it is not present then return 13121da177e4SLinus Torvalds * NULL 13131da177e4SLinus Torvalds */ 13141da177e4SLinus Torvalds struct buffer_head * 13153991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size) 13161da177e4SLinus Torvalds { 13171da177e4SLinus Torvalds struct buffer_head *bh = lookup_bh_lru(bdev, block, size); 13181da177e4SLinus Torvalds 13191da177e4SLinus Torvalds if (bh == NULL) { 1320385fd4c5SCoywolf Qi Hunt bh = __find_get_block_slow(bdev, block); 13211da177e4SLinus Torvalds if (bh) 13221da177e4SLinus Torvalds bh_lru_install(bh); 13231da177e4SLinus Torvalds } 13241da177e4SLinus Torvalds if (bh) 13251da177e4SLinus Torvalds touch_buffer(bh); 13261da177e4SLinus Torvalds return bh; 13271da177e4SLinus Torvalds } 13281da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block); 13291da177e4SLinus Torvalds 13301da177e4SLinus Torvalds /* 13311da177e4SLinus Torvalds * __getblk will locate (and, if necessary, create) the buffer_head 13321da177e4SLinus Torvalds * which corresponds to the passed block_device, block and size. The 13331da177e4SLinus Torvalds * returned buffer has its reference count incremented. 13341da177e4SLinus Torvalds * 13351da177e4SLinus Torvalds * __getblk() cannot fail - it just keeps trying. If you pass it an 13361da177e4SLinus Torvalds * illegal block number, __getblk() will happily return a buffer_head 13371da177e4SLinus Torvalds * which represents the non-existent block. Very weird. 13381da177e4SLinus Torvalds * 13391da177e4SLinus Torvalds * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() 13401da177e4SLinus Torvalds * attempt is failing. FIXME, perhaps? 
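 *
 * Typical usage, as a hedged sketch (the device, block number and the
 * 512-byte block size are assumptions): obtain the buffer, overwrite it
 * in full, then mark it dirty and release it:
 *
 *	struct buffer_head *bh = __getblk(bdev, blocknr, 512);
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);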
13411da177e4SLinus Torvalds */ 13421da177e4SLinus Torvalds struct buffer_head * 13433991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size) 13441da177e4SLinus Torvalds { 13451da177e4SLinus Torvalds struct buffer_head *bh = __find_get_block(bdev, block, size); 13461da177e4SLinus Torvalds 13471da177e4SLinus Torvalds might_sleep(); 13481da177e4SLinus Torvalds if (bh == NULL) 13491da177e4SLinus Torvalds bh = __getblk_slow(bdev, block, size); 13501da177e4SLinus Torvalds return bh; 13511da177e4SLinus Torvalds } 13521da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk); 13531da177e4SLinus Torvalds 13541da177e4SLinus Torvalds /* 13551da177e4SLinus Torvalds * Do async read-ahead on a buffer.. 13561da177e4SLinus Torvalds */ 13573991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 13581da177e4SLinus Torvalds { 13591da177e4SLinus Torvalds struct buffer_head *bh = __getblk(bdev, block, size); 1360a3e713b5SAndrew Morton if (likely(bh)) { 13611da177e4SLinus Torvalds ll_rw_block(READA, 1, &bh); 13621da177e4SLinus Torvalds brelse(bh); 13631da177e4SLinus Torvalds } 1364a3e713b5SAndrew Morton } 13651da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead); 13661da177e4SLinus Torvalds 13671da177e4SLinus Torvalds /** 13681da177e4SLinus Torvalds * __bread() - reads a specified block and returns the bh 136967be2dd1SMartin Waitz * @bdev: the block_device to read from 13701da177e4SLinus Torvalds * @block: number of block 13711da177e4SLinus Torvalds * @size: size (in bytes) to read 13721da177e4SLinus Torvalds * 13731da177e4SLinus Torvalds * Reads a specified block, and returns buffer head that contains it. 13741da177e4SLinus Torvalds * It returns NULL if the block was unreadable. 13751da177e4SLinus Torvalds */ 13761da177e4SLinus Torvalds struct buffer_head * 13773991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size) 13781da177e4SLinus Torvalds { 13791da177e4SLinus Torvalds struct buffer_head *bh = __getblk(bdev, block, size); 13801da177e4SLinus Torvalds 1381a3e713b5SAndrew Morton if (likely(bh) && !buffer_uptodate(bh)) 13821da177e4SLinus Torvalds bh = __bread_slow(bh); 13831da177e4SLinus Torvalds return bh; 13841da177e4SLinus Torvalds } 13851da177e4SLinus Torvalds EXPORT_SYMBOL(__bread); 13861da177e4SLinus Torvalds 13871da177e4SLinus Torvalds /* 13881da177e4SLinus Torvalds * invalidate_bh_lrus() is called rarely - but not only at unmount. 13891da177e4SLinus Torvalds * This doesn't race because it runs in each cpu either in irq 13901da177e4SLinus Torvalds * or with preempt disabled. 
13911da177e4SLinus Torvalds  */
13921da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
13931da177e4SLinus Torvalds {
13941da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
13951da177e4SLinus Torvalds 	int i;
13961da177e4SLinus Torvalds 
13971da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
13981da177e4SLinus Torvalds 		brelse(b->bhs[i]);
13991da177e4SLinus Torvalds 		b->bhs[i] = NULL;
14001da177e4SLinus Torvalds 	}
14011da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14021da177e4SLinus Torvalds }
14031da177e4SLinus Torvalds 
1404f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14051da177e4SLinus Torvalds {
14061da177e4SLinus Torvalds 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
14071da177e4SLinus Torvalds }
14081da177e4SLinus Torvalds 
14091da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14101da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14111da177e4SLinus Torvalds {
14121da177e4SLinus Torvalds 	bh->b_page = page;
1413e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14141da177e4SLinus Torvalds 	if (PageHighMem(page))
14151da177e4SLinus Torvalds 		/*
14161da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14171da177e4SLinus Torvalds 		 */
14181da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14191da177e4SLinus Torvalds 	else
14201da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14211da177e4SLinus Torvalds }
14221da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14231da177e4SLinus Torvalds 
14241da177e4SLinus Torvalds /*
14251da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14261da177e4SLinus Torvalds  */
1427858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14281da177e4SLinus Torvalds {
14291da177e4SLinus Torvalds 	lock_buffer(bh);
14301da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14311da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14321da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14331da177e4SLinus Torvalds 	clear_buffer_req(bh);
14341da177e4SLinus Torvalds 	clear_buffer_new(bh);
14351da177e4SLinus Torvalds 	clear_buffer_delay(bh);
143633a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14371da177e4SLinus Torvalds 	unlock_buffer(bh);
14381da177e4SLinus Torvalds }
14391da177e4SLinus Torvalds 
14401da177e4SLinus Torvalds /**
14411da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
14421da177e4SLinus Torvalds  *
14431da177e4SLinus Torvalds  * @page: the page which is affected
14441da177e4SLinus Torvalds  * @offset: the index of the truncation point
14451da177e4SLinus Torvalds  *
14461da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
14471da177e4SLinus Torvalds  * invalidated by a truncate operation.
14481da177e4SLinus Torvalds  *
14491da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
14501da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14511da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14521da177e4SLinus Torvalds  * point.  Because the caller is about to free (and possibly reuse) those
14531da177e4SLinus Torvalds  * blocks on-disk.
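 *
 * As an illustration only (not part of this file), a filesystem's
 * ->invalidatepage address_space operation commonly just forwards here:
 *
 *	static void example_invalidatepage(struct page *page,
 *					unsigned long offset)
 *	{
 *		block_invalidatepage(page, offset);
 *	}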
14541da177e4SLinus Torvalds */ 14552ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset) 14561da177e4SLinus Torvalds { 14571da177e4SLinus Torvalds struct buffer_head *head, *bh, *next; 14581da177e4SLinus Torvalds unsigned int curr_off = 0; 14591da177e4SLinus Torvalds 14601da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 14611da177e4SLinus Torvalds if (!page_has_buffers(page)) 14621da177e4SLinus Torvalds goto out; 14631da177e4SLinus Torvalds 14641da177e4SLinus Torvalds head = page_buffers(page); 14651da177e4SLinus Torvalds bh = head; 14661da177e4SLinus Torvalds do { 14671da177e4SLinus Torvalds unsigned int next_off = curr_off + bh->b_size; 14681da177e4SLinus Torvalds next = bh->b_this_page; 14691da177e4SLinus Torvalds 14701da177e4SLinus Torvalds /* 14711da177e4SLinus Torvalds * is this block fully invalidated? 14721da177e4SLinus Torvalds */ 14731da177e4SLinus Torvalds if (offset <= curr_off) 14741da177e4SLinus Torvalds discard_buffer(bh); 14751da177e4SLinus Torvalds curr_off = next_off; 14761da177e4SLinus Torvalds bh = next; 14771da177e4SLinus Torvalds } while (bh != head); 14781da177e4SLinus Torvalds 14791da177e4SLinus Torvalds /* 14801da177e4SLinus Torvalds * We release buffers only if the entire page is being invalidated. 14811da177e4SLinus Torvalds * The get_block cached value has been unconditionally invalidated, 14821da177e4SLinus Torvalds * so real IO is not possible anymore. 14831da177e4SLinus Torvalds */ 14841da177e4SLinus Torvalds if (offset == 0) 14852ff28e22SNeilBrown try_to_release_page(page, 0); 14861da177e4SLinus Torvalds out: 14872ff28e22SNeilBrown return; 14881da177e4SLinus Torvalds } 14891da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage); 14901da177e4SLinus Torvalds 14911da177e4SLinus Torvalds /* 14921da177e4SLinus Torvalds * We attach and possibly dirty the buffers atomically wrt 14931da177e4SLinus Torvalds * __set_page_dirty_buffers() via private_lock. try_to_free_buffers 14941da177e4SLinus Torvalds * is already excluded via the page lock. 
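 *
 * Sketch of the usual caller pattern, mirroring the helpers later in
 * this file (the blocksize derivation assumes a regular inode):
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 *	head = page_buffers(page);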
14951da177e4SLinus Torvalds  */
14961da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
14971da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
14981da177e4SLinus Torvalds {
14991da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15001da177e4SLinus Torvalds 
15011da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
15021da177e4SLinus Torvalds 	bh = head;
15031da177e4SLinus Torvalds 	do {
15041da177e4SLinus Torvalds 		bh->b_state |= b_state;
15051da177e4SLinus Torvalds 		tail = bh;
15061da177e4SLinus Torvalds 		bh = bh->b_this_page;
15071da177e4SLinus Torvalds 	} while (bh);
15081da177e4SLinus Torvalds 	tail->b_this_page = head;
15091da177e4SLinus Torvalds 
15101da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15111da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15121da177e4SLinus Torvalds 		bh = head;
15131da177e4SLinus Torvalds 		do {
15141da177e4SLinus Torvalds 			if (PageDirty(page))
15151da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15161da177e4SLinus Torvalds 			if (PageUptodate(page))
15171da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15181da177e4SLinus Torvalds 			bh = bh->b_this_page;
15191da177e4SLinus Torvalds 		} while (bh != head);
15201da177e4SLinus Torvalds 	}
15211da177e4SLinus Torvalds 	attach_page_buffers(page, head);
15221da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15231da177e4SLinus Torvalds }
15241da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15251da177e4SLinus Torvalds 
15261da177e4SLinus Torvalds /*
15271da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15281da177e4SLinus Torvalds  * buffer-cache aliases starting from return from that function and
15291da177e4SLinus Torvalds  * until the moment when something will explicitly mark the buffer
15301da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we will free that block ;-)
15311da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15321da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to use
15331da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
15341da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15351da177e4SLinus Torvalds  * anyone who might pick it with bread() afterwards...
15361da177e4SLinus Torvalds  *
15371da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
15381da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
15391da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
15401da177e4SLinus Torvalds  * only if we really need to.  That happens here.
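 *
 * The calling convention, sketched from the write paths below: once
 * get_block() hands back a buffer_new() block, any stale alias must be
 * killed before data can be written through it:
 *
 *	if (buffer_new(bh))
 *		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);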
15411da177e4SLinus Torvalds  */
15421da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
15431da177e4SLinus Torvalds {
15441da177e4SLinus Torvalds 	struct buffer_head *old_bh;
15451da177e4SLinus Torvalds 
15461da177e4SLinus Torvalds 	might_sleep();
15471da177e4SLinus Torvalds 
1548385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
15491da177e4SLinus Torvalds 	if (old_bh) {
15501da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
15511da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
15521da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
15531da177e4SLinus Torvalds 		__brelse(old_bh);
15541da177e4SLinus Torvalds 	}
15551da177e4SLinus Torvalds }
15561da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
15571da177e4SLinus Torvalds 
15581da177e4SLinus Torvalds /*
15591da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
15601da177e4SLinus Torvalds  *
15611da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
15621da177e4SLinus Torvalds  *
15631da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
15641da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
15651da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
15661da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
15671da177e4SLinus Torvalds  *
15681da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
15691da177e4SLinus Torvalds  */
15701da177e4SLinus Torvalds 
15711da177e4SLinus Torvalds /*
15721da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
15731da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
15741da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
15751da177e4SLinus Torvalds  * state inside lock_buffer().
15761da177e4SLinus Torvalds  *
15771da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
15781da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
15791da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
15801da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
15811da177e4SLinus Torvalds  * prevents this contention from occurring.
15821da177e4SLinus Torvalds  */
15831da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
15841da177e4SLinus Torvalds 			get_block_t *get_block, struct writeback_control *wbc)
15851da177e4SLinus Torvalds {
15861da177e4SLinus Torvalds 	int err;
15871da177e4SLinus Torvalds 	sector_t block;
15881da177e4SLinus Torvalds 	sector_t last_block;
1589f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1590b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
15911da177e4SLinus Torvalds 	int nr_underway = 0;
15921da177e4SLinus Torvalds 
15931da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
15941da177e4SLinus Torvalds 
15951da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
15961da177e4SLinus Torvalds 
15971da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1598b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
15991da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
16001da177e4SLinus Torvalds 	}
16011da177e4SLinus Torvalds 
16021da177e4SLinus Torvalds 	/*
16031da177e4SLinus Torvalds 	 * Be very careful.
We have no exclusion from __set_page_dirty_buffers 16041da177e4SLinus Torvalds * here, and the (potentially unmapped) buffers may become dirty at 16051da177e4SLinus Torvalds * any time. If a buffer becomes dirty here after we've inspected it 16061da177e4SLinus Torvalds * then we just miss that fact, and the page stays dirty. 16071da177e4SLinus Torvalds * 16081da177e4SLinus Torvalds * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; 16091da177e4SLinus Torvalds * handle that here by just cleaning them. 16101da177e4SLinus Torvalds */ 16111da177e4SLinus Torvalds 161254b21a79SAndrew Morton block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 16131da177e4SLinus Torvalds head = page_buffers(page); 16141da177e4SLinus Torvalds bh = head; 16151da177e4SLinus Torvalds 16161da177e4SLinus Torvalds /* 16171da177e4SLinus Torvalds * Get all the dirty buffers mapped to disk addresses and 16181da177e4SLinus Torvalds * handle any aliases from the underlying blockdev's mapping. 16191da177e4SLinus Torvalds */ 16201da177e4SLinus Torvalds do { 16211da177e4SLinus Torvalds if (block > last_block) { 16221da177e4SLinus Torvalds /* 16231da177e4SLinus Torvalds * mapped buffers outside i_size will occur, because 16241da177e4SLinus Torvalds * this page can be outside i_size when there is a 16251da177e4SLinus Torvalds * truncate in progress. 16261da177e4SLinus Torvalds */ 16271da177e4SLinus Torvalds /* 16281da177e4SLinus Torvalds * The buffer was zeroed by block_write_full_page() 16291da177e4SLinus Torvalds */ 16301da177e4SLinus Torvalds clear_buffer_dirty(bh); 16311da177e4SLinus Torvalds set_buffer_uptodate(bh); 16321da177e4SLinus Torvalds } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { 1633b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 16341da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 16351da177e4SLinus Torvalds if (err) 16361da177e4SLinus Torvalds goto recover; 16371da177e4SLinus Torvalds if (buffer_new(bh)) { 16381da177e4SLinus Torvalds /* blockdev mappings never come here */ 16391da177e4SLinus Torvalds clear_buffer_new(bh); 16401da177e4SLinus Torvalds unmap_underlying_metadata(bh->b_bdev, 16411da177e4SLinus Torvalds bh->b_blocknr); 16421da177e4SLinus Torvalds } 16431da177e4SLinus Torvalds } 16441da177e4SLinus Torvalds bh = bh->b_this_page; 16451da177e4SLinus Torvalds block++; 16461da177e4SLinus Torvalds } while (bh != head); 16471da177e4SLinus Torvalds 16481da177e4SLinus Torvalds do { 16491da177e4SLinus Torvalds if (!buffer_mapped(bh)) 16501da177e4SLinus Torvalds continue; 16511da177e4SLinus Torvalds /* 16521da177e4SLinus Torvalds * If it's a fully non-blocking write attempt and we cannot 16531da177e4SLinus Torvalds * lock the buffer then redirty the page. Note that this can 16541da177e4SLinus Torvalds * potentially cause a busy-wait loop from pdflush and kswapd 16551da177e4SLinus Torvalds * activity, but those code paths have their own higher-level 16561da177e4SLinus Torvalds * throttling. 
16571da177e4SLinus Torvalds */ 16581da177e4SLinus Torvalds if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { 16591da177e4SLinus Torvalds lock_buffer(bh); 16601da177e4SLinus Torvalds } else if (test_set_buffer_locked(bh)) { 16611da177e4SLinus Torvalds redirty_page_for_writepage(wbc, page); 16621da177e4SLinus Torvalds continue; 16631da177e4SLinus Torvalds } 16641da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 16651da177e4SLinus Torvalds mark_buffer_async_write(bh); 16661da177e4SLinus Torvalds } else { 16671da177e4SLinus Torvalds unlock_buffer(bh); 16681da177e4SLinus Torvalds } 16691da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 16701da177e4SLinus Torvalds 16711da177e4SLinus Torvalds /* 16721da177e4SLinus Torvalds * The page and its buffers are protected by PageWriteback(), so we can 16731da177e4SLinus Torvalds * drop the bh refcounts early. 16741da177e4SLinus Torvalds */ 16751da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 16761da177e4SLinus Torvalds set_page_writeback(page); 16771da177e4SLinus Torvalds 16781da177e4SLinus Torvalds do { 16791da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 16801da177e4SLinus Torvalds if (buffer_async_write(bh)) { 16811da177e4SLinus Torvalds submit_bh(WRITE, bh); 16821da177e4SLinus Torvalds nr_underway++; 1683ad576e63SNick Piggin } 16841da177e4SLinus Torvalds bh = next; 16851da177e4SLinus Torvalds } while (bh != head); 168605937baaSAndrew Morton unlock_page(page); 16871da177e4SLinus Torvalds 16881da177e4SLinus Torvalds err = 0; 16891da177e4SLinus Torvalds done: 16901da177e4SLinus Torvalds if (nr_underway == 0) { 16911da177e4SLinus Torvalds /* 16921da177e4SLinus Torvalds * The page was marked dirty, but the buffers were 16931da177e4SLinus Torvalds * clean. Someone wrote them back by hand with 16941da177e4SLinus Torvalds * ll_rw_block/submit_bh. A rare case. 16951da177e4SLinus Torvalds */ 16961da177e4SLinus Torvalds end_page_writeback(page); 16973d67f2d7SNick Piggin 16981da177e4SLinus Torvalds /* 16991da177e4SLinus Torvalds * The page and buffer_heads can be released at any time from 17001da177e4SLinus Torvalds * here on. 17011da177e4SLinus Torvalds */ 17021da177e4SLinus Torvalds wbc->pages_skipped++; /* We didn't write this page */ 17031da177e4SLinus Torvalds } 17041da177e4SLinus Torvalds return err; 17051da177e4SLinus Torvalds 17061da177e4SLinus Torvalds recover: 17071da177e4SLinus Torvalds /* 17081da177e4SLinus Torvalds * ENOSPC, or some other error. We may already have added some 17091da177e4SLinus Torvalds * blocks to the file, so we need to write these out to avoid 17101da177e4SLinus Torvalds * exposing stale data. 17111da177e4SLinus Torvalds * The page is currently locked and not marked for writeback 17121da177e4SLinus Torvalds */ 17131da177e4SLinus Torvalds bh = head; 17141da177e4SLinus Torvalds /* Recovery: lock and submit the mapped buffers */ 17151da177e4SLinus Torvalds do { 17161da177e4SLinus Torvalds if (buffer_mapped(bh) && buffer_dirty(bh)) { 17171da177e4SLinus Torvalds lock_buffer(bh); 17181da177e4SLinus Torvalds mark_buffer_async_write(bh); 17191da177e4SLinus Torvalds } else { 17201da177e4SLinus Torvalds /* 17211da177e4SLinus Torvalds * The buffer may have been set dirty during 17221da177e4SLinus Torvalds * attachment to a dirty page. 
17231da177e4SLinus Torvalds */ 17241da177e4SLinus Torvalds clear_buffer_dirty(bh); 17251da177e4SLinus Torvalds } 17261da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 17271da177e4SLinus Torvalds SetPageError(page); 17281da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 17297e4c3690SAndrew Morton mapping_set_error(page->mapping, err); 17301da177e4SLinus Torvalds set_page_writeback(page); 17311da177e4SLinus Torvalds do { 17321da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 17331da177e4SLinus Torvalds if (buffer_async_write(bh)) { 17341da177e4SLinus Torvalds clear_buffer_dirty(bh); 17351da177e4SLinus Torvalds submit_bh(WRITE, bh); 17361da177e4SLinus Torvalds nr_underway++; 1737ad576e63SNick Piggin } 17381da177e4SLinus Torvalds bh = next; 17391da177e4SLinus Torvalds } while (bh != head); 1740ffda9d30SNick Piggin unlock_page(page); 17411da177e4SLinus Torvalds goto done; 17421da177e4SLinus Torvalds } 17431da177e4SLinus Torvalds 17441da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page, 17451da177e4SLinus Torvalds unsigned from, unsigned to, get_block_t *get_block) 17461da177e4SLinus Torvalds { 17471da177e4SLinus Torvalds unsigned block_start, block_end; 17481da177e4SLinus Torvalds sector_t block; 17491da177e4SLinus Torvalds int err = 0; 17501da177e4SLinus Torvalds unsigned blocksize, bbits; 17511da177e4SLinus Torvalds struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 17521da177e4SLinus Torvalds 17531da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 17541da177e4SLinus Torvalds BUG_ON(from > PAGE_CACHE_SIZE); 17551da177e4SLinus Torvalds BUG_ON(to > PAGE_CACHE_SIZE); 17561da177e4SLinus Torvalds BUG_ON(from > to); 17571da177e4SLinus Torvalds 17581da177e4SLinus Torvalds blocksize = 1 << inode->i_blkbits; 17591da177e4SLinus Torvalds if (!page_has_buffers(page)) 17601da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0); 17611da177e4SLinus Torvalds head = page_buffers(page); 17621da177e4SLinus Torvalds 17631da177e4SLinus Torvalds bbits = inode->i_blkbits; 17641da177e4SLinus Torvalds block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); 17651da177e4SLinus Torvalds 17661da177e4SLinus Torvalds for(bh = head, block_start = 0; bh != head || !block_start; 17671da177e4SLinus Torvalds block++, block_start=block_end, bh = bh->b_this_page) { 17681da177e4SLinus Torvalds block_end = block_start + blocksize; 17691da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 17701da177e4SLinus Torvalds if (PageUptodate(page)) { 17711da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 17721da177e4SLinus Torvalds set_buffer_uptodate(bh); 17731da177e4SLinus Torvalds } 17741da177e4SLinus Torvalds continue; 17751da177e4SLinus Torvalds } 17761da177e4SLinus Torvalds if (buffer_new(bh)) 17771da177e4SLinus Torvalds clear_buffer_new(bh); 17781da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 1779b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 17801da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 17811da177e4SLinus Torvalds if (err) 1782f3ddbdc6SNick Piggin break; 17831da177e4SLinus Torvalds if (buffer_new(bh)) { 17841da177e4SLinus Torvalds unmap_underlying_metadata(bh->b_bdev, 17851da177e4SLinus Torvalds bh->b_blocknr); 17861da177e4SLinus Torvalds if (PageUptodate(page)) { 17871da177e4SLinus Torvalds set_buffer_uptodate(bh); 17881da177e4SLinus Torvalds continue; 17891da177e4SLinus Torvalds } 17901da177e4SLinus Torvalds if (block_end > to || block_start < from) { 17911da177e4SLinus Torvalds void 
*kaddr; 17921da177e4SLinus Torvalds 17931da177e4SLinus Torvalds kaddr = kmap_atomic(page, KM_USER0); 17941da177e4SLinus Torvalds if (block_end > to) 17951da177e4SLinus Torvalds memset(kaddr+to, 0, 17961da177e4SLinus Torvalds block_end-to); 17971da177e4SLinus Torvalds if (block_start < from) 17981da177e4SLinus Torvalds memset(kaddr+block_start, 17991da177e4SLinus Torvalds 0, from-block_start); 18001da177e4SLinus Torvalds flush_dcache_page(page); 18011da177e4SLinus Torvalds kunmap_atomic(kaddr, KM_USER0); 18021da177e4SLinus Torvalds } 18031da177e4SLinus Torvalds continue; 18041da177e4SLinus Torvalds } 18051da177e4SLinus Torvalds } 18061da177e4SLinus Torvalds if (PageUptodate(page)) { 18071da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 18081da177e4SLinus Torvalds set_buffer_uptodate(bh); 18091da177e4SLinus Torvalds continue; 18101da177e4SLinus Torvalds } 18111da177e4SLinus Torvalds if (!buffer_uptodate(bh) && !buffer_delay(bh) && 181233a266ddSDavid Chinner !buffer_unwritten(bh) && 18131da177e4SLinus Torvalds (block_start < from || block_end > to)) { 18141da177e4SLinus Torvalds ll_rw_block(READ, 1, &bh); 18151da177e4SLinus Torvalds *wait_bh++=bh; 18161da177e4SLinus Torvalds } 18171da177e4SLinus Torvalds } 18181da177e4SLinus Torvalds /* 18191da177e4SLinus Torvalds * If we issued read requests - let them complete. 18201da177e4SLinus Torvalds */ 18211da177e4SLinus Torvalds while(wait_bh > wait) { 18221da177e4SLinus Torvalds wait_on_buffer(*--wait_bh); 18231da177e4SLinus Torvalds if (!buffer_uptodate(*wait_bh)) 1824f3ddbdc6SNick Piggin err = -EIO; 18251da177e4SLinus Torvalds } 1826152becd2SAnton Altaparmakov if (!err) { 1827152becd2SAnton Altaparmakov bh = head; 1828152becd2SAnton Altaparmakov do { 1829152becd2SAnton Altaparmakov if (buffer_new(bh)) 1830152becd2SAnton Altaparmakov clear_buffer_new(bh); 1831152becd2SAnton Altaparmakov } while ((bh = bh->b_this_page) != head); 1832152becd2SAnton Altaparmakov return 0; 1833152becd2SAnton Altaparmakov } 1834f3ddbdc6SNick Piggin /* Error case: */ 18351da177e4SLinus Torvalds /* 18361da177e4SLinus Torvalds * Zero out any newly allocated blocks to avoid exposing stale 18371da177e4SLinus Torvalds * data. If BH_New is set, we know that the block was newly 18381da177e4SLinus Torvalds * allocated in the above loop. 
18391da177e4SLinus Torvalds */ 18401da177e4SLinus Torvalds bh = head; 18411da177e4SLinus Torvalds block_start = 0; 18421da177e4SLinus Torvalds do { 18431da177e4SLinus Torvalds block_end = block_start+blocksize; 18441da177e4SLinus Torvalds if (block_end <= from) 18451da177e4SLinus Torvalds goto next_bh; 18461da177e4SLinus Torvalds if (block_start >= to) 18471da177e4SLinus Torvalds break; 18481da177e4SLinus Torvalds if (buffer_new(bh)) { 18491da177e4SLinus Torvalds clear_buffer_new(bh); 185001f2705dSNate Diller zero_user_page(page, block_start, bh->b_size, KM_USER0); 18511da177e4SLinus Torvalds set_buffer_uptodate(bh); 18521da177e4SLinus Torvalds mark_buffer_dirty(bh); 18531da177e4SLinus Torvalds } 18541da177e4SLinus Torvalds next_bh: 18551da177e4SLinus Torvalds block_start = block_end; 18561da177e4SLinus Torvalds bh = bh->b_this_page; 18571da177e4SLinus Torvalds } while (bh != head); 18581da177e4SLinus Torvalds return err; 18591da177e4SLinus Torvalds } 18601da177e4SLinus Torvalds 18611da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page, 18621da177e4SLinus Torvalds unsigned from, unsigned to) 18631da177e4SLinus Torvalds { 18641da177e4SLinus Torvalds unsigned block_start, block_end; 18651da177e4SLinus Torvalds int partial = 0; 18661da177e4SLinus Torvalds unsigned blocksize; 18671da177e4SLinus Torvalds struct buffer_head *bh, *head; 18681da177e4SLinus Torvalds 18691da177e4SLinus Torvalds blocksize = 1 << inode->i_blkbits; 18701da177e4SLinus Torvalds 18711da177e4SLinus Torvalds for(bh = head = page_buffers(page), block_start = 0; 18721da177e4SLinus Torvalds bh != head || !block_start; 18731da177e4SLinus Torvalds block_start=block_end, bh = bh->b_this_page) { 18741da177e4SLinus Torvalds block_end = block_start + blocksize; 18751da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 18761da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 18771da177e4SLinus Torvalds partial = 1; 18781da177e4SLinus Torvalds } else { 18791da177e4SLinus Torvalds set_buffer_uptodate(bh); 18801da177e4SLinus Torvalds mark_buffer_dirty(bh); 18811da177e4SLinus Torvalds } 18821da177e4SLinus Torvalds } 18831da177e4SLinus Torvalds 18841da177e4SLinus Torvalds /* 18851da177e4SLinus Torvalds * If this is a partial write which happened to make all buffers 18861da177e4SLinus Torvalds * uptodate then we can optimize away a bogus readpage() for 18871da177e4SLinus Torvalds * the next read(). Here we 'discover' whether the page went 18881da177e4SLinus Torvalds * uptodate as a result of this (potentially partial) write. 18891da177e4SLinus Torvalds */ 18901da177e4SLinus Torvalds if (!partial) 18911da177e4SLinus Torvalds SetPageUptodate(page); 18921da177e4SLinus Torvalds return 0; 18931da177e4SLinus Torvalds } 18941da177e4SLinus Torvalds 18951da177e4SLinus Torvalds /* 18961da177e4SLinus Torvalds * Generic "read page" function for block devices that have the normal 18971da177e4SLinus Torvalds * get_block functionality. This is most of the block device filesystems. 18981da177e4SLinus Torvalds * Reads the page asynchronously --- the unlock_buffer() and 18991da177e4SLinus Torvalds * set/clear_buffer_uptodate() functions propagate buffer state into the 19001da177e4SLinus Torvalds * page struct once IO has completed. 
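 *
 * An illustrative wrapper (example_get_block is hypothetical): a
 * filesystem typically exposes this as its ->readpage operation:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, example_get_block);
 *	}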
19011da177e4SLinus Torvalds */ 19021da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block) 19031da177e4SLinus Torvalds { 19041da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 19051da177e4SLinus Torvalds sector_t iblock, lblock; 19061da177e4SLinus Torvalds struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 19071da177e4SLinus Torvalds unsigned int blocksize; 19081da177e4SLinus Torvalds int nr, i; 19091da177e4SLinus Torvalds int fully_mapped = 1; 19101da177e4SLinus Torvalds 1911cd7619d6SMatt Mackall BUG_ON(!PageLocked(page)); 19121da177e4SLinus Torvalds blocksize = 1 << inode->i_blkbits; 19131da177e4SLinus Torvalds if (!page_has_buffers(page)) 19141da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0); 19151da177e4SLinus Torvalds head = page_buffers(page); 19161da177e4SLinus Torvalds 19171da177e4SLinus Torvalds iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 19181da177e4SLinus Torvalds lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits; 19191da177e4SLinus Torvalds bh = head; 19201da177e4SLinus Torvalds nr = 0; 19211da177e4SLinus Torvalds i = 0; 19221da177e4SLinus Torvalds 19231da177e4SLinus Torvalds do { 19241da177e4SLinus Torvalds if (buffer_uptodate(bh)) 19251da177e4SLinus Torvalds continue; 19261da177e4SLinus Torvalds 19271da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 1928c64610baSAndrew Morton int err = 0; 1929c64610baSAndrew Morton 19301da177e4SLinus Torvalds fully_mapped = 0; 19311da177e4SLinus Torvalds if (iblock < lblock) { 1932b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 1933c64610baSAndrew Morton err = get_block(inode, iblock, bh, 0); 1934c64610baSAndrew Morton if (err) 19351da177e4SLinus Torvalds SetPageError(page); 19361da177e4SLinus Torvalds } 19371da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 193801f2705dSNate Diller zero_user_page(page, i * blocksize, blocksize, 193901f2705dSNate Diller KM_USER0); 1940c64610baSAndrew Morton if (!err) 19411da177e4SLinus Torvalds set_buffer_uptodate(bh); 19421da177e4SLinus Torvalds continue; 19431da177e4SLinus Torvalds } 19441da177e4SLinus Torvalds /* 19451da177e4SLinus Torvalds * get_block() might have updated the buffer 19461da177e4SLinus Torvalds * synchronously 19471da177e4SLinus Torvalds */ 19481da177e4SLinus Torvalds if (buffer_uptodate(bh)) 19491da177e4SLinus Torvalds continue; 19501da177e4SLinus Torvalds } 19511da177e4SLinus Torvalds arr[nr++] = bh; 19521da177e4SLinus Torvalds } while (i++, iblock++, (bh = bh->b_this_page) != head); 19531da177e4SLinus Torvalds 19541da177e4SLinus Torvalds if (fully_mapped) 19551da177e4SLinus Torvalds SetPageMappedToDisk(page); 19561da177e4SLinus Torvalds 19571da177e4SLinus Torvalds if (!nr) { 19581da177e4SLinus Torvalds /* 19591da177e4SLinus Torvalds * All buffers are uptodate - we can set the page uptodate 19601da177e4SLinus Torvalds * as well. But not if get_block() returned an error. 
19611da177e4SLinus Torvalds */ 19621da177e4SLinus Torvalds if (!PageError(page)) 19631da177e4SLinus Torvalds SetPageUptodate(page); 19641da177e4SLinus Torvalds unlock_page(page); 19651da177e4SLinus Torvalds return 0; 19661da177e4SLinus Torvalds } 19671da177e4SLinus Torvalds 19681da177e4SLinus Torvalds /* Stage two: lock the buffers */ 19691da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 19701da177e4SLinus Torvalds bh = arr[i]; 19711da177e4SLinus Torvalds lock_buffer(bh); 19721da177e4SLinus Torvalds mark_buffer_async_read(bh); 19731da177e4SLinus Torvalds } 19741da177e4SLinus Torvalds 19751da177e4SLinus Torvalds /* 19761da177e4SLinus Torvalds * Stage 3: start the IO. Check for uptodateness 19771da177e4SLinus Torvalds * inside the buffer lock in case another process reading 19781da177e4SLinus Torvalds * the underlying blockdev brought it uptodate (the sct fix). 19791da177e4SLinus Torvalds */ 19801da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 19811da177e4SLinus Torvalds bh = arr[i]; 19821da177e4SLinus Torvalds if (buffer_uptodate(bh)) 19831da177e4SLinus Torvalds end_buffer_async_read(bh, 1); 19841da177e4SLinus Torvalds else 19851da177e4SLinus Torvalds submit_bh(READ, bh); 19861da177e4SLinus Torvalds } 19871da177e4SLinus Torvalds return 0; 19881da177e4SLinus Torvalds } 19891da177e4SLinus Torvalds 19901da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding 19911da177e4SLinus Torvalds * truncates. Uses prepare/commit_write to allow the filesystem to 19921da177e4SLinus Torvalds * deal with the hole. 19931da177e4SLinus Torvalds */ 199405eb0b51SOGAWA Hirofumi static int __generic_cont_expand(struct inode *inode, loff_t size, 199505eb0b51SOGAWA Hirofumi pgoff_t index, unsigned int offset) 19961da177e4SLinus Torvalds { 19971da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 19981da177e4SLinus Torvalds struct page *page; 199905eb0b51SOGAWA Hirofumi unsigned long limit; 20001da177e4SLinus Torvalds int err; 20011da177e4SLinus Torvalds 20021da177e4SLinus Torvalds err = -EFBIG; 20031da177e4SLinus Torvalds limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 20041da177e4SLinus Torvalds if (limit != RLIM_INFINITY && size > (loff_t)limit) { 20051da177e4SLinus Torvalds send_sig(SIGXFSZ, current, 0); 20061da177e4SLinus Torvalds goto out; 20071da177e4SLinus Torvalds } 20081da177e4SLinus Torvalds if (size > inode->i_sb->s_maxbytes) 20091da177e4SLinus Torvalds goto out; 20101da177e4SLinus Torvalds 201105eb0b51SOGAWA Hirofumi err = -ENOMEM; 201205eb0b51SOGAWA Hirofumi page = grab_cache_page(mapping, index); 201305eb0b51SOGAWA Hirofumi if (!page) 201405eb0b51SOGAWA Hirofumi goto out; 201505eb0b51SOGAWA Hirofumi err = mapping->a_ops->prepare_write(NULL, page, offset, offset); 201605eb0b51SOGAWA Hirofumi if (err) { 201705eb0b51SOGAWA Hirofumi /* 201805eb0b51SOGAWA Hirofumi * ->prepare_write() may have instantiated a few blocks 201905eb0b51SOGAWA Hirofumi * outside i_size. Trim these off again. 
202005eb0b51SOGAWA Hirofumi 		 */
202105eb0b51SOGAWA Hirofumi 		unlock_page(page);
202205eb0b51SOGAWA Hirofumi 		page_cache_release(page);
202305eb0b51SOGAWA Hirofumi 		vmtruncate(inode, inode->i_size);
202405eb0b51SOGAWA Hirofumi 		goto out;
202505eb0b51SOGAWA Hirofumi 	}
202605eb0b51SOGAWA Hirofumi 
202705eb0b51SOGAWA Hirofumi 	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
202805eb0b51SOGAWA Hirofumi 
202905eb0b51SOGAWA Hirofumi 	unlock_page(page);
203005eb0b51SOGAWA Hirofumi 	page_cache_release(page);
203105eb0b51SOGAWA Hirofumi 	if (err > 0)
203205eb0b51SOGAWA Hirofumi 		err = 0;
203305eb0b51SOGAWA Hirofumi out:
203405eb0b51SOGAWA Hirofumi 	return err;
203505eb0b51SOGAWA Hirofumi }
203605eb0b51SOGAWA Hirofumi 
203705eb0b51SOGAWA Hirofumi int generic_cont_expand(struct inode *inode, loff_t size)
203805eb0b51SOGAWA Hirofumi {
203905eb0b51SOGAWA Hirofumi 	pgoff_t index;
204005eb0b51SOGAWA Hirofumi 	unsigned int offset;
204105eb0b51SOGAWA Hirofumi 
20421da177e4SLinus Torvalds 	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
20431da177e4SLinus Torvalds 
20441da177e4SLinus Torvalds 	/* ugh.  in prepare/commit_write, if from==to==start of block, we
20451da177e4SLinus Torvalds 	** skip the prepare.  make sure we never send an offset for the start
20461da177e4SLinus Torvalds 	** of a block
20471da177e4SLinus Torvalds 	*/
20481da177e4SLinus Torvalds 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
204905eb0b51SOGAWA Hirofumi 		/* caller must handle this extra byte. */
20501da177e4SLinus Torvalds 		offset++;
20511da177e4SLinus Torvalds 	}
20521da177e4SLinus Torvalds 	index = size >> PAGE_CACHE_SHIFT;
205305eb0b51SOGAWA Hirofumi 
205405eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
20551da177e4SLinus Torvalds }
205605eb0b51SOGAWA Hirofumi 
205705eb0b51SOGAWA Hirofumi int generic_cont_expand_simple(struct inode *inode, loff_t size)
205805eb0b51SOGAWA Hirofumi {
205905eb0b51SOGAWA Hirofumi 	loff_t pos = size - 1;
206005eb0b51SOGAWA Hirofumi 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
206105eb0b51SOGAWA Hirofumi 	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
206205eb0b51SOGAWA Hirofumi 
206305eb0b51SOGAWA Hirofumi 	/* prepare/commit_write can handle even if from==to==start of block. */
206405eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
20651da177e4SLinus Torvalds }
20661da177e4SLinus Torvalds 
20671da177e4SLinus Torvalds /*
20681da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in a file.
20691da177e4SLinus Torvalds  * We may have to extend the file.
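 *
 * Hedged sketch of a caller (the names are illustrative; "allocated"
 * stands for the filesystem's private count of bytes allocated so far,
 * which this helper reads and updates through *bytes):
 *
 *	static int example_prepare_write(struct file *file, struct page *page,
 *					unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return cont_prepare_write(page, from, to, example_get_block,
 *					&EXAMPLE_I(inode)->allocated);
 *	}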
20701da177e4SLinus Torvalds */ 20711da177e4SLinus Torvalds 20721da177e4SLinus Torvalds int cont_prepare_write(struct page *page, unsigned offset, 20731da177e4SLinus Torvalds unsigned to, get_block_t *get_block, loff_t *bytes) 20741da177e4SLinus Torvalds { 20751da177e4SLinus Torvalds struct address_space *mapping = page->mapping; 20761da177e4SLinus Torvalds struct inode *inode = mapping->host; 20771da177e4SLinus Torvalds struct page *new_page; 20781da177e4SLinus Torvalds pgoff_t pgpos; 20791da177e4SLinus Torvalds long status; 20801da177e4SLinus Torvalds unsigned zerofrom; 20811da177e4SLinus Torvalds unsigned blocksize = 1 << inode->i_blkbits; 20821da177e4SLinus Torvalds 20831da177e4SLinus Torvalds while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) { 20841da177e4SLinus Torvalds status = -ENOMEM; 20851da177e4SLinus Torvalds new_page = grab_cache_page(mapping, pgpos); 20861da177e4SLinus Torvalds if (!new_page) 20871da177e4SLinus Torvalds goto out; 20881da177e4SLinus Torvalds /* we might sleep */ 20891da177e4SLinus Torvalds if (*bytes>>PAGE_CACHE_SHIFT != pgpos) { 20901da177e4SLinus Torvalds unlock_page(new_page); 20911da177e4SLinus Torvalds page_cache_release(new_page); 20921da177e4SLinus Torvalds continue; 20931da177e4SLinus Torvalds } 20941da177e4SLinus Torvalds zerofrom = *bytes & ~PAGE_CACHE_MASK; 20951da177e4SLinus Torvalds if (zerofrom & (blocksize-1)) { 20961da177e4SLinus Torvalds *bytes |= (blocksize-1); 20971da177e4SLinus Torvalds (*bytes)++; 20981da177e4SLinus Torvalds } 20991da177e4SLinus Torvalds status = __block_prepare_write(inode, new_page, zerofrom, 21001da177e4SLinus Torvalds PAGE_CACHE_SIZE, get_block); 21011da177e4SLinus Torvalds if (status) 21021da177e4SLinus Torvalds goto out_unmap; 210301f2705dSNate Diller zero_user_page(page, zerofrom, PAGE_CACHE_SIZE - zerofrom, 210401f2705dSNate Diller KM_USER0); 21051da177e4SLinus Torvalds generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE); 21061da177e4SLinus Torvalds unlock_page(new_page); 21071da177e4SLinus Torvalds page_cache_release(new_page); 21081da177e4SLinus Torvalds } 21091da177e4SLinus Torvalds 21101da177e4SLinus Torvalds if (page->index < pgpos) { 21111da177e4SLinus Torvalds /* completely inside the area */ 21121da177e4SLinus Torvalds zerofrom = offset; 21131da177e4SLinus Torvalds } else { 21141da177e4SLinus Torvalds /* page covers the boundary, find the boundary offset */ 21151da177e4SLinus Torvalds zerofrom = *bytes & ~PAGE_CACHE_MASK; 21161da177e4SLinus Torvalds 21171da177e4SLinus Torvalds /* if we will expand the thing last block will be filled */ 21181da177e4SLinus Torvalds if (to > zerofrom && (zerofrom & (blocksize-1))) { 21191da177e4SLinus Torvalds *bytes |= (blocksize-1); 21201da177e4SLinus Torvalds (*bytes)++; 21211da177e4SLinus Torvalds } 21221da177e4SLinus Torvalds 21231da177e4SLinus Torvalds /* starting below the boundary? 
Nothing to zero out */ 21241da177e4SLinus Torvalds if (offset <= zerofrom) 21251da177e4SLinus Torvalds zerofrom = offset; 21261da177e4SLinus Torvalds } 21271da177e4SLinus Torvalds status = __block_prepare_write(inode, page, zerofrom, to, get_block); 21281da177e4SLinus Torvalds if (status) 21291da177e4SLinus Torvalds goto out1; 21301da177e4SLinus Torvalds if (zerofrom < offset) { 213101f2705dSNate Diller zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0); 21321da177e4SLinus Torvalds __block_commit_write(inode, page, zerofrom, offset); 21331da177e4SLinus Torvalds } 21341da177e4SLinus Torvalds return 0; 21351da177e4SLinus Torvalds out1: 21361da177e4SLinus Torvalds ClearPageUptodate(page); 21371da177e4SLinus Torvalds return status; 21381da177e4SLinus Torvalds 21391da177e4SLinus Torvalds out_unmap: 21401da177e4SLinus Torvalds ClearPageUptodate(new_page); 21411da177e4SLinus Torvalds unlock_page(new_page); 21421da177e4SLinus Torvalds page_cache_release(new_page); 21431da177e4SLinus Torvalds out: 21441da177e4SLinus Torvalds return status; 21451da177e4SLinus Torvalds } 21461da177e4SLinus Torvalds 21471da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to, 21481da177e4SLinus Torvalds get_block_t *get_block) 21491da177e4SLinus Torvalds { 21501da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 21511da177e4SLinus Torvalds int err = __block_prepare_write(inode, page, from, to, get_block); 21521da177e4SLinus Torvalds if (err) 21531da177e4SLinus Torvalds ClearPageUptodate(page); 21541da177e4SLinus Torvalds return err; 21551da177e4SLinus Torvalds } 21561da177e4SLinus Torvalds 21571da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to) 21581da177e4SLinus Torvalds { 21591da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 21601da177e4SLinus Torvalds __block_commit_write(inode,page,from,to); 21611da177e4SLinus Torvalds return 0; 21621da177e4SLinus Torvalds } 21631da177e4SLinus Torvalds 21641da177e4SLinus Torvalds int generic_commit_write(struct file *file, struct page *page, 21651da177e4SLinus Torvalds unsigned from, unsigned to) 21661da177e4SLinus Torvalds { 21671da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 21681da177e4SLinus Torvalds loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 21691da177e4SLinus Torvalds __block_commit_write(inode,page,from,to); 21701da177e4SLinus Torvalds /* 21711da177e4SLinus Torvalds * No need to use i_size_read() here, the i_size 21721b1dcc1bSJes Sorensen * cannot change under us because we hold i_mutex. 21731da177e4SLinus Torvalds */ 21741da177e4SLinus Torvalds if (pos > inode->i_size) { 21751da177e4SLinus Torvalds i_size_write(inode, pos); 21761da177e4SLinus Torvalds mark_inode_dirty(inode); 21771da177e4SLinus Torvalds } 21781da177e4SLinus Torvalds return 0; 21791da177e4SLinus Torvalds } 21801da177e4SLinus Torvalds 21811da177e4SLinus Torvalds 21821da177e4SLinus Torvalds /* 21831da177e4SLinus Torvalds * nobh_prepare_write()'s prereads are special: the buffer_heads are freed 21841da177e4SLinus Torvalds * immediately, while under the page lock. So it needs a special end_io 21851da177e4SLinus Torvalds * handler which does not touch the bh after unlocking it. 
21861da177e4SLinus Torvalds  *
21871da177e4SLinus Torvalds  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
21881da177e4SLinus Torvalds  * a race there is benign: unlock_buffer() only uses the bh's address for
21891da177e4SLinus Torvalds  * hashing after unlocking the buffer, so it doesn't actually touch the bh
21901da177e4SLinus Torvalds  * itself.
21911da177e4SLinus Torvalds  */
21921da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
21931da177e4SLinus Torvalds {
21941da177e4SLinus Torvalds 	if (uptodate) {
21951da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
21961da177e4SLinus Torvalds 	} else {
21971da177e4SLinus Torvalds 		/* This happens, due to failed READA attempts. */
21981da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
21991da177e4SLinus Torvalds 	}
22001da177e4SLinus Torvalds 	unlock_buffer(bh);
22011da177e4SLinus Torvalds }
22021da177e4SLinus Torvalds 
22031da177e4SLinus Torvalds /*
22041da177e4SLinus Torvalds  * On entry, the page is fully not uptodate.
22051da177e4SLinus Torvalds  * On exit the page is fully uptodate in the areas outside (from,to)
22061da177e4SLinus Torvalds  */
22071da177e4SLinus Torvalds int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
22081da177e4SLinus Torvalds 			get_block_t *get_block)
22091da177e4SLinus Torvalds {
22101da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
22111da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
22121da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
22131da177e4SLinus Torvalds 	struct buffer_head map_bh;
22141da177e4SLinus Torvalds 	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
22151da177e4SLinus Torvalds 	unsigned block_in_page;
22161da177e4SLinus Torvalds 	unsigned block_start;
22171da177e4SLinus Torvalds 	sector_t block_in_file;
22181da177e4SLinus Torvalds 	char *kaddr;
22191da177e4SLinus Torvalds 	int nr_reads = 0;
22201da177e4SLinus Torvalds 	int i;
22211da177e4SLinus Torvalds 	int ret = 0;
22221da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
22231da177e4SLinus Torvalds 
22241da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
22251da177e4SLinus Torvalds 		return 0;
22261da177e4SLinus Torvalds 
22271da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
22281da177e4SLinus Torvalds 	map_bh.b_page = page;
22291da177e4SLinus Torvalds 
22301da177e4SLinus Torvalds 	/*
22311da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
22321da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
22331da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
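	 *
	 * A worked example of the index arithmetic above (assuming 4K
	 * pages, i.e. PAGE_CACHE_SHIFT == 12, and 1K blocks, i.e.
	 * blkbits == 10): page->index == 3 gives
	 * block_in_file = 3 << 2 == 12, and the loop walks the four
	 * blocks 12..15 of the file.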
22341da177e4SLinus Torvalds */ 22351da177e4SLinus Torvalds for (block_start = 0, block_in_page = 0; 22361da177e4SLinus Torvalds block_start < PAGE_CACHE_SIZE; 22371da177e4SLinus Torvalds block_in_page++, block_start += blocksize) { 22381da177e4SLinus Torvalds unsigned block_end = block_start + blocksize; 22391da177e4SLinus Torvalds int create; 22401da177e4SLinus Torvalds 22411da177e4SLinus Torvalds map_bh.b_state = 0; 22421da177e4SLinus Torvalds create = 1; 22431da177e4SLinus Torvalds if (block_start >= to) 22441da177e4SLinus Torvalds create = 0; 2245b0cf2321SBadari Pulavarty map_bh.b_size = blocksize; 22461da177e4SLinus Torvalds ret = get_block(inode, block_in_file + block_in_page, 22471da177e4SLinus Torvalds &map_bh, create); 22481da177e4SLinus Torvalds if (ret) 22491da177e4SLinus Torvalds goto failed; 22501da177e4SLinus Torvalds if (!buffer_mapped(&map_bh)) 22511da177e4SLinus Torvalds is_mapped_to_disk = 0; 22521da177e4SLinus Torvalds if (buffer_new(&map_bh)) 22531da177e4SLinus Torvalds unmap_underlying_metadata(map_bh.b_bdev, 22541da177e4SLinus Torvalds map_bh.b_blocknr); 22551da177e4SLinus Torvalds if (PageUptodate(page)) 22561da177e4SLinus Torvalds continue; 22571da177e4SLinus Torvalds if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) { 22581da177e4SLinus Torvalds kaddr = kmap_atomic(page, KM_USER0); 225922c8ca78SNick Piggin if (block_start < from) 22601da177e4SLinus Torvalds memset(kaddr+block_start, 0, from-block_start); 226122c8ca78SNick Piggin if (block_end > to) 22621da177e4SLinus Torvalds memset(kaddr + to, 0, block_end - to); 22631da177e4SLinus Torvalds flush_dcache_page(page); 22641da177e4SLinus Torvalds kunmap_atomic(kaddr, KM_USER0); 22651da177e4SLinus Torvalds continue; 22661da177e4SLinus Torvalds } 22671da177e4SLinus Torvalds if (buffer_uptodate(&map_bh)) 22681da177e4SLinus Torvalds continue; /* reiserfs does this */ 22691da177e4SLinus Torvalds if (block_start < from || block_end > to) { 22701da177e4SLinus Torvalds struct buffer_head *bh = alloc_buffer_head(GFP_NOFS); 22711da177e4SLinus Torvalds 22721da177e4SLinus Torvalds if (!bh) { 22731da177e4SLinus Torvalds ret = -ENOMEM; 22741da177e4SLinus Torvalds goto failed; 22751da177e4SLinus Torvalds } 22761da177e4SLinus Torvalds bh->b_state = map_bh.b_state; 22771da177e4SLinus Torvalds atomic_set(&bh->b_count, 0); 22781da177e4SLinus Torvalds bh->b_this_page = NULL; 22791da177e4SLinus Torvalds bh->b_page = page; 22801da177e4SLinus Torvalds bh->b_blocknr = map_bh.b_blocknr; 22811da177e4SLinus Torvalds bh->b_size = blocksize; 22821da177e4SLinus Torvalds bh->b_data = (char *)(long)block_start; 22831da177e4SLinus Torvalds bh->b_bdev = map_bh.b_bdev; 22841da177e4SLinus Torvalds bh->b_private = NULL; 22851da177e4SLinus Torvalds read_bh[nr_reads++] = bh; 22861da177e4SLinus Torvalds } 22871da177e4SLinus Torvalds } 22881da177e4SLinus Torvalds 22891da177e4SLinus Torvalds if (nr_reads) { 22901da177e4SLinus Torvalds struct buffer_head *bh; 22911da177e4SLinus Torvalds 22921da177e4SLinus Torvalds /* 22931da177e4SLinus Torvalds * The page is locked, so these buffers are protected from 22941da177e4SLinus Torvalds * any VM or truncate activity. Hence we don't need to care 22951da177e4SLinus Torvalds * for the buffer_head refcounts. 
22961da177e4SLinus Torvalds */ 22971da177e4SLinus Torvalds for (i = 0; i < nr_reads; i++) { 22981da177e4SLinus Torvalds bh = read_bh[i]; 22991da177e4SLinus Torvalds lock_buffer(bh); 23001da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_nobh; 23011da177e4SLinus Torvalds submit_bh(READ, bh); 23021da177e4SLinus Torvalds } 23031da177e4SLinus Torvalds for (i = 0; i < nr_reads; i++) { 23041da177e4SLinus Torvalds bh = read_bh[i]; 23051da177e4SLinus Torvalds wait_on_buffer(bh); 23061da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 23071da177e4SLinus Torvalds ret = -EIO; 23081da177e4SLinus Torvalds free_buffer_head(bh); 23091da177e4SLinus Torvalds read_bh[i] = NULL; 23101da177e4SLinus Torvalds } 23111da177e4SLinus Torvalds if (ret) 23121da177e4SLinus Torvalds goto failed; 23131da177e4SLinus Torvalds } 23141da177e4SLinus Torvalds 23151da177e4SLinus Torvalds if (is_mapped_to_disk) 23161da177e4SLinus Torvalds SetPageMappedToDisk(page); 23171da177e4SLinus Torvalds 23181da177e4SLinus Torvalds return 0; 23191da177e4SLinus Torvalds 23201da177e4SLinus Torvalds failed: 23211da177e4SLinus Torvalds for (i = 0; i < nr_reads; i++) { 23221da177e4SLinus Torvalds if (read_bh[i]) 23231da177e4SLinus Torvalds free_buffer_head(read_bh[i]); 23241da177e4SLinus Torvalds } 23251da177e4SLinus Torvalds 23261da177e4SLinus Torvalds /* 23271da177e4SLinus Torvalds * Error recovery is pretty slack. Clear the page and mark it dirty 23281da177e4SLinus Torvalds * so we'll later zero out any blocks which _were_ allocated. 23291da177e4SLinus Torvalds */ 233001f2705dSNate Diller zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); 23311da177e4SLinus Torvalds SetPageUptodate(page); 23321da177e4SLinus Torvalds set_page_dirty(page); 23331da177e4SLinus Torvalds return ret; 23341da177e4SLinus Torvalds } 23351da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_prepare_write); 23361da177e4SLinus Torvalds 233757bf63d6SDave Kleikamp /* 233857bf63d6SDave Kleikamp * Make sure any changes to nobh_commit_write() are reflected in 233957bf63d6SDave Kleikamp * nobh_truncate_page(), since it doesn't call commit_write(). 234057bf63d6SDave Kleikamp */ 23411da177e4SLinus Torvalds int nobh_commit_write(struct file *file, struct page *page, 23421da177e4SLinus Torvalds unsigned from, unsigned to) 23431da177e4SLinus Torvalds { 23441da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 23451da177e4SLinus Torvalds loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 23461da177e4SLinus Torvalds 234722c8ca78SNick Piggin SetPageUptodate(page); 23481da177e4SLinus Torvalds set_page_dirty(page); 23491da177e4SLinus Torvalds if (pos > inode->i_size) { 23501da177e4SLinus Torvalds i_size_write(inode, pos); 23511da177e4SLinus Torvalds mark_inode_dirty(inode); 23521da177e4SLinus Torvalds } 23531da177e4SLinus Torvalds return 0; 23541da177e4SLinus Torvalds } 23551da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_commit_write); 23561da177e4SLinus Torvalds 23571da177e4SLinus Torvalds /* 23581da177e4SLinus Torvalds * nobh_writepage() - based on block_write_full_page() except 23591da177e4SLinus Torvalds * that it tries to operate without attaching bufferheads to 23601da177e4SLinus Torvalds * the page.
23611da177e4SLinus Torvalds */ 23621da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block, 23631da177e4SLinus Torvalds struct writeback_control *wbc) 23641da177e4SLinus Torvalds { 23651da177e4SLinus Torvalds struct inode * const inode = page->mapping->host; 23661da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 23671da177e4SLinus Torvalds const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 23681da177e4SLinus Torvalds unsigned offset; 23691da177e4SLinus Torvalds int ret; 23701da177e4SLinus Torvalds 23711da177e4SLinus Torvalds /* Is the page fully inside i_size? */ 23721da177e4SLinus Torvalds if (page->index < end_index) 23731da177e4SLinus Torvalds goto out; 23741da177e4SLinus Torvalds 23751da177e4SLinus Torvalds /* Is the page fully outside i_size? (truncate in progress) */ 23761da177e4SLinus Torvalds offset = i_size & (PAGE_CACHE_SIZE-1); 23771da177e4SLinus Torvalds if (page->index >= end_index+1 || !offset) { 23781da177e4SLinus Torvalds /* 23791da177e4SLinus Torvalds * The page may have dirty, unmapped buffers. For example, 23801da177e4SLinus Torvalds * they may have been added in ext3_writepage(). Make them 23811da177e4SLinus Torvalds * freeable here, so the page does not leak. 23821da177e4SLinus Torvalds */ 23831da177e4SLinus Torvalds #if 0 23841da177e4SLinus Torvalds /* Not really sure about this - do we need this ? */ 23851da177e4SLinus Torvalds if (page->mapping->a_ops->invalidatepage) 23861da177e4SLinus Torvalds page->mapping->a_ops->invalidatepage(page, offset); 23871da177e4SLinus Torvalds #endif 23881da177e4SLinus Torvalds unlock_page(page); 23891da177e4SLinus Torvalds return 0; /* don't care */ 23901da177e4SLinus Torvalds } 23911da177e4SLinus Torvalds 23921da177e4SLinus Torvalds /* 23931da177e4SLinus Torvalds * The page straddles i_size. It must be zeroed out on each and every 23941da177e4SLinus Torvalds * writepage invocation because it may be mmapped. "A file is mapped 23951da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 23961da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 23971da177e4SLinus Torvalds * writes to that region are not written out to the file." 23981da177e4SLinus Torvalds */ 239901f2705dSNate Diller zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 24001da177e4SLinus Torvalds out: 24011da177e4SLinus Torvalds ret = mpage_writepage(page, get_block, wbc); 24021da177e4SLinus Torvalds if (ret == -EAGAIN) 24031da177e4SLinus Torvalds ret = __block_write_full_page(inode, page, get_block, wbc); 24041da177e4SLinus Torvalds return ret; 24051da177e4SLinus Torvalds } 24061da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage); 24071da177e4SLinus Torvalds 24081da177e4SLinus Torvalds /* 24091da177e4SLinus Torvalds * This function assumes that ->prepare_write() uses nobh_prepare_write(). 
24101da177e4SLinus Torvalds */ 24111da177e4SLinus Torvalds int nobh_truncate_page(struct address_space *mapping, loff_t from) 24121da177e4SLinus Torvalds { 24131da177e4SLinus Torvalds struct inode *inode = mapping->host; 24141da177e4SLinus Torvalds unsigned blocksize = 1 << inode->i_blkbits; 24151da177e4SLinus Torvalds pgoff_t index = from >> PAGE_CACHE_SHIFT; 24161da177e4SLinus Torvalds unsigned offset = from & (PAGE_CACHE_SIZE-1); 24171da177e4SLinus Torvalds unsigned to; 24181da177e4SLinus Torvalds struct page *page; 2419f5e54d6eSChristoph Hellwig const struct address_space_operations *a_ops = mapping->a_ops; 24201da177e4SLinus Torvalds int ret = 0; 24211da177e4SLinus Torvalds 24221da177e4SLinus Torvalds if ((offset & (blocksize - 1)) == 0) 24231da177e4SLinus Torvalds goto out; 24241da177e4SLinus Torvalds 24251da177e4SLinus Torvalds ret = -ENOMEM; 24261da177e4SLinus Torvalds page = grab_cache_page(mapping, index); 24271da177e4SLinus Torvalds if (!page) 24281da177e4SLinus Torvalds goto out; 24291da177e4SLinus Torvalds 24301da177e4SLinus Torvalds to = (offset + blocksize) & ~(blocksize - 1); 24311da177e4SLinus Torvalds ret = a_ops->prepare_write(NULL, page, offset, to); 24321da177e4SLinus Torvalds if (ret == 0) { 243301f2705dSNate Diller zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, 243401f2705dSNate Diller KM_USER0); 243557bf63d6SDave Kleikamp /* 243657bf63d6SDave Kleikamp * It would be more correct to call aops->commit_write() 243757bf63d6SDave Kleikamp * here, but this is more efficient. 243857bf63d6SDave Kleikamp */ 243957bf63d6SDave Kleikamp SetPageUptodate(page); 24401da177e4SLinus Torvalds set_page_dirty(page); 24411da177e4SLinus Torvalds } 24421da177e4SLinus Torvalds unlock_page(page); 24431da177e4SLinus Torvalds page_cache_release(page); 24441da177e4SLinus Torvalds out: 24451da177e4SLinus Torvalds return ret; 24461da177e4SLinus Torvalds } 24471da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page); 24481da177e4SLinus Torvalds 24491da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping, 24501da177e4SLinus Torvalds loff_t from, get_block_t *get_block) 24511da177e4SLinus Torvalds { 24521da177e4SLinus Torvalds pgoff_t index = from >> PAGE_CACHE_SHIFT; 24531da177e4SLinus Torvalds unsigned offset = from & (PAGE_CACHE_SIZE-1); 24541da177e4SLinus Torvalds unsigned blocksize; 245554b21a79SAndrew Morton sector_t iblock; 24561da177e4SLinus Torvalds unsigned length, pos; 24571da177e4SLinus Torvalds struct inode *inode = mapping->host; 24581da177e4SLinus Torvalds struct page *page; 24591da177e4SLinus Torvalds struct buffer_head *bh; 24601da177e4SLinus Torvalds int err; 24611da177e4SLinus Torvalds 24621da177e4SLinus Torvalds blocksize = 1 << inode->i_blkbits; 24631da177e4SLinus Torvalds length = offset & (blocksize - 1); 24641da177e4SLinus Torvalds 24651da177e4SLinus Torvalds /* Block boundary? 
Nothing to do */ 24661da177e4SLinus Torvalds if (!length) 24671da177e4SLinus Torvalds return 0; 24681da177e4SLinus Torvalds 24691da177e4SLinus Torvalds length = blocksize - length; 247054b21a79SAndrew Morton iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 24711da177e4SLinus Torvalds 24721da177e4SLinus Torvalds page = grab_cache_page(mapping, index); 24731da177e4SLinus Torvalds err = -ENOMEM; 24741da177e4SLinus Torvalds if (!page) 24751da177e4SLinus Torvalds goto out; 24761da177e4SLinus Torvalds 24771da177e4SLinus Torvalds if (!page_has_buffers(page)) 24781da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0); 24791da177e4SLinus Torvalds 24801da177e4SLinus Torvalds /* Find the buffer that contains "offset" */ 24811da177e4SLinus Torvalds bh = page_buffers(page); 24821da177e4SLinus Torvalds pos = blocksize; 24831da177e4SLinus Torvalds while (offset >= pos) { 24841da177e4SLinus Torvalds bh = bh->b_this_page; 24851da177e4SLinus Torvalds iblock++; 24861da177e4SLinus Torvalds pos += blocksize; 24871da177e4SLinus Torvalds } 24881da177e4SLinus Torvalds 24891da177e4SLinus Torvalds err = 0; 24901da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2491b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 24921da177e4SLinus Torvalds err = get_block(inode, iblock, bh, 0); 24931da177e4SLinus Torvalds if (err) 24941da177e4SLinus Torvalds goto unlock; 24951da177e4SLinus Torvalds /* unmapped? It's a hole - nothing to do */ 24961da177e4SLinus Torvalds if (!buffer_mapped(bh)) 24971da177e4SLinus Torvalds goto unlock; 24981da177e4SLinus Torvalds } 24991da177e4SLinus Torvalds 25001da177e4SLinus Torvalds /* Ok, it's mapped. Make sure it's up-to-date */ 25011da177e4SLinus Torvalds if (PageUptodate(page)) 25021da177e4SLinus Torvalds set_buffer_uptodate(bh); 25031da177e4SLinus Torvalds 250433a266ddSDavid Chinner if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 25051da177e4SLinus Torvalds err = -EIO; 25061da177e4SLinus Torvalds ll_rw_block(READ, 1, &bh); 25071da177e4SLinus Torvalds wait_on_buffer(bh); 25081da177e4SLinus Torvalds /* Uhhuh. Read error. Complain and punt. */ 25091da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 25101da177e4SLinus Torvalds goto unlock; 25111da177e4SLinus Torvalds } 25121da177e4SLinus Torvalds 251301f2705dSNate Diller zero_user_page(page, offset, length, KM_USER0); 25141da177e4SLinus Torvalds mark_buffer_dirty(bh); 25151da177e4SLinus Torvalds err = 0; 25161da177e4SLinus Torvalds 25171da177e4SLinus Torvalds unlock: 25181da177e4SLinus Torvalds unlock_page(page); 25191da177e4SLinus Torvalds page_cache_release(page); 25201da177e4SLinus Torvalds out: 25211da177e4SLinus Torvalds return err; 25221da177e4SLinus Torvalds } 25231da177e4SLinus Torvalds 25241da177e4SLinus Torvalds /* 25251da177e4SLinus Torvalds * The generic ->writepage function for buffer-backed address_spaces 25261da177e4SLinus Torvalds */ 25271da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block, 25281da177e4SLinus Torvalds struct writeback_control *wbc) 25291da177e4SLinus Torvalds { 25301da177e4SLinus Torvalds struct inode * const inode = page->mapping->host; 25311da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 25321da177e4SLinus Torvalds const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 25331da177e4SLinus Torvalds unsigned offset; 25341da177e4SLinus Torvalds 25351da177e4SLinus Torvalds /* Is the page fully inside i_size? 
*/ 25361da177e4SLinus Torvalds if (page->index < end_index) 25371da177e4SLinus Torvalds return __block_write_full_page(inode, page, get_block, wbc); 25381da177e4SLinus Torvalds 25391da177e4SLinus Torvalds /* Is the page fully outside i_size? (truncate in progress) */ 25401da177e4SLinus Torvalds offset = i_size & (PAGE_CACHE_SIZE-1); 25411da177e4SLinus Torvalds if (page->index >= end_index+1 || !offset) { 25421da177e4SLinus Torvalds /* 25431da177e4SLinus Torvalds * The page may have dirty, unmapped buffers. For example, 25441da177e4SLinus Torvalds * they may have been added in ext3_writepage(). Make them 25451da177e4SLinus Torvalds * freeable here, so the page does not leak. 25461da177e4SLinus Torvalds */ 2547aaa4059bSJan Kara do_invalidatepage(page, 0); 25481da177e4SLinus Torvalds unlock_page(page); 25491da177e4SLinus Torvalds return 0; /* don't care */ 25501da177e4SLinus Torvalds } 25511da177e4SLinus Torvalds 25521da177e4SLinus Torvalds /* 25531da177e4SLinus Torvalds * The page straddles i_size. It must be zeroed out on each and every 25541da177e4SLinus Torvalds * writepage invocation because it may be mmapped. "A file is mapped 25551da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 25561da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 25571da177e4SLinus Torvalds * writes to that region are not written out to the file." 25581da177e4SLinus Torvalds */ 255901f2705dSNate Diller zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0); 25601da177e4SLinus Torvalds return __block_write_full_page(inode, page, get_block, wbc); 25611da177e4SLinus Torvalds } 25621da177e4SLinus Torvalds 25631da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 25641da177e4SLinus Torvalds get_block_t *get_block) 25651da177e4SLinus Torvalds { 25661da177e4SLinus Torvalds struct buffer_head tmp; 25671da177e4SLinus Torvalds struct inode *inode = mapping->host; 25681da177e4SLinus Torvalds tmp.b_state = 0; 25691da177e4SLinus Torvalds tmp.b_blocknr = 0; 2570b0cf2321SBadari Pulavarty tmp.b_size = 1 << inode->i_blkbits; 25711da177e4SLinus Torvalds get_block(inode, block, &tmp, 0); 25721da177e4SLinus Torvalds return tmp.b_blocknr; 25731da177e4SLinus Torvalds } 25741da177e4SLinus Torvalds 25751da177e4SLinus Torvalds static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err) 25761da177e4SLinus Torvalds { 25771da177e4SLinus Torvalds struct buffer_head *bh = bio->bi_private; 25781da177e4SLinus Torvalds 25791da177e4SLinus Torvalds if (bio->bi_size) 25801da177e4SLinus Torvalds return 1; 25811da177e4SLinus Torvalds 25821da177e4SLinus Torvalds if (err == -EOPNOTSUPP) { 25831da177e4SLinus Torvalds set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 25841da177e4SLinus Torvalds set_bit(BH_Eopnotsupp, &bh->b_state); 25851da177e4SLinus Torvalds } 25861da177e4SLinus Torvalds 25871da177e4SLinus Torvalds bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); 25881da177e4SLinus Torvalds bio_put(bio); 25891da177e4SLinus Torvalds return 0; 25901da177e4SLinus Torvalds } 25911da177e4SLinus Torvalds 25921da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh) 25931da177e4SLinus Torvalds { 25941da177e4SLinus Torvalds struct bio *bio; 25951da177e4SLinus Torvalds int ret = 0; 25961da177e4SLinus Torvalds 25971da177e4SLinus Torvalds BUG_ON(!buffer_locked(bh)); 25981da177e4SLinus Torvalds BUG_ON(!buffer_mapped(bh)); 25991da177e4SLinus Torvalds BUG_ON(!bh->b_end_io); 26001da177e4SLinus Torvalds
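	/*
	 * Descriptive note: the BUG_ON()s above spell out submit_bh()'s
	 * contract -- the caller must pass a locked, mapped buffer with a
	 * completion handler already installed.  Everything below wraps
	 * that single buffer in a one-segment bio and submits it.
	 */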
26011da177e4SLinus Torvalds if (buffer_ordered(bh) && (rw == WRITE)) 26021da177e4SLinus Torvalds rw = WRITE_BARRIER; 26031da177e4SLinus Torvalds 26041da177e4SLinus Torvalds /* 26051da177e4SLinus Torvalds * Only clear out a write error when rewriting; should this 26061da177e4SLinus Torvalds * include WRITE_SYNC as well? 26071da177e4SLinus Torvalds */ 26081da177e4SLinus Torvalds if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER)) 26091da177e4SLinus Torvalds clear_buffer_write_io_error(bh); 26101da177e4SLinus Torvalds 26111da177e4SLinus Torvalds /* 26121da177e4SLinus Torvalds * from here on down, it's all bio -- do the initial mapping, 26131da177e4SLinus Torvalds * submit_bio -> generic_make_request may further map this bio around 26141da177e4SLinus Torvalds */ 26151da177e4SLinus Torvalds bio = bio_alloc(GFP_NOIO, 1); 26161da177e4SLinus Torvalds 26171da177e4SLinus Torvalds bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 26181da177e4SLinus Torvalds bio->bi_bdev = bh->b_bdev; 26191da177e4SLinus Torvalds bio->bi_io_vec[0].bv_page = bh->b_page; 26201da177e4SLinus Torvalds bio->bi_io_vec[0].bv_len = bh->b_size; 26211da177e4SLinus Torvalds bio->bi_io_vec[0].bv_offset = bh_offset(bh); 26221da177e4SLinus Torvalds 26231da177e4SLinus Torvalds bio->bi_vcnt = 1; 26241da177e4SLinus Torvalds bio->bi_idx = 0; 26251da177e4SLinus Torvalds bio->bi_size = bh->b_size; 26261da177e4SLinus Torvalds 26271da177e4SLinus Torvalds bio->bi_end_io = end_bio_bh_io_sync; 26281da177e4SLinus Torvalds bio->bi_private = bh; 26291da177e4SLinus Torvalds 26301da177e4SLinus Torvalds bio_get(bio); 26311da177e4SLinus Torvalds submit_bio(rw, bio); 26321da177e4SLinus Torvalds 26331da177e4SLinus Torvalds if (bio_flagged(bio, BIO_EOPNOTSUPP)) 26341da177e4SLinus Torvalds ret = -EOPNOTSUPP; 26351da177e4SLinus Torvalds 26361da177e4SLinus Torvalds bio_put(bio); 26371da177e4SLinus Torvalds return ret; 26381da177e4SLinus Torvalds } 26391da177e4SLinus Torvalds 26401da177e4SLinus Torvalds /** 26411da177e4SLinus Torvalds * ll_rw_block: low-level access to block devices (DEPRECATED) 2642a7662236SJan Kara * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead) 26431da177e4SLinus Torvalds * @nr: number of &struct buffer_heads in the array 26441da177e4SLinus Torvalds * @bhs: array of pointers to &struct buffer_head 26451da177e4SLinus Torvalds * 2646a7662236SJan Kara * ll_rw_block() takes an array of pointers to &struct buffer_heads, and 2647a7662236SJan Kara * requests an I/O operation on them, either a %READ or a %WRITE. The third 2648a7662236SJan Kara * %SWRITE is like %WRITE only we make sure that the *current* data in buffers 2649a7662236SJan Kara * are sent to disk. The fourth %READA option is described in the documentation 2650a7662236SJan Kara * for generic_make_request() which ll_rw_block() calls. 26511da177e4SLinus Torvalds * 26521da177e4SLinus Torvalds * This function drops any buffer that it cannot get a lock on (with the 2653a7662236SJan Kara * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be 2654a7662236SJan Kara * clean when doing a write request, and any buffer that appears to be 2655a7662236SJan Kara * up-to-date when doing a read request. Further it marks as clean buffers that 2656a7662236SJan Kara * are processed for writing (the buffer cache won't assume that they are 2657a7662236SJan Kara * actually clean until the buffer gets unlocked)
26581da177e4SLinus Torvalds * 26591da177e4SLinus Torvalds * ll_rw_block sets b_end_io to a simple completion handler that marks 26601da177e4SLinus Torvalds * the buffer up-to-date (if appropriate), unlocks the buffer and wakes 26611da177e4SLinus Torvalds * any waiters. 26621da177e4SLinus Torvalds * 26631da177e4SLinus Torvalds * All of the buffers must be for the same device, and must also be a 26641da177e4SLinus Torvalds * multiple of the current approved size for the device. 26651da177e4SLinus Torvalds */ 26661da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) 26671da177e4SLinus Torvalds { 26681da177e4SLinus Torvalds int i; 26691da177e4SLinus Torvalds 26701da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 26711da177e4SLinus Torvalds struct buffer_head *bh = bhs[i]; 26721da177e4SLinus Torvalds 2673a7662236SJan Kara if (rw == SWRITE) 2674a7662236SJan Kara lock_buffer(bh); 2675a7662236SJan Kara else if (test_set_buffer_locked(bh)) 26761da177e4SLinus Torvalds continue; 26771da177e4SLinus Torvalds 2678a7662236SJan Kara if (rw == WRITE || rw == SWRITE) { 26791da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 268076c3073aSakpm@osdl.org bh->b_end_io = end_buffer_write_sync; 2681e60e5c50SOGAWA Hirofumi get_bh(bh); 26821da177e4SLinus Torvalds submit_bh(WRITE, bh); 26831da177e4SLinus Torvalds continue; 26841da177e4SLinus Torvalds } 26851da177e4SLinus Torvalds } else { 26861da177e4SLinus Torvalds if (!buffer_uptodate(bh)) { 268776c3073aSakpm@osdl.org bh->b_end_io = end_buffer_read_sync; 2688e60e5c50SOGAWA Hirofumi get_bh(bh); 26891da177e4SLinus Torvalds submit_bh(rw, bh); 26901da177e4SLinus Torvalds continue; 26911da177e4SLinus Torvalds } 26921da177e4SLinus Torvalds } 26931da177e4SLinus Torvalds unlock_buffer(bh); 26941da177e4SLinus Torvalds } 26951da177e4SLinus Torvalds } 26961da177e4SLinus Torvalds 26971da177e4SLinus Torvalds /* 26981da177e4SLinus Torvalds * For a data-integrity writeout, we need to wait upon any in-progress I/O 26991da177e4SLinus Torvalds * and then start new I/O and then wait upon it. The caller must have a ref on 27001da177e4SLinus Torvalds * the buffer_head. 27011da177e4SLinus Torvalds */ 27021da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh) 27031da177e4SLinus Torvalds { 27041da177e4SLinus Torvalds int ret = 0; 27051da177e4SLinus Torvalds 27061da177e4SLinus Torvalds WARN_ON(atomic_read(&bh->b_count) < 1); 27071da177e4SLinus Torvalds lock_buffer(bh); 27081da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 27091da177e4SLinus Torvalds get_bh(bh); 27101da177e4SLinus Torvalds bh->b_end_io = end_buffer_write_sync; 27111da177e4SLinus Torvalds ret = submit_bh(WRITE, bh); 27121da177e4SLinus Torvalds wait_on_buffer(bh); 27131da177e4SLinus Torvalds if (buffer_eopnotsupp(bh)) { 27141da177e4SLinus Torvalds clear_buffer_eopnotsupp(bh); 27151da177e4SLinus Torvalds ret = -EOPNOTSUPP; 27161da177e4SLinus Torvalds } 27171da177e4SLinus Torvalds if (!ret && !buffer_uptodate(bh)) 27181da177e4SLinus Torvalds ret = -EIO; 27191da177e4SLinus Torvalds } else { 27201da177e4SLinus Torvalds unlock_buffer(bh); 27211da177e4SLinus Torvalds } 27221da177e4SLinus Torvalds return ret; 27231da177e4SLinus Torvalds } 27241da177e4SLinus Torvalds 27251da177e4SLinus Torvalds /* 27261da177e4SLinus Torvalds * try_to_free_buffers() checks if all the buffers on this particular page 27271da177e4SLinus Torvalds * are unused, and releases them if so.
27281da177e4SLinus Torvalds * 27291da177e4SLinus Torvalds * Exclusion against try_to_free_buffers may be obtained by either 27301da177e4SLinus Torvalds * locking the page or by holding its mapping's private_lock. 27311da177e4SLinus Torvalds * 27321da177e4SLinus Torvalds * If the page is dirty but all the buffers are clean then we need to 27331da177e4SLinus Torvalds * be sure to mark the page clean as well. This is because the page 27341da177e4SLinus Torvalds * may be against a block device, and a later reattachment of buffers 27351da177e4SLinus Torvalds * to a dirty page will set *all* buffers dirty, which would corrupt 27361da177e4SLinus Torvalds * filesystem data on the same device. 27371da177e4SLinus Torvalds * 27381da177e4SLinus Torvalds * The same applies to regular filesystem pages: if all the buffers are 27391da177e4SLinus Torvalds * clean then we set the page clean and proceed. To do that, we require 27401da177e4SLinus Torvalds * total exclusion from __set_page_dirty_buffers(). That is obtained with 27411da177e4SLinus Torvalds * private_lock. 27421da177e4SLinus Torvalds * 27431da177e4SLinus Torvalds * try_to_free_buffers() is non-blocking. 27441da177e4SLinus Torvalds */ 27451da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh) 27461da177e4SLinus Torvalds { 27471da177e4SLinus Torvalds return atomic_read(&bh->b_count) | 27481da177e4SLinus Torvalds (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 27491da177e4SLinus Torvalds } 27501da177e4SLinus Torvalds 27511da177e4SLinus Torvalds static int 27521da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free) 27531da177e4SLinus Torvalds { 27541da177e4SLinus Torvalds struct buffer_head *head = page_buffers(page); 27551da177e4SLinus Torvalds struct buffer_head *bh; 27561da177e4SLinus Torvalds 27571da177e4SLinus Torvalds bh = head; 27581da177e4SLinus Torvalds do { 2759de7d5a3bSakpm@osdl.org if (buffer_write_io_error(bh) && page->mapping) 27601da177e4SLinus Torvalds set_bit(AS_EIO, &page->mapping->flags); 27611da177e4SLinus Torvalds if (buffer_busy(bh)) 27621da177e4SLinus Torvalds goto failed; 27631da177e4SLinus Torvalds bh = bh->b_this_page; 27641da177e4SLinus Torvalds } while (bh != head); 27651da177e4SLinus Torvalds 27661da177e4SLinus Torvalds do { 27671da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 27681da177e4SLinus Torvalds 27691da177e4SLinus Torvalds if (!list_empty(&bh->b_assoc_buffers)) 27701da177e4SLinus Torvalds __remove_assoc_queue(bh); 27711da177e4SLinus Torvalds bh = next; 27721da177e4SLinus Torvalds } while (bh != head); 27731da177e4SLinus Torvalds *buffers_to_free = head; 27741da177e4SLinus Torvalds __clear_page_buffers(page); 27751da177e4SLinus Torvalds return 1; 27761da177e4SLinus Torvalds failed: 27771da177e4SLinus Torvalds return 0; 27781da177e4SLinus Torvalds } 27791da177e4SLinus Torvalds 27801da177e4SLinus Torvalds int try_to_free_buffers(struct page *page) 27811da177e4SLinus Torvalds { 27821da177e4SLinus Torvalds struct address_space * const mapping = page->mapping; 27831da177e4SLinus Torvalds struct buffer_head *buffers_to_free = NULL; 27841da177e4SLinus Torvalds int ret = 0; 27851da177e4SLinus Torvalds 27861da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 2787ecdfc978SLinus Torvalds if (PageWriteback(page)) 27881da177e4SLinus Torvalds return 0; 27891da177e4SLinus Torvalds 27901da177e4SLinus Torvalds if (mapping == NULL) { /* can this still happen?
*/ 27911da177e4SLinus Torvalds ret = drop_buffers(page, &buffers_to_free); 27921da177e4SLinus Torvalds goto out; 27931da177e4SLinus Torvalds } 27941da177e4SLinus Torvalds 27951da177e4SLinus Torvalds spin_lock(&mapping->private_lock); 27961da177e4SLinus Torvalds ret = drop_buffers(page, &buffers_to_free); 2797ecdfc978SLinus Torvalds 2798ecdfc978SLinus Torvalds /* 2799ecdfc978SLinus Torvalds * If the filesystem writes its buffers by hand (eg ext3) 2800ecdfc978SLinus Torvalds * then we can have clean buffers against a dirty page. We 2801ecdfc978SLinus Torvalds * clean the page here; otherwise the VM will never notice 2802ecdfc978SLinus Torvalds * that the filesystem did any IO at all. 2803ecdfc978SLinus Torvalds * 2804ecdfc978SLinus Torvalds * Also, during truncate, discard_buffer will have marked all 2805ecdfc978SLinus Torvalds * the page's buffers clean. We discover that here and clean 2806ecdfc978SLinus Torvalds * the page also. 280787df7241SNick Piggin * 280887df7241SNick Piggin * private_lock must be held over this entire operation in order 280987df7241SNick Piggin * to synchronise against __set_page_dirty_buffers and prevent the 281087df7241SNick Piggin * dirty bit from being lost. 2811ecdfc978SLinus Torvalds */ 2812ecdfc978SLinus Torvalds if (ret) 2813ecdfc978SLinus Torvalds cancel_dirty_page(page, PAGE_CACHE_SIZE); 281487df7241SNick Piggin spin_unlock(&mapping->private_lock); 28151da177e4SLinus Torvalds out: 28161da177e4SLinus Torvalds if (buffers_to_free) { 28171da177e4SLinus Torvalds struct buffer_head *bh = buffers_to_free; 28181da177e4SLinus Torvalds 28191da177e4SLinus Torvalds do { 28201da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 28211da177e4SLinus Torvalds free_buffer_head(bh); 28221da177e4SLinus Torvalds bh = next; 28231da177e4SLinus Torvalds } while (bh != buffers_to_free); 28241da177e4SLinus Torvalds } 28251da177e4SLinus Torvalds return ret; 28261da177e4SLinus Torvalds } 28271da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers); 28281da177e4SLinus Torvalds 28293978d717SNeilBrown void block_sync_page(struct page *page) 28301da177e4SLinus Torvalds { 28311da177e4SLinus Torvalds struct address_space *mapping; 28321da177e4SLinus Torvalds 28331da177e4SLinus Torvalds smp_mb(); 28341da177e4SLinus Torvalds mapping = page_mapping(page); 28351da177e4SLinus Torvalds if (mapping) 28361da177e4SLinus Torvalds blk_run_backing_dev(mapping->backing_dev_info, page); 28371da177e4SLinus Torvalds } 28381da177e4SLinus Torvalds 28391da177e4SLinus Torvalds /* 28401da177e4SLinus Torvalds * There are no bdflush tunables left. But distributions are 28411da177e4SLinus Torvalds * still running obsolete flush daemons, so we terminate them here. 28421da177e4SLinus Torvalds * 28431da177e4SLinus Torvalds * Use of bdflush() is deprecated and will be removed in a future kernel. 28441da177e4SLinus Torvalds * The `pdflush' kernel threads fully replace bdflush daemons and this call. 
28451da177e4SLinus Torvalds */ 28461da177e4SLinus Torvalds asmlinkage long sys_bdflush(int func, long data) 28471da177e4SLinus Torvalds { 28481da177e4SLinus Torvalds static int msg_count; 28491da177e4SLinus Torvalds 28501da177e4SLinus Torvalds if (!capable(CAP_SYS_ADMIN)) 28511da177e4SLinus Torvalds return -EPERM; 28521da177e4SLinus Torvalds 28531da177e4SLinus Torvalds if (msg_count < 5) { 28541da177e4SLinus Torvalds msg_count++; 28551da177e4SLinus Torvalds printk(KERN_INFO 28561da177e4SLinus Torvalds "warning: process `%s' used the obsolete bdflush" 28571da177e4SLinus Torvalds " system call\n", current->comm); 28581da177e4SLinus Torvalds printk(KERN_INFO "Fix your initscripts?\n"); 28591da177e4SLinus Torvalds } 28601da177e4SLinus Torvalds 28611da177e4SLinus Torvalds if (func == 1) 28621da177e4SLinus Torvalds do_exit(0); 28631da177e4SLinus Torvalds return 0; 28641da177e4SLinus Torvalds } 28651da177e4SLinus Torvalds 28661da177e4SLinus Torvalds /* 28671da177e4SLinus Torvalds * Buffer-head allocation 28681da177e4SLinus Torvalds */ 2869e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep; 28701da177e4SLinus Torvalds 28711da177e4SLinus Torvalds /* 28721da177e4SLinus Torvalds * Once the number of bh's in the machine exceeds this level, we start 28731da177e4SLinus Torvalds * stripping them in writeback. 28741da177e4SLinus Torvalds */ 28751da177e4SLinus Torvalds static int max_buffer_heads; 28761da177e4SLinus Torvalds 28771da177e4SLinus Torvalds int buffer_heads_over_limit; 28781da177e4SLinus Torvalds 28791da177e4SLinus Torvalds struct bh_accounting { 28801da177e4SLinus Torvalds int nr; /* Number of live bh's */ 28811da177e4SLinus Torvalds int ratelimit; /* Limit cacheline bouncing */ 28821da177e4SLinus Torvalds }; 28831da177e4SLinus Torvalds 28841da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 28851da177e4SLinus Torvalds 28861da177e4SLinus Torvalds static void recalc_bh_state(void) 28871da177e4SLinus Torvalds { 28881da177e4SLinus Torvalds int i; 28891da177e4SLinus Torvalds int tot = 0; 28901da177e4SLinus Torvalds 28911da177e4SLinus Torvalds if (__get_cpu_var(bh_accounting).ratelimit++ < 4096) 28921da177e4SLinus Torvalds return; 28931da177e4SLinus Torvalds __get_cpu_var(bh_accounting).ratelimit = 0; 28948a143426SEric Dumazet for_each_online_cpu(i) 28951da177e4SLinus Torvalds tot += per_cpu(bh_accounting, i).nr; 28961da177e4SLinus Torvalds buffer_heads_over_limit = (tot > max_buffer_heads); 28971da177e4SLinus Torvalds } 28981da177e4SLinus Torvalds 2899dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 29001da177e4SLinus Torvalds { 2901*a35afb83SChristoph Lameter struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 29021da177e4SLinus Torvalds if (ret) { 2903*a35afb83SChristoph Lameter INIT_LIST_HEAD(&ret->b_assoc_buffers); 2904736c7b80SCoywolf Qi Hunt get_cpu_var(bh_accounting).nr++; 29051da177e4SLinus Torvalds recalc_bh_state(); 2906736c7b80SCoywolf Qi Hunt put_cpu_var(bh_accounting); 29071da177e4SLinus Torvalds } 29081da177e4SLinus Torvalds return ret; 29091da177e4SLinus Torvalds } 29101da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head); 29111da177e4SLinus Torvalds 29121da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh) 29131da177e4SLinus Torvalds { 29141da177e4SLinus Torvalds BUG_ON(!list_empty(&bh->b_assoc_buffers)); 29151da177e4SLinus Torvalds kmem_cache_free(bh_cachep, bh); 2916736c7b80SCoywolf Qi Hunt get_cpu_var(bh_accounting).nr--; 29171da177e4SLinus Torvalds recalc_bh_state(); 
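	/*
	 * The put_cpu_var() below pairs with the get_cpu_var() above,
	 * re-enabling preemption once the per-cpu bh_accounting update
	 * is complete.
	 */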
2918736c7b80SCoywolf Qi Hunt put_cpu_var(bh_accounting); 29191da177e4SLinus Torvalds } 29201da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head); 29211da177e4SLinus Torvalds 29221da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu) 29231da177e4SLinus Torvalds { 29241da177e4SLinus Torvalds int i; 29251da177e4SLinus Torvalds struct bh_lru *b = &per_cpu(bh_lrus, cpu); 29261da177e4SLinus Torvalds 29271da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 29281da177e4SLinus Torvalds brelse(b->bhs[i]); 29291da177e4SLinus Torvalds b->bhs[i] = NULL; 29301da177e4SLinus Torvalds } 29318a143426SEric Dumazet get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr; 29328a143426SEric Dumazet per_cpu(bh_accounting, cpu).nr = 0; 29338a143426SEric Dumazet put_cpu_var(bh_accounting); 29341da177e4SLinus Torvalds } 29351da177e4SLinus Torvalds 29361da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self, 29371da177e4SLinus Torvalds unsigned long action, void *hcpu) 29381da177e4SLinus Torvalds { 29398bb78442SRafael J. Wysocki if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) 29401da177e4SLinus Torvalds buffer_exit_cpu((unsigned long)hcpu); 29411da177e4SLinus Torvalds return NOTIFY_OK; 29421da177e4SLinus Torvalds } 29431da177e4SLinus Torvalds 29441da177e4SLinus Torvalds void __init buffer_init(void) 29451da177e4SLinus Torvalds { 29461da177e4SLinus Torvalds int nrpages; 29471da177e4SLinus Torvalds 2948*a35afb83SChristoph Lameter bh_cachep = KMEM_CACHE(buffer_head, 2949*a35afb83SChristoph Lameter SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 29501da177e4SLinus Torvalds 29511da177e4SLinus Torvalds /* 29521da177e4SLinus Torvalds * Limit the bh occupancy to 10% of ZONE_NORMAL 29531da177e4SLinus Torvalds */ 29541da177e4SLinus Torvalds nrpages = (nr_free_buffer_pages() * 10) / 100; 29551da177e4SLinus Torvalds max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 29561da177e4SLinus Torvalds hotcpu_notifier(buffer_cpu_notify, 0); 29571da177e4SLinus Torvalds } 29581da177e4SLinus Torvalds 29591da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget); 29601da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse); 29611da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer); 29621da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write); 29631da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write); 29641da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page); 29651da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page); 29661da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page); 29671da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page); 29681da177e4SLinus Torvalds EXPORT_SYMBOL(cont_prepare_write); 29691da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync); 29701da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync); 29711da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync); 29721da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev); 29731da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap); 29741da177e4SLinus Torvalds EXPORT_SYMBOL(generic_commit_write); 29751da177e4SLinus Torvalds EXPORT_SYMBOL(generic_cont_expand); 297605eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple); 29771da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer); 29781da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev); 29791da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block); 29801da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty); 29811da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh); 29821da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer); 29831da177e4SLinus Torvalds 
EXPORT_SYMBOL(unlock_buffer); 2984
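/*
 * Illustrative sketch only, not kernel code: a minimal example of how a
 * filesystem might push one block of metadata to disk synchronously with
 * the helpers exported above.  The function name and its arguments are
 * hypothetical; __bread(), mark_buffer_dirty(), sync_dirty_buffer() and
 * brelse() are the real buffer-layer entry points used here.  The sketch
 * is wrapped in #if 0, mirroring the convention already used in
 * nobh_writepage(), so it never compiles in.
 */
#if 0
static int example_sync_one_block(struct block_device *bdev, sector_t blocknr,
				  unsigned size, const void *data)
{
	struct buffer_head *bh;
	int ret;

	/* Read (or find) the block; __bread() returns it with a reference. */
	bh = __bread(bdev, blocknr, size);
	if (!bh)
		return -EIO;

	lock_buffer(bh);
	memcpy(bh->b_data, data, size);		/* update the cached copy */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);

	/*
	 * sync_dirty_buffer() waits for any in-flight I/O, resubmits the
	 * buffer and waits again; it relies on the reference we still hold.
	 */
	ret = sync_dirty_buffer(bh);
	brelse(bh);
	return ret;
}
#endif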