--- buffer.c (ea13a86463fd0c26c2c209c53dc46b8eff81bad4)
+++ buffer.c (c515e1fd361c2a08a9c2eb139396ec30a4f477dc)
 /*
  * linux/fs/buffer.c
  *
  * Copyright (C) 1991, 1992, 2002  Linus Torvalds
  */
 
 /*
  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95

--- 27 unchanged lines hidden ---

 #include <linux/buffer_head.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/bio.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <linux/cleancache.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
 inline void
 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 {

--- 212 unchanged lines hidden ---

         struct address_space *mapping = bdev->bd_inode->i_mapping;
 
         if (mapping->nrpages == 0)
                 return;
 
         invalidate_bh_lrus();
         lru_add_drain_all();        /* make sure all lru add caches are flushed */
         invalidate_mapping_pages(mapping, 0, -1);
+        /* 99% of the time, we don't need to flush the cleancache on the bdev.
+         * But, for the strange corners, lets be cautious
+         */
+        cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(invalidate_bdev);
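This hunk is the point of the c515e1fd side: invalidate_bdev() has just dropped every page-cache page for the device, but cleancache may still hold clean copies that a later read could resurrect with stale contents, so those ephemeral copies are flushed as well. The call is designed to cost only a flag test when no cleancache backend is registered; below is a sketch of the static-inline wrapper pattern from <linux/cleancache.h>, reconstructed from memory rather than quoted, so check it against the actual tree:

/* Sketch (not a verbatim quote) of the <linux/cleancache.h> wrapper:
 * filesystems call the inline, which is a no-op unless a cleancache
 * backend has registered itself and set cleancache_enabled.
 */
static inline void cleancache_flush_inode(struct address_space *mapping)
{
        if (cleancache_enabled)
                __cleancache_flush_inode(mapping);
}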
 
 /*
  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
 static void free_more_memory(void)
 {

--- 2046 unchanged lines hidden ---

  * support these features.
  *
  * We are not allowed to take the i_mutex here so we have to play games to
  * protect against truncate races as the page could now be beyond EOF.  Because
  * truncate writes the inode size before removing pages, once we have the
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
- *
- * Direct callers of this function should call vfs_check_frozen() so that page
- * fault does not busyloop until the fs is thawed.
  */
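Only the ea13a86 side carries this warning, because its __block_page_mkwrite() can fail with -EAGAIN while the filesystem is frozen; a caller that does not wait for the thaw would simply retake the same fault in a tight loop. The helper the comment names was a one-line wait in <linux/fs.h> at the time; quoted from memory, so verify against the matching tree:

/* Sketch of the era's vfs_check_frozen(): sleep until the superblock's
 * freeze state drops below the given level.
 */
#define vfs_check_frozen(sb, level) \
        wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))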
-int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-                         get_block_t get_block)
+int
+block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                   get_block_t get_block)
 {
         struct page *page = vmf->page;
         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
         unsigned long end;
         loff_t size;
-        int ret;
+        int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
 
         lock_page(page);
         size = i_size_read(inode);
         if ((page->mapping != inode->i_mapping) ||
             (page_offset(page) > size)) {
-                /* We overload EFAULT to mean page got truncated */
-                ret = -EFAULT;
-                goto out_unlock;
+                /* page got truncated out from underneath us */
+                unlock_page(page);
+                goto out;
         }
 
         /* page is wholly or partially inside EOF */
         if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
                 end = size & ~PAGE_CACHE_MASK;
         else
                 end = PAGE_CACHE_SIZE;
 
         ret = __block_write_begin(page, 0, end, get_block);
         if (!ret)
                 ret = block_commit_write(page, 0, end);
 
-        if (unlikely(ret < 0))
-                goto out_unlock;
-        /*
-         * Freezing in progress? We check after the page is marked dirty and
-         * with page lock held so if the test here fails, we are sure freezing
-         * code will wait during syncing until the page fault is done - at that
-         * point page will be dirty and unlocked so freezing code will write it
-         * and writeprotect it again.
-         */
-        set_page_dirty(page);
-        if (inode->i_sb->s_frozen != SB_UNFROZEN) {
-                ret = -EAGAIN;
-                goto out_unlock;
-        }
-        return 0;
-out_unlock:
-        unlock_page(page);
+        if (unlikely(ret)) {
+                unlock_page(page);
+                if (ret == -ENOMEM)
+                        ret = VM_FAULT_OOM;
+                else /* -ENOSPC, -EIO, etc */
+                        ret = VM_FAULT_SIGBUS;
+        } else
+                ret = VM_FAULT_LOCKED;
+
+out:
         return ret;
 }
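The two versions report failure differently: c515e1fd open-codes the errno-to-VM_FAULT_* translation inside the handler, while ea13a86 keeps __block_page_mkwrite() speaking plain -errno and defers the translation to block_page_mkwrite_return() in the exported wrapper below. That helper lives in <linux/buffer_head.h>; here is a sketch from memory of its 3.x-era shape (the -EAGAIN mapping in particular should be double-checked):

/* Sketch of block_page_mkwrite_return(): translate the -errno results
 * of __block_page_mkwrite() into VM_FAULT_* codes for the fault path.
 */
static inline int block_page_mkwrite_return(int err)
{
        if (err == 0)
                return VM_FAULT_LOCKED;
        if (err == -EFAULT)        /* page got truncated */
                return VM_FAULT_NOPAGE;
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err == -EAGAIN)        /* filesystem frozen; see above */
                return VM_FAULT_RETRY;
        /* -ENOSPC, -EDQUOT, -EIO ... */
        return VM_FAULT_SIGBUS;
}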
-EXPORT_SYMBOL(__block_page_mkwrite);
-
-int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-                   get_block_t get_block)
-{
-        int ret;
-        struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
-
-        /*
-         * This check is racy but catches the common case. The check in
-         * __block_page_mkwrite() is reliable.
-         */
-        vfs_check_frozen(sb, SB_FREEZE_WRITE);
-        ret = __block_page_mkwrite(vma, vmf, get_block);
-        return block_page_mkwrite_return(ret);
-}
 EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
  * immediately, while under the page lock.  So it needs a special end_io
  * handler which does not touch the bh after unlocking it.
  */
 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)

--- 912 unchanged lines hidden ---
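On either side, the symbol a get_block-based filesystem actually consumes is block_page_mkwrite(). A minimal sketch of the wiring follows, with every myfs_* name invented for illustration (filemap_fault and vm_operations_struct are the real kernel interfaces of this era):

/* Hook block_page_mkwrite() into a filesystem's mmap path.  The myfs_*
 * identifiers are placeholders, not real kernel symbols.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = myfs_page_mkwrite,
};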