--- extent_io.c (9601e3f6336f6ca66929f451b1f66085e68e36e3)
+++ extent_io.c (b7967db75a38df4891b22efe1b0969b9357eb946)
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/bio.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/pagemap.h>
 #include <linux/page-flags.h>
 #include <linux/module.h>
--- 1387 unchanged lines hidden ---
 		if (!node)
 			break;
 	}
 out:
 	spin_unlock(&tree->lock);
 	return total_bytes;
 }
 
-#if 0
 /*
- * helper function to lock both pages and extents in the tree.
- * pages must be locked first.
- */
-static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-	struct page *page;
-	int err;
-
-	while (index <= end_index) {
-		page = grab_cache_page(tree->mapping, index);
-		if (!page) {
-			err = -ENOMEM;
-			goto failed;
-		}
-		if (IS_ERR(page)) {
-			err = PTR_ERR(page);
-			goto failed;
-		}
-		index++;
-	}
-	lock_extent(tree, start, end, GFP_NOFS);
-	return 0;
-
-failed:
-	/*
-	 * we failed above in getting the page at 'index', so we undo here
-	 * up to but not including the page at 'index'
-	 */
-	end_index = index;
-	index = start >> PAGE_CACHE_SHIFT;
-	while (index < end_index) {
-		page = find_get_page(tree->mapping, index);
-		unlock_page(page);
-		page_cache_release(page);
-		index++;
-	}
-	return err;
-}
-
-/*
- * helper function to unlock both pages and extents in the tree.
- */
-static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-	struct page *page;
-
-	while (index <= end_index) {
-		page = find_get_page(tree->mapping, index);
-		unlock_page(page);
-		page_cache_release(page);
-		index++;
-	}
-	unlock_extent(tree, start, end, GFP_NOFS);
-	return 0;
-}
-#endif
-
-/*
  * set the private field for a given byte offset in the tree. If there isn't
  * an extent_state there already, this does nothing.
  */
 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
 {
 	struct rb_node *node;
 	struct extent_state *state;
 	int ret = 0;
--- 2321 unchanged lines hidden ---
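For reference, the block removed above was dead code, compiled out by the #if 0/#endif pair: lock_range() grabbed and locked every page covering the byte range and then locked the matching extent range, while unlock_range() released the pages and then the extent lock. The sketch below is purely illustrative and assumes those helpers were still present; read_locked_range() is a hypothetical caller, not a function in the kernel tree.

/*
 * Hypothetical caller of the removed helpers, for illustration only.
 * lock_range() locks the pages first, then the extent range; on failure
 * it has already unlocked and released any pages it grabbed, so the
 * caller only needs to propagate the error.
 */
static int read_locked_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	int ret;

	ret = lock_range(tree, start, end);
	if (ret)
		return ret;	/* e.g. -ENOMEM if a page could not be grabbed */

	/* ... work on the locked pages / extent state here ... */

	unlock_range(tree, start, end);	/* drop page locks, refs, extent lock */
	return 0;
}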