/*
 * fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/rbtree.h>
#include <linux/list_sort.h>
#include "ext4.h"
#include "extents_status.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion at the Ext4 Developer Workshop, we
 * will introduce a new structure called the io tree to track all extent
 * status in order to solve some of the problems we have met
 * (e.g. reservation space warning), and to provide extent-level locking.
 * The delay extent tree was the first step towards this goal.  It was
 * originally built by Yongqiang Yang.  At that time its only goal was to
 * track delayed extents in memory in order to simplify the implementation
 * of fiemap and bigalloc, and to introduce lseek SEEK_DATA/SEEK_HOLE
 * support.  That is why it was still called the delay extent tree at the
 * first commit.  But to better describe what it does, it has since been
 * renamed to the extent status tree.
 *
 * Step1:
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  The tree is updated when a delayed allocation is
 * issued and when the delayed extent is written out or invalidated.
 * Therefore the implementations of fiemap and bigalloc are simplified,
 * and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents in
 * the status tree are loaded on-demand.  Therefore, the extent status
 * tree may not contain all of the extents in a file.  Meanwhile we define
 * a shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree would make the status tree consume too much
 * memory.  Written/unwritten/hole extents in the tree will be reclaimed
 * by this shrinker when we are under heavy memory pressure.  Delayed
 * extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which leads to complicated, buggy, and
 * inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know whether
 * a block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *
 *   --	FIEMAP
 *	FIEMAP looks up the page cache to identify delayed allocations
 *	from holes.
 *
 *   --	SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   --	bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not, to determine whether
 *	quota reservation is needed for the cluster.
 *
 *   --	writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (i.e. belongs to a delayed extent)
 * by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   --	extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike the extents in the extent tree, this extent is
 *	an in-memory struct; there is no corresponding on-disk data.  There
 *	is no limit on the length of an extent, so an extent can contain as
 *	many blocks as are contiguous logically and physically.
 *
 *   --	extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with their status.  The extents in the
 *	tree are ordered by logical block number.
 *
 *   --	operations on an extent status tree
 *	There are three important operations on an extent status tree: find
 *	the next extent, adding an extent (a range of blocks) and removing
 *	an extent.
 *
 *   --	race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 *   --	memory consumption
 *	A fragmented extent tree will make the extent status tree consume
 *	too much memory.  Hence, we will reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *   --	overhead
 *	1. There is a cached extent for write access, so if writes are
 *	not very random, adding-space operations are done in O(1) time.
 *
 *   --	gain
 *	2. Code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   --	Refactor delayed space reservation
 *
 *   --	Extent-level locking
 */

static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	if (ext4_es_cachep)
		kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;
		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %llx",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * search through the tree for an extent with a given offset.  If it
 * can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
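/*
 * An illustrative example of the search rule above (the tree contents are
 * hypothetical): suppose the tree holds two extents, [2/3) (logical blocks
 * 2..4) and [10/5) (blocks 10..14).  Then:
 *
 *	__es_tree_search(root, 3)  returns the [2/3) extent (3 lies inside it)
 *	__es_tree_search(root, 6)  returns the [10/5) extent (the next one)
 *	__es_tree_search(root, 20) returns NULL (no extent at or after 20)
 */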
/*
 * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
 * @lblk if it exists, otherwise, the next delayed extent after @lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @end: the offset where we stop to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent_range(struct inode *inode,
				 ext4_lblk_t lblk, ext4_lblk_t end,
				 struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	BUG_ON(es == NULL);
	BUG_ON(end < lblk);
	trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;

	/* find the extent in the cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %llx\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !ext4_es_is_delayed(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (ext4_es_is_delayed(es1))
				break;
		}
	}

	if (es1 && ext4_es_is_delayed(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;
	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim
	 * them.
	 */
	if (!ext4_es_is_delayed(es)) {
		EXT4_I(inode)->i_es_lru_nr++;
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
	}

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	/* Decrease the lru counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
		EXT4_I(inode)->i_es_lru_nr--;
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 *  - logical block numbers are contiguous
 *  - physical block numbers are contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_status(es1) != ext4_es_status(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL)
		return 0;

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that a delayed extent is not also unwritten */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
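/*
 * A worked example of the merge conditions above (the values are purely
 * illustrative): a written extent [0/10) mapped at physical block 100 and
 * a written extent [10/5) mapped at physical block 110 meet all three
 * conditions, so they can be merged into a single written extent [0/15)
 * at block 100.  If the second extent were mapped at block 200 instead,
 * or were unwritten, ext4_es_can_be_merged() would refuse the merge.
 */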
static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_ext_find_extent(inode, es->es_lblk, NULL);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {

		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%llx]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of the whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%llx]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c.  So we
	 * can't access the direct/indirect tree from outside it, and it
	 * would be too ugly to define this function in indirect.c.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%llx]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * an indirect-based file doesn't have them.
			 */
			BUG_ON(1);
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%llx]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned long long status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;

	es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock(&newes, pblk);
	ext4_es_store_status(&newes, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err != 0)
		goto error;
	err = __es_insert_extent(inode, &newes);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}
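/*
 * A minimal caller-side sketch of the insert/lookup API (the values and
 * surrounding context are hypothetical; real callers such as
 * ext4_map_blocks() hold the appropriate locks and check return codes):
 *
 *	struct extent_status es;
 *
 *	// record that logical blocks [lblk, lblk + len) of @inode are
 *	// written and mapped starting at physical block @pblk
 *	err = ext4_es_insert_extent(inode, lblk, len, pblk,
 *				    EXTENT_STATUS_WRITTEN);
 *
 *	// a later lookup of @lblk can then be answered from the tree
 *	if (ext4_es_lookup_extent(inode, lblk, &es) &&
 *	    ext4_es_is_written(&es))
 *		pblk = ext4_es_pblock(&es) + (lblk - es.es_lblk);
 */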
/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find the extent in the cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}
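/*
 * A worked example of the range-removal cases handled below (values are
 * illustrative): suppose the tree holds a written extent [10/10) (logical
 * blocks 10..19) mapped at physical block 500, and blocks 15..18 are
 * removed.  Then len1 = 5 (blocks 10..14 survive on the left) and
 * len2 = 1 (block 19 survives on the right), so the extent is shrunk to
 * [10/5) at block 500 and a new extent [19/1) at block 509 is inserted.
 * Removing blocks 5..14 instead would trim the extent from the left,
 * leaving [15/5) at block 505, and removing blocks 5..40 would delete
 * the extent entirely.
 */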
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err = 0;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es)) {
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
				ext4_es_store_pblock(&newes, block);
			}
			ext4_es_store_status(&newes, ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}

/*
 * ext4_es_remove_extent() removes a range of blocks from an inode's
 * extent status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}

int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;

	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	if (ee_len == 0)
		return 0;

	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
				     EXTENT_STATUS_WRITTEN);
}

static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
				     struct list_head *b)
{
	struct ext4_inode_info *eia, *eib;
	eia = list_entry(a, struct ext4_inode_info, i_es_lru);
	eib = list_entry(b, struct ext4_inode_info, i_es_lru);

	if (eia->i_touch_when == eib->i_touch_when)
		return 0;
	if (time_after(eia->i_touch_when, eib->i_touch_when))
		return 1;
	else
		return -1;
}
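/*
 * A brief illustration of the ordering produced by this comparator (the
 * touch times are hypothetical): with list_sort(NULL, &sbi->s_es_lru,
 * ext4_inode_touch_time_cmp), inodes touched at jiffies 200, 50 and 100
 * end up ordered 50, 100, 200, i.e. the least recently used inode sits at
 * the head of the list and is visited first by the shrinker below.
 */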
static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	struct ext4_inode_info *ei;
	struct list_head *cur, *tmp;
	LIST_HEAD(skipped);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk = 0;

	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	spin_lock(&sbi->s_es_lru_lock);

	/*
	 * If the inode at the head of the LRU list is newer than the
	 * last_sorted time, the list needs to be sorted again.
	 */
	ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, i_es_lru);
	if (sbi->s_es_last_sorted < ei->i_touch_when) {
		list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
		sbi->s_es_last_sorted = jiffies;
	}

	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
		/*
		 * If we have already reclaimed all extents from the extent
		 * status tree, just stop the loop immediately.
		 */
		if (percpu_counter_read_positive(&sbi->s_extent_cache_cnt) == 0)
			break;

		ei = list_entry(cur, struct ext4_inode_info, i_es_lru);

		/* Skip inodes that are newer than the last_sorted time */
		if (sbi->s_es_last_sorted < ei->i_touch_when) {
			list_move_tail(cur, &skipped);
			continue;
		}

		if (ei->i_es_lru_nr == 0)
			continue;

		write_lock(&ei->i_es_lock);
		ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
		if (ei->i_es_lru_nr == 0)
			list_del_init(&ei->i_es_lru);
		write_unlock(&ei->i_es_lock);

		nr_shrunk += ret;
		nr_to_scan -= ret;
		if (nr_to_scan == 0)
			break;
	}

	/* Move the newer inodes to the tail of the LRU list. */
	list_splice_tail(&skipped, &sbi->s_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);

	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
	return ret;
}

void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	INIT_LIST_HEAD(&sbi->s_es_lru);
	spin_lock_init(&sbi->s_es_lru_lock);
	sbi->s_es_last_sorted = 0;
	sbi->s_es_shrinker.shrink = ext4_es_shrink;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&sbi->s_es_shrinker);
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	unregister_shrinker(&sbi->s_es_shrinker);
}

void ext4_es_lru_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	ei->i_touch_when = jiffies;

	if (!list_empty(&ei->i_es_lru))
		return;

	spin_lock(&sbi->s_es_lru_lock);
	if (list_empty(&ei->i_es_lru))
		list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);
}

void ext4_es_lru_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lru_lock);
	if (!list_empty(&ei->i_es_lru))
		list_del_init(&ei->i_es_lru);
	spin_unlock(&sbi->s_es_lru_lock);
}

static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
				       int nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	int nr_shrunk = 0;

	if (ei->i_es_lru_nr == 0)
		return 0;

	node = rb_first(&tree->root);
	while (node != NULL) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need them.
		 */
		if (!ext4_es_is_delayed(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
			nr_shrunk++;
			if (--nr_to_scan == 0)
				break;
		}
	}
	tree->cache_es = NULL;
	return nr_shrunk;
}