1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * fs/ext4/extents_status.c 4 * 5 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com> 6 * Modified by 7 * Allison Henderson <achender@linux.vnet.ibm.com> 8 * Hugh Dickins <hughd@google.com> 9 * Zheng Liu <wenqing.lz@taobao.com> 10 * 11 * Ext4 extents status tree core functions. 12 */ 13 #include <linux/list_sort.h> 14 #include <linux/proc_fs.h> 15 #include <linux/seq_file.h> 16 #include "ext4.h" 17 18 #include <trace/events/ext4.h> 19 20 /* 21 * According to previous discussion in Ext4 Developer Workshop, we 22 * will introduce a new structure called io tree to track all extent 23 * status in order to solve some problems that we have met 24 * (e.g. Reservation space warning), and provide extent-level locking. 25 * Delay extent tree is the first step to achieve this goal. It is 26 * original built by Yongqiang Yang. At that time it is called delay 27 * extent tree, whose goal is only track delayed extents in memory to 28 * simplify the implementation of fiemap and bigalloc, and introduce 29 * lseek SEEK_DATA/SEEK_HOLE support. That is why it is still called 30 * delay extent tree at the first commit. But for better understand 31 * what it does, it has been rename to extent status tree. 32 * 33 * Step1: 34 * Currently the first step has been done. All delayed extents are 35 * tracked in the tree. It maintains the delayed extent when a delayed 36 * allocation is issued, and the delayed extent is written out or 37 * invalidated. Therefore the implementation of fiemap and bigalloc 38 * are simplified, and SEEK_DATA/SEEK_HOLE are introduced. 39 * 40 * The following comment describes the implemenmtation of extent 41 * status tree and future works. 42 * 43 * Step2: 44 * In this step all extent status are tracked by extent status tree. 45 * Thus, we can first try to lookup a block mapping in this tree before 46 * finding it in extent tree. Hence, single extent cache can be removed 47 * because extent status tree can do a better job. Extents in status 48 * tree are loaded on-demand. Therefore, the extent status tree may not 49 * contain all of the extents in a file. Meanwhile we define a shrinker 50 * to reclaim memory from extent status tree because fragmented extent 51 * tree will make status tree cost too much memory. written/unwritten/- 52 * hole extents in the tree will be reclaimed by this shrinker when we 53 * are under high memory pressure. Delayed extents will not be 54 * reclimed because fiemap, bigalloc, and seek_data/hole need it. 55 */ 56 57 /* 58 * Extent status tree implementation for ext4. 59 * 60 * 61 * ========================================================================== 62 * Extent status tree tracks all extent status. 63 * 64 * 1. Why we need to implement extent status tree? 65 * 66 * Without extent status tree, ext4 identifies a delayed extent by looking 67 * up page cache, this has several deficiencies - complicated, buggy, 68 * and inefficient code. 69 * 70 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a 71 * block or a range of blocks are belonged to a delayed extent. 72 * 73 * Let us have a look at how they do without extent status tree. 74 * -- FIEMAP 75 * FIEMAP looks up page cache to identify delayed allocations from holes. 76 * 77 * -- SEEK_HOLE/DATA 78 * SEEK_HOLE/DATA has the same problem as FIEMAP. 79 * 80 * -- bigalloc 81 * bigalloc looks up page cache to figure out if a block is 82 * already under delayed allocation or not to determine whether 83 * quota reserving is needed for the cluster. 84 * 85 * -- writeout 86 * Writeout looks up whole page cache to see if a buffer is 87 * mapped, If there are not very many delayed buffers, then it is 88 * time consuming. 89 * 90 * With extent status tree implementation, FIEMAP, SEEK_HOLE/DATA, 91 * bigalloc and writeout can figure out if a block or a range of 92 * blocks is under delayed allocation(belonged to a delayed extent) or 93 * not by searching the extent tree. 94 * 95 * 96 * ========================================================================== 97 * 2. Ext4 extent status tree impelmentation 98 * 99 * -- extent 100 * A extent is a range of blocks which are contiguous logically and 101 * physically. Unlike extent in extent tree, this extent in ext4 is 102 * a in-memory struct, there is no corresponding on-disk data. There 103 * is no limit on length of extent, so an extent can contain as many 104 * blocks as they are contiguous logically and physically. 105 * 106 * -- extent status tree 107 * Every inode has an extent status tree and all allocation blocks 108 * are added to the tree with different status. The extent in the 109 * tree are ordered by logical block no. 110 * 111 * -- operations on a extent status tree 112 * There are three important operations on a delayed extent tree: find 113 * next extent, adding a extent(a range of blocks) and removing a extent. 114 * 115 * -- race on a extent status tree 116 * Extent status tree is protected by inode->i_es_lock. 117 * 118 * -- memory consumption 119 * Fragmented extent tree will make extent status tree cost too much 120 * memory. Hence, we will reclaim written/unwritten/hole extents from 121 * the tree under a heavy memory pressure. 122 * 123 * 124 * ========================================================================== 125 * 3. Performance analysis 126 * 127 * -- overhead 128 * 1. There is a cache extent for write access, so if writes are 129 * not very random, adding space operaions are in O(1) time. 130 * 131 * -- gain 132 * 2. Code is much simpler, more readable, more maintainable and 133 * more efficient. 134 * 135 * 136 * ========================================================================== 137 * 4. TODO list 138 * 139 * -- Refactor delayed space reservation 140 * 141 * -- Extent-level locking 142 */ 143 144 static struct kmem_cache *ext4_es_cachep; 145 static struct kmem_cache *ext4_pending_cachep; 146 147 static int __es_insert_extent(struct inode *inode, struct extent_status *newes, 148 struct extent_status *prealloc); 149 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, 150 ext4_lblk_t end, int *reserved, 151 struct extent_status *prealloc); 152 static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan); 153 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, 154 struct ext4_inode_info *locked_ei); 155 static int __revise_pending(struct inode *inode, ext4_lblk_t lblk, 156 ext4_lblk_t len, 157 struct pending_reservation **prealloc); 158 159 int __init ext4_init_es(void) 160 { 161 ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT); 162 if (ext4_es_cachep == NULL) 163 return -ENOMEM; 164 return 0; 165 } 166 167 void ext4_exit_es(void) 168 { 169 kmem_cache_destroy(ext4_es_cachep); 170 } 171 172 void ext4_es_init_tree(struct ext4_es_tree *tree) 173 { 174 tree->root = RB_ROOT; 175 tree->cache_es = NULL; 176 } 177 178 #ifdef ES_DEBUG__ 179 static void ext4_es_print_tree(struct inode *inode) 180 { 181 struct ext4_es_tree *tree; 182 struct rb_node *node; 183 184 printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino); 185 tree = &EXT4_I(inode)->i_es_tree; 186 node = rb_first(&tree->root); 187 while (node) { 188 struct extent_status *es; 189 es = rb_entry(node, struct extent_status, rb_node); 190 printk(KERN_DEBUG " [%u/%u) %llu %x", 191 es->es_lblk, es->es_len, 192 ext4_es_pblock(es), ext4_es_status(es)); 193 node = rb_next(node); 194 } 195 printk(KERN_DEBUG "\n"); 196 } 197 #else 198 #define ext4_es_print_tree(inode) 199 #endif 200 201 static inline ext4_lblk_t ext4_es_end(struct extent_status *es) 202 { 203 BUG_ON(es->es_lblk + es->es_len < es->es_lblk); 204 return es->es_lblk + es->es_len - 1; 205 } 206 207 /* 208 * search through the tree for an delayed extent with a given offset. If 209 * it can't be found, try to find next extent. 210 */ 211 static struct extent_status *__es_tree_search(struct rb_root *root, 212 ext4_lblk_t lblk) 213 { 214 struct rb_node *node = root->rb_node; 215 struct extent_status *es = NULL; 216 217 while (node) { 218 es = rb_entry(node, struct extent_status, rb_node); 219 if (lblk < es->es_lblk) 220 node = node->rb_left; 221 else if (lblk > ext4_es_end(es)) 222 node = node->rb_right; 223 else 224 return es; 225 } 226 227 if (es && lblk < es->es_lblk) 228 return es; 229 230 if (es && lblk > ext4_es_end(es)) { 231 node = rb_next(&es->rb_node); 232 return node ? rb_entry(node, struct extent_status, rb_node) : 233 NULL; 234 } 235 236 return NULL; 237 } 238 239 /* 240 * ext4_es_find_extent_range - find extent with specified status within block 241 * range or next extent following block range in 242 * extents status tree 243 * 244 * @inode - file containing the range 245 * @matching_fn - pointer to function that matches extents with desired status 246 * @lblk - logical block defining start of range 247 * @end - logical block defining end of range 248 * @es - extent found, if any 249 * 250 * Find the first extent within the block range specified by @lblk and @end 251 * in the extents status tree that satisfies @matching_fn. If a match 252 * is found, it's returned in @es. If not, and a matching extent is found 253 * beyond the block range, it's returned in @es. If no match is found, an 254 * extent is returned in @es whose es_lblk, es_len, and es_pblk components 255 * are 0. 256 */ 257 static void __es_find_extent_range(struct inode *inode, 258 int (*matching_fn)(struct extent_status *es), 259 ext4_lblk_t lblk, ext4_lblk_t end, 260 struct extent_status *es) 261 { 262 struct ext4_es_tree *tree = NULL; 263 struct extent_status *es1 = NULL; 264 struct rb_node *node; 265 266 WARN_ON(es == NULL); 267 WARN_ON(end < lblk); 268 269 tree = &EXT4_I(inode)->i_es_tree; 270 271 /* see if the extent has been cached */ 272 es->es_lblk = es->es_len = es->es_pblk = 0; 273 es1 = READ_ONCE(tree->cache_es); 274 if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { 275 es_debug("%u cached by [%u/%u) %llu %x\n", 276 lblk, es1->es_lblk, es1->es_len, 277 ext4_es_pblock(es1), ext4_es_status(es1)); 278 goto out; 279 } 280 281 es1 = __es_tree_search(&tree->root, lblk); 282 283 out: 284 if (es1 && !matching_fn(es1)) { 285 while ((node = rb_next(&es1->rb_node)) != NULL) { 286 es1 = rb_entry(node, struct extent_status, rb_node); 287 if (es1->es_lblk > end) { 288 es1 = NULL; 289 break; 290 } 291 if (matching_fn(es1)) 292 break; 293 } 294 } 295 296 if (es1 && matching_fn(es1)) { 297 WRITE_ONCE(tree->cache_es, es1); 298 es->es_lblk = es1->es_lblk; 299 es->es_len = es1->es_len; 300 es->es_pblk = es1->es_pblk; 301 } 302 303 } 304 305 /* 306 * Locking for __es_find_extent_range() for external use 307 */ 308 void ext4_es_find_extent_range(struct inode *inode, 309 int (*matching_fn)(struct extent_status *es), 310 ext4_lblk_t lblk, ext4_lblk_t end, 311 struct extent_status *es) 312 { 313 es->es_lblk = es->es_len = es->es_pblk = 0; 314 315 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) 316 return; 317 318 trace_ext4_es_find_extent_range_enter(inode, lblk); 319 320 read_lock(&EXT4_I(inode)->i_es_lock); 321 __es_find_extent_range(inode, matching_fn, lblk, end, es); 322 read_unlock(&EXT4_I(inode)->i_es_lock); 323 324 trace_ext4_es_find_extent_range_exit(inode, es); 325 } 326 327 /* 328 * __es_scan_range - search block range for block with specified status 329 * in extents status tree 330 * 331 * @inode - file containing the range 332 * @matching_fn - pointer to function that matches extents with desired status 333 * @lblk - logical block defining start of range 334 * @end - logical block defining end of range 335 * 336 * Returns true if at least one block in the specified block range satisfies 337 * the criterion specified by @matching_fn, and false if not. If at least 338 * one extent has the specified status, then there is at least one block 339 * in the cluster with that status. Should only be called by code that has 340 * taken i_es_lock. 341 */ 342 static bool __es_scan_range(struct inode *inode, 343 int (*matching_fn)(struct extent_status *es), 344 ext4_lblk_t start, ext4_lblk_t end) 345 { 346 struct extent_status es; 347 348 __es_find_extent_range(inode, matching_fn, start, end, &es); 349 if (es.es_len == 0) 350 return false; /* no matching extent in the tree */ 351 else if (es.es_lblk <= start && 352 start < es.es_lblk + es.es_len) 353 return true; 354 else if (start <= es.es_lblk && es.es_lblk <= end) 355 return true; 356 else 357 return false; 358 } 359 /* 360 * Locking for __es_scan_range() for external use 361 */ 362 bool ext4_es_scan_range(struct inode *inode, 363 int (*matching_fn)(struct extent_status *es), 364 ext4_lblk_t lblk, ext4_lblk_t end) 365 { 366 bool ret; 367 368 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) 369 return false; 370 371 read_lock(&EXT4_I(inode)->i_es_lock); 372 ret = __es_scan_range(inode, matching_fn, lblk, end); 373 read_unlock(&EXT4_I(inode)->i_es_lock); 374 375 return ret; 376 } 377 378 /* 379 * __es_scan_clu - search cluster for block with specified status in 380 * extents status tree 381 * 382 * @inode - file containing the cluster 383 * @matching_fn - pointer to function that matches extents with desired status 384 * @lblk - logical block in cluster to be searched 385 * 386 * Returns true if at least one extent in the cluster containing @lblk 387 * satisfies the criterion specified by @matching_fn, and false if not. If at 388 * least one extent has the specified status, then there is at least one block 389 * in the cluster with that status. Should only be called by code that has 390 * taken i_es_lock. 391 */ 392 static bool __es_scan_clu(struct inode *inode, 393 int (*matching_fn)(struct extent_status *es), 394 ext4_lblk_t lblk) 395 { 396 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 397 ext4_lblk_t lblk_start, lblk_end; 398 399 lblk_start = EXT4_LBLK_CMASK(sbi, lblk); 400 lblk_end = lblk_start + sbi->s_cluster_ratio - 1; 401 402 return __es_scan_range(inode, matching_fn, lblk_start, lblk_end); 403 } 404 405 /* 406 * Locking for __es_scan_clu() for external use 407 */ 408 bool ext4_es_scan_clu(struct inode *inode, 409 int (*matching_fn)(struct extent_status *es), 410 ext4_lblk_t lblk) 411 { 412 bool ret; 413 414 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) 415 return false; 416 417 read_lock(&EXT4_I(inode)->i_es_lock); 418 ret = __es_scan_clu(inode, matching_fn, lblk); 419 read_unlock(&EXT4_I(inode)->i_es_lock); 420 421 return ret; 422 } 423 424 static void ext4_es_list_add(struct inode *inode) 425 { 426 struct ext4_inode_info *ei = EXT4_I(inode); 427 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 428 429 if (!list_empty(&ei->i_es_list)) 430 return; 431 432 spin_lock(&sbi->s_es_lock); 433 if (list_empty(&ei->i_es_list)) { 434 list_add_tail(&ei->i_es_list, &sbi->s_es_list); 435 sbi->s_es_nr_inode++; 436 } 437 spin_unlock(&sbi->s_es_lock); 438 } 439 440 static void ext4_es_list_del(struct inode *inode) 441 { 442 struct ext4_inode_info *ei = EXT4_I(inode); 443 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 444 445 spin_lock(&sbi->s_es_lock); 446 if (!list_empty(&ei->i_es_list)) { 447 list_del_init(&ei->i_es_list); 448 sbi->s_es_nr_inode--; 449 WARN_ON_ONCE(sbi->s_es_nr_inode < 0); 450 } 451 spin_unlock(&sbi->s_es_lock); 452 } 453 454 static inline struct pending_reservation *__alloc_pending(bool nofail) 455 { 456 if (!nofail) 457 return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC); 458 459 return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL); 460 } 461 462 static inline void __free_pending(struct pending_reservation *pr) 463 { 464 kmem_cache_free(ext4_pending_cachep, pr); 465 } 466 467 /* 468 * Returns true if we cannot fail to allocate memory for this extent_status 469 * entry and cannot reclaim it until its status changes. 470 */ 471 static inline bool ext4_es_must_keep(struct extent_status *es) 472 { 473 /* fiemap, bigalloc, and seek_data/hole need to use it. */ 474 if (ext4_es_is_delayed(es)) 475 return true; 476 477 return false; 478 } 479 480 static inline struct extent_status *__es_alloc_extent(bool nofail) 481 { 482 if (!nofail) 483 return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC); 484 485 return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL); 486 } 487 488 static void ext4_es_init_extent(struct inode *inode, struct extent_status *es, 489 ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk) 490 { 491 es->es_lblk = lblk; 492 es->es_len = len; 493 es->es_pblk = pblk; 494 495 /* We never try to reclaim a must kept extent, so we don't count it. */ 496 if (!ext4_es_must_keep(es)) { 497 if (!EXT4_I(inode)->i_es_shk_nr++) 498 ext4_es_list_add(inode); 499 percpu_counter_inc(&EXT4_SB(inode->i_sb)-> 500 s_es_stats.es_stats_shk_cnt); 501 } 502 503 EXT4_I(inode)->i_es_all_nr++; 504 percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); 505 } 506 507 static inline void __es_free_extent(struct extent_status *es) 508 { 509 kmem_cache_free(ext4_es_cachep, es); 510 } 511 512 static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) 513 { 514 EXT4_I(inode)->i_es_all_nr--; 515 percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); 516 517 /* Decrease the shrink counter when we can reclaim the extent. */ 518 if (!ext4_es_must_keep(es)) { 519 BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0); 520 if (!--EXT4_I(inode)->i_es_shk_nr) 521 ext4_es_list_del(inode); 522 percpu_counter_dec(&EXT4_SB(inode->i_sb)-> 523 s_es_stats.es_stats_shk_cnt); 524 } 525 526 __es_free_extent(es); 527 } 528 529 /* 530 * Check whether or not two extents can be merged 531 * Condition: 532 * - logical block number is contiguous 533 * - physical block number is contiguous 534 * - status is equal 535 */ 536 static int ext4_es_can_be_merged(struct extent_status *es1, 537 struct extent_status *es2) 538 { 539 if (ext4_es_type(es1) != ext4_es_type(es2)) 540 return 0; 541 542 if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) { 543 pr_warn("ES assertion failed when merging extents. " 544 "The sum of lengths of es1 (%d) and es2 (%d) " 545 "is bigger than allowed file size (%d)\n", 546 es1->es_len, es2->es_len, EXT_MAX_BLOCKS); 547 WARN_ON(1); 548 return 0; 549 } 550 551 if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk) 552 return 0; 553 554 if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && 555 (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2))) 556 return 1; 557 558 if (ext4_es_is_hole(es1)) 559 return 1; 560 561 /* we need to check delayed extent */ 562 if (ext4_es_is_delayed(es1)) 563 return 1; 564 565 return 0; 566 } 567 568 static struct extent_status * 569 ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es) 570 { 571 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; 572 struct extent_status *es1; 573 struct rb_node *node; 574 575 node = rb_prev(&es->rb_node); 576 if (!node) 577 return es; 578 579 es1 = rb_entry(node, struct extent_status, rb_node); 580 if (ext4_es_can_be_merged(es1, es)) { 581 es1->es_len += es->es_len; 582 if (ext4_es_is_referenced(es)) 583 ext4_es_set_referenced(es1); 584 rb_erase(&es->rb_node, &tree->root); 585 ext4_es_free_extent(inode, es); 586 es = es1; 587 } 588 589 return es; 590 } 591 592 static struct extent_status * 593 ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es) 594 { 595 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; 596 struct extent_status *es1; 597 struct rb_node *node; 598 599 node = rb_next(&es->rb_node); 600 if (!node) 601 return es; 602 603 es1 = rb_entry(node, struct extent_status, rb_node); 604 if (ext4_es_can_be_merged(es, es1)) { 605 es->es_len += es1->es_len; 606 if (ext4_es_is_referenced(es1)) 607 ext4_es_set_referenced(es); 608 rb_erase(node, &tree->root); 609 ext4_es_free_extent(inode, es1); 610 } 611 612 return es; 613 } 614 615 #ifdef ES_AGGRESSIVE_TEST 616 #include "ext4_extents.h" /* Needed when ES_AGGRESSIVE_TEST is defined */ 617 618 static void ext4_es_insert_extent_ext_check(struct inode *inode, 619 struct extent_status *es) 620 { 621 struct ext4_ext_path *path = NULL; 622 struct ext4_extent *ex; 623 ext4_lblk_t ee_block; 624 ext4_fsblk_t ee_start; 625 unsigned short ee_len; 626 int depth, ee_status, es_status; 627 628 path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE); 629 if (IS_ERR(path)) 630 return; 631 632 depth = ext_depth(inode); 633 ex = path[depth].p_ext; 634 635 if (ex) { 636 637 ee_block = le32_to_cpu(ex->ee_block); 638 ee_start = ext4_ext_pblock(ex); 639 ee_len = ext4_ext_get_actual_len(ex); 640 641 ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0; 642 es_status = ext4_es_is_unwritten(es) ? 1 : 0; 643 644 /* 645 * Make sure ex and es are not overlap when we try to insert 646 * a delayed/hole extent. 647 */ 648 if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) { 649 if (in_range(es->es_lblk, ee_block, ee_len)) { 650 pr_warn("ES insert assertion failed for " 651 "inode: %lu we can find an extent " 652 "at block [%d/%d/%llu/%c], but we " 653 "want to add a delayed/hole extent " 654 "[%d/%d/%llu/%x]\n", 655 inode->i_ino, ee_block, ee_len, 656 ee_start, ee_status ? 'u' : 'w', 657 es->es_lblk, es->es_len, 658 ext4_es_pblock(es), ext4_es_status(es)); 659 } 660 goto out; 661 } 662 663 /* 664 * We don't check ee_block == es->es_lblk, etc. because es 665 * might be a part of whole extent, vice versa. 666 */ 667 if (es->es_lblk < ee_block || 668 ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) { 669 pr_warn("ES insert assertion failed for inode: %lu " 670 "ex_status [%d/%d/%llu/%c] != " 671 "es_status [%d/%d/%llu/%c]\n", inode->i_ino, 672 ee_block, ee_len, ee_start, 673 ee_status ? 'u' : 'w', es->es_lblk, es->es_len, 674 ext4_es_pblock(es), es_status ? 'u' : 'w'); 675 goto out; 676 } 677 678 if (ee_status ^ es_status) { 679 pr_warn("ES insert assertion failed for inode: %lu " 680 "ex_status [%d/%d/%llu/%c] != " 681 "es_status [%d/%d/%llu/%c]\n", inode->i_ino, 682 ee_block, ee_len, ee_start, 683 ee_status ? 'u' : 'w', es->es_lblk, es->es_len, 684 ext4_es_pblock(es), es_status ? 'u' : 'w'); 685 } 686 } else { 687 /* 688 * We can't find an extent on disk. So we need to make sure 689 * that we don't want to add an written/unwritten extent. 690 */ 691 if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) { 692 pr_warn("ES insert assertion failed for inode: %lu " 693 "can't find an extent at block %d but we want " 694 "to add a written/unwritten extent " 695 "[%d/%d/%llu/%x]\n", inode->i_ino, 696 es->es_lblk, es->es_lblk, es->es_len, 697 ext4_es_pblock(es), ext4_es_status(es)); 698 } 699 } 700 out: 701 ext4_free_ext_path(path); 702 } 703 704 static void ext4_es_insert_extent_ind_check(struct inode *inode, 705 struct extent_status *es) 706 { 707 struct ext4_map_blocks map; 708 int retval; 709 710 /* 711 * Here we call ext4_ind_map_blocks to lookup a block mapping because 712 * 'Indirect' structure is defined in indirect.c. So we couldn't 713 * access direct/indirect tree from outside. It is too dirty to define 714 * this function in indirect.c file. 715 */ 716 717 map.m_lblk = es->es_lblk; 718 map.m_len = es->es_len; 719 720 retval = ext4_ind_map_blocks(NULL, inode, &map, 0); 721 if (retval > 0) { 722 if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) { 723 /* 724 * We want to add a delayed/hole extent but this 725 * block has been allocated. 726 */ 727 pr_warn("ES insert assertion failed for inode: %lu " 728 "We can find blocks but we want to add a " 729 "delayed/hole extent [%d/%d/%llu/%x]\n", 730 inode->i_ino, es->es_lblk, es->es_len, 731 ext4_es_pblock(es), ext4_es_status(es)); 732 return; 733 } else if (ext4_es_is_written(es)) { 734 if (retval != es->es_len) { 735 pr_warn("ES insert assertion failed for " 736 "inode: %lu retval %d != es_len %d\n", 737 inode->i_ino, retval, es->es_len); 738 return; 739 } 740 if (map.m_pblk != ext4_es_pblock(es)) { 741 pr_warn("ES insert assertion failed for " 742 "inode: %lu m_pblk %llu != " 743 "es_pblk %llu\n", 744 inode->i_ino, map.m_pblk, 745 ext4_es_pblock(es)); 746 return; 747 } 748 } else { 749 /* 750 * We don't need to check unwritten extent because 751 * indirect-based file doesn't have it. 752 */ 753 BUG(); 754 } 755 } else if (retval == 0) { 756 if (ext4_es_is_written(es)) { 757 pr_warn("ES insert assertion failed for inode: %lu " 758 "We can't find the block but we want to add " 759 "a written extent [%d/%d/%llu/%x]\n", 760 inode->i_ino, es->es_lblk, es->es_len, 761 ext4_es_pblock(es), ext4_es_status(es)); 762 return; 763 } 764 } 765 } 766 767 static inline void ext4_es_insert_extent_check(struct inode *inode, 768 struct extent_status *es) 769 { 770 /* 771 * We don't need to worry about the race condition because 772 * caller takes i_data_sem locking. 773 */ 774 BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem)); 775 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 776 ext4_es_insert_extent_ext_check(inode, es); 777 else 778 ext4_es_insert_extent_ind_check(inode, es); 779 } 780 #else 781 static inline void ext4_es_insert_extent_check(struct inode *inode, 782 struct extent_status *es) 783 { 784 } 785 #endif 786 787 static int __es_insert_extent(struct inode *inode, struct extent_status *newes, 788 struct extent_status *prealloc) 789 { 790 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; 791 struct rb_node **p = &tree->root.rb_node; 792 struct rb_node *parent = NULL; 793 struct extent_status *es; 794 795 while (*p) { 796 parent = *p; 797 es = rb_entry(parent, struct extent_status, rb_node); 798 799 if (newes->es_lblk < es->es_lblk) { 800 if (ext4_es_can_be_merged(newes, es)) { 801 /* 802 * Here we can modify es_lblk directly 803 * because it isn't overlapped. 804 */ 805 es->es_lblk = newes->es_lblk; 806 es->es_len += newes->es_len; 807 if (ext4_es_is_written(es) || 808 ext4_es_is_unwritten(es)) 809 ext4_es_store_pblock(es, 810 newes->es_pblk); 811 es = ext4_es_try_to_merge_left(inode, es); 812 goto out; 813 } 814 p = &(*p)->rb_left; 815 } else if (newes->es_lblk > ext4_es_end(es)) { 816 if (ext4_es_can_be_merged(es, newes)) { 817 es->es_len += newes->es_len; 818 es = ext4_es_try_to_merge_right(inode, es); 819 goto out; 820 } 821 p = &(*p)->rb_right; 822 } else { 823 BUG(); 824 return -EINVAL; 825 } 826 } 827 828 if (prealloc) 829 es = prealloc; 830 else 831 es = __es_alloc_extent(false); 832 if (!es) 833 return -ENOMEM; 834 ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len, 835 newes->es_pblk); 836 837 rb_link_node(&es->rb_node, parent, p); 838 rb_insert_color(&es->rb_node, &tree->root); 839 840 out: 841 tree->cache_es = es; 842 return 0; 843 } 844 845 /* 846 * ext4_es_insert_extent() adds information to an inode's extent 847 * status tree. 848 */ 849 void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, 850 ext4_lblk_t len, ext4_fsblk_t pblk, 851 unsigned int status, bool delalloc_reserve_used) 852 { 853 struct extent_status newes; 854 ext4_lblk_t end = lblk + len - 1; 855 int err1 = 0, err2 = 0, err3 = 0; 856 int resv_used = 0, pending = 0; 857 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 858 struct extent_status *es1 = NULL; 859 struct extent_status *es2 = NULL; 860 struct pending_reservation *pr = NULL; 861 bool revise_pending = false; 862 863 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) 864 return; 865 866 es_debug("add [%u/%u) %llu %x %d to extent status tree of inode %lu\n", 867 lblk, len, pblk, status, delalloc_reserve_used, inode->i_ino); 868 869 if (!len) 870 return; 871 872 BUG_ON(end < lblk); 873 WARN_ON_ONCE(status & EXTENT_STATUS_DELAYED); 874 875 newes.es_lblk = lblk; 876 newes.es_len = len; 877 ext4_es_store_pblock_status(&newes, pblk, status); 878 trace_ext4_es_insert_extent(inode, &newes); 879 880 ext4_es_insert_extent_check(inode, &newes); 881 882 revise_pending = sbi->s_cluster_ratio > 1 && 883 test_opt(inode->i_sb, DELALLOC) && 884 (status & (EXTENT_STATUS_WRITTEN | 885 EXTENT_STATUS_UNWRITTEN)); 886 retry: 887 if (err1 && !es1) 888 es1 = __es_alloc_extent(true); 889 if ((err1 || err2) && !es2) 890 es2 = __es_alloc_extent(true); 891 if ((err1 || err2 || err3 < 0) && revise_pending && !pr) 892 pr = __alloc_pending(true); 893 write_lock(&EXT4_I(inode)->i_es_lock); 894 895 err1 = __es_remove_extent(inode, lblk, end, &resv_used, es1); 896 if (err1 != 0) 897 goto error; 898 /* Free preallocated extent if it didn't get used. */ 899 if (es1) { 900 if (!es1->es_len) 901 __es_free_extent(es1); 902 es1 = NULL; 903 } 904 905 err2 = __es_insert_extent(inode, &newes, es2); 906 if (err2 == -ENOMEM && !ext4_es_must_keep(&newes)) 907 err2 = 0; 908 if (err2 != 0) 909 goto error; 910 /* Free preallocated extent if it didn't get used. */ 911 if (es2) { 912 if (!es2->es_len) 913 __es_free_extent(es2); 914 es2 = NULL; 915 } 916 917 if (revise_pending) { 918 err3 = __revise_pending(inode, lblk, len, &pr); 919 if (err3 < 0) 920 goto error; 921 if (pr) { 922 __free_pending(pr); 923 pr = NULL; 924 } 925 pending = err3; 926 } 927 error: 928 write_unlock(&EXT4_I(inode)->i_es_lock); 929 /* 930 * Reduce the reserved cluster count to reflect successful deferred 931 * allocation of delayed allocated clusters or direct allocation of 932 * clusters discovered to be delayed allocated. Once allocated, a 933 * cluster is not included in the reserved count. 934 * 935 * When direct allocating (from fallocate, filemap, DIO, or clusters 936 * allocated when delalloc has been disabled by ext4_nonda_switch()) 937 * an extent either 1) contains delayed blocks but start with 938 * non-delayed allocated blocks (e.g. hole) or 2) contains non-delayed 939 * allocated blocks which belong to delayed allocated clusters when 940 * bigalloc feature is enabled, quota has already been claimed by 941 * ext4_mb_new_blocks(), so release the quota reservations made for 942 * any previously delayed allocated clusters instead of claim them 943 * again. 944 */ 945 resv_used += pending; 946 if (resv_used) 947 ext4_da_update_reserve_space(inode, resv_used, 948 delalloc_reserve_used); 949 950 if (err1 || err2 || err3 < 0) 951 goto retry; 952 953 ext4_es_print_tree(inode); 954 return; 955 } 956 957 /* 958 * ext4_es_cache_extent() inserts information into the extent status 959 * tree if and only if there isn't information about the range in 960 * question already. 961 */ 962 void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk, 963 ext4_lblk_t len, ext4_fsblk_t pblk, 964 unsigned int status) 965 { 966 struct extent_status *es; 967 struct extent_status newes; 968 ext4_lblk_t end = lblk + len - 1; 969 970 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) 971 return; 972 973 newes.es_lblk = lblk; 974 newes.es_len = len; 975 ext4_es_store_pblock_status(&newes, pblk, status); 976 trace_ext4_es_cache_extent(inode, &newes); 977 978 if (!len) 979 return; 980 981 BUG_ON(end < lblk); 982 983 write_lock(&EXT4_I(inode)->i_es_lock); 984 985 es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk); 986 if (!es || es->es_lblk > end) 987 __es_insert_extent(inode, &newes, NULL); 988 write_unlock(&EXT4_I(inode)->i_es_lock); 989 } 990 991 /* 992 * ext4_es_lookup_extent() looks up an extent in extent status tree. 993 * 994 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks. 995 * 996 * Return: 1 on found, 0 on not 997 */ 998 int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, 999 ext4_lblk_t *next_lblk, 1000 struct extent_status *es) 1001 { 1002 struct ext4_es_tree *tree; 1003 struct ext4_es_stats *stats; 1004 struct extent_status *es1 = NULL; 1005 struct rb_node *node; 1006 int found = 0; 1007 1008 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) 1009 return 0; 1010 1011 trace_ext4_es_lookup_extent_enter(inode, lblk); 1012 es_debug("lookup extent in block %u\n", lblk); 1013 1014 tree = &EXT4_I(inode)->i_es_tree; 1015 read_lock(&EXT4_I(inode)->i_es_lock); 1016 1017 /* find extent in cache firstly */ 1018 es->es_lblk = es->es_len = es->es_pblk = 0; 1019 es1 = READ_ONCE(tree->cache_es); 1020 if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { 1021 es_debug("%u cached by [%u/%u)\n", 1022 lblk, es1->es_lblk, es1->es_len); 1023 found = 1; 1024 goto out; 1025 } 1026 1027 node = tree->root.rb_node; 1028 while (node) { 1029 es1 = rb_entry(node, struct extent_status, rb_node); 1030 if (lblk < es1->es_lblk) 1031 node = node->rb_left; 1032 else if (lblk > ext4_es_end(es1)) 1033 node = node->rb_right; 1034 else { 1035 found = 1; 1036 break; 1037 } 1038 } 1039 1040 out: 1041 stats = &EXT4_SB(inode->i_sb)->s_es_stats; 1042 if (found) { 1043 BUG_ON(!es1); 1044 es->es_lblk = es1->es_lblk; 1045 es->es_len = es1->es_len; 1046 es->es_pblk = es1->es_pblk; 1047 if (!ext4_es_is_referenced(es1)) 1048 ext4_es_set_referenced(es1); 1049 percpu_counter_inc(&stats->es_stats_cache_hits); 1050 if (next_lblk) { 1051 node = rb_next(&es1->rb_node); 1052 if (node) { 1053 es1 = rb_entry(node, struct extent_status, 1054 rb_node); 1055 *next_lblk = es1->es_lblk; 1056 } else 1057 *next_lblk = 0; 1058 } 1059 } else { 1060 percpu_counter_inc(&stats->es_stats_cache_misses); 1061 } 1062 1063 read_unlock(&EXT4_I(inode)->i_es_lock); 1064 1065 trace_ext4_es_lookup_extent_exit(inode, es, found); 1066 return found; 1067 } 1068 1069 struct rsvd_count { 1070 int ndelayed; 1071 bool first_do_lblk_found; 1072 ext4_lblk_t first_do_lblk; 1073 ext4_lblk_t last_do_lblk; 1074 struct extent_status *left_es; 1075 bool partial; 1076 ext4_lblk_t lclu; 1077 }; 1078 1079 /* 1080 * init_rsvd - initialize reserved count data before removing block range 1081 * in file from extent status tree 1082 * 1083 * @inode - file containing range 1084 * @lblk - first block in range 1085 * @es - pointer to first extent in range 1086 * @rc - pointer to reserved count data 1087 * 1088 * Assumes es is not NULL 1089 */ 1090 static void init_rsvd(struct inode *inode, ext4_lblk_t lblk, 1091 struct extent_status *es, struct rsvd_count *rc) 1092 { 1093 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1094 struct rb_node *node; 1095 1096 rc->ndelayed = 0; 1097 1098 /* 1099 * for bigalloc, note the first delayed block in the range has not 1100 * been found, record the extent containing the block to the left of 1101 * the region to be removed, if any, and note that there's no partial 1102 * cluster to track 1103 */ 1104 if (sbi->s_cluster_ratio > 1) { 1105 rc->first_do_lblk_found = false; 1106 if (lblk > es->es_lblk) { 1107 rc->left_es = es; 1108 } else { 1109 node = rb_prev(&es->rb_node); 1110 rc->left_es = node ? rb_entry(node, 1111 struct extent_status, 1112 rb_node) : NULL; 1113 } 1114 rc->partial = false; 1115 } 1116 } 1117 1118 /* 1119 * count_rsvd - count the clusters containing delayed blocks in a range 1120 * within an extent and add to the running tally in rsvd_count 1121 * 1122 * @inode - file containing extent 1123 * @lblk - first block in range 1124 * @len - length of range in blocks 1125 * @es - pointer to extent containing clusters to be counted 1126 * @rc - pointer to reserved count data 1127 * 1128 * Tracks partial clusters found at the beginning and end of extents so 1129 * they aren't overcounted when they span adjacent extents 1130 */ 1131 static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len, 1132 struct extent_status *es, struct rsvd_count *rc) 1133 { 1134 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1135 ext4_lblk_t i, end, nclu; 1136 1137 if (!ext4_es_is_delayed(es)) 1138 return; 1139 1140 WARN_ON(len <= 0); 1141 1142 if (sbi->s_cluster_ratio == 1) { 1143 rc->ndelayed += (int) len; 1144 return; 1145 } 1146 1147 /* bigalloc */ 1148 1149 i = (lblk < es->es_lblk) ? es->es_lblk : lblk; 1150 end = lblk + (ext4_lblk_t) len - 1; 1151 end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end; 1152 1153 /* record the first block of the first delayed extent seen */ 1154 if (!rc->first_do_lblk_found) { 1155 rc->first_do_lblk = i; 1156 rc->first_do_lblk_found = true; 1157 } 1158 1159 /* update the last lblk in the region seen so far */ 1160 rc->last_do_lblk = end; 1161 1162 /* 1163 * if we're tracking a partial cluster and the current extent 1164 * doesn't start with it, count it and stop tracking 1165 */ 1166 if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) { 1167 rc->ndelayed++; 1168 rc->partial = false; 1169 } 1170 1171 /* 1172 * if the first cluster doesn't start on a cluster boundary but 1173 * ends on one, count it 1174 */ 1175 if (EXT4_LBLK_COFF(sbi, i) != 0) { 1176 if (end >= EXT4_LBLK_CFILL(sbi, i)) { 1177 rc->ndelayed++; 1178 rc->partial = false; 1179 i = EXT4_LBLK_CFILL(sbi, i) + 1; 1180 } 1181 } 1182 1183 /* 1184 * if the current cluster starts on a cluster boundary, count the 1185 * number of whole delayed clusters in the extent 1186 */ 1187 if ((i + sbi->s_cluster_ratio - 1) <= end) { 1188 nclu = (end - i + 1) >> sbi->s_cluster_bits; 1189 rc->ndelayed += nclu; 1190 i += nclu << sbi->s_cluster_bits; 1191 } 1192 1193 /* 1194 * start tracking a partial cluster if there's a partial at the end 1195 * of the current extent and we're not already tracking one 1196 */ 1197 if (!rc->partial && i <= end) { 1198 rc->partial = true; 1199 rc->lclu = EXT4_B2C(sbi, i); 1200 } 1201 } 1202 1203 /* 1204 * __pr_tree_search - search for a pending cluster reservation 1205 * 1206 * @root - root of pending reservation tree 1207 * @lclu - logical cluster to search for 1208 * 1209 * Returns the pending reservation for the cluster identified by @lclu 1210 * if found. If not, returns a reservation for the next cluster if any, 1211 * and if not, returns NULL. 1212 */ 1213 static struct pending_reservation *__pr_tree_search(struct rb_root *root, 1214 ext4_lblk_t lclu) 1215 { 1216 struct rb_node *node = root->rb_node; 1217 struct pending_reservation *pr = NULL; 1218 1219 while (node) { 1220 pr = rb_entry(node, struct pending_reservation, rb_node); 1221 if (lclu < pr->lclu) 1222 node = node->rb_left; 1223 else if (lclu > pr->lclu) 1224 node = node->rb_right; 1225 else 1226 return pr; 1227 } 1228 if (pr && lclu < pr->lclu) 1229 return pr; 1230 if (pr && lclu > pr->lclu) { 1231 node = rb_next(&pr->rb_node); 1232 return node ? rb_entry(node, struct pending_reservation, 1233 rb_node) : NULL; 1234 } 1235 return NULL; 1236 } 1237 1238 /* 1239 * get_rsvd - calculates and returns the number of cluster reservations to be 1240 * released when removing a block range from the extent status tree 1241 * and releases any pending reservations within the range 1242 * 1243 * @inode - file containing block range 1244 * @end - last block in range 1245 * @right_es - pointer to extent containing next block beyond end or NULL 1246 * @rc - pointer to reserved count data 1247 * 1248 * The number of reservations to be released is equal to the number of 1249 * clusters containing delayed blocks within the range, minus the number of 1250 * clusters still containing delayed blocks at the ends of the range, and 1251 * minus the number of pending reservations within the range. 1252 */ 1253 static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end, 1254 struct extent_status *right_es, 1255 struct rsvd_count *rc) 1256 { 1257 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1258 struct pending_reservation *pr; 1259 struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree; 1260 struct rb_node *node; 1261 ext4_lblk_t first_lclu, last_lclu; 1262 bool left_delayed, right_delayed, count_pending; 1263 struct extent_status *es; 1264 1265 if (sbi->s_cluster_ratio > 1) { 1266 /* count any remaining partial cluster */ 1267 if (rc->partial) 1268 rc->ndelayed++; 1269 1270 if (rc->ndelayed == 0) 1271 return 0; 1272 1273 first_lclu = EXT4_B2C(sbi, rc->first_do_lblk); 1274 last_lclu = EXT4_B2C(sbi, rc->last_do_lblk); 1275 1276 /* 1277 * decrease the delayed count by the number of clusters at the 1278 * ends of the range that still contain delayed blocks - 1279 * these clusters still need to be reserved 1280 */ 1281 left_delayed = right_delayed = false; 1282 1283 es = rc->left_es; 1284 while (es && ext4_es_end(es) >= 1285 EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) { 1286 if (ext4_es_is_delayed(es)) { 1287 rc->ndelayed--; 1288 left_delayed = true; 1289 break; 1290 } 1291 node = rb_prev(&es->rb_node); 1292 if (!node) 1293 break; 1294 es = rb_entry(node, struct extent_status, rb_node); 1295 } 1296 if (right_es && (!left_delayed || first_lclu != last_lclu)) { 1297 if (end < ext4_es_end(right_es)) { 1298 es = right_es; 1299 } else { 1300 node = rb_next(&right_es->rb_node); 1301 es = node ? rb_entry(node, struct extent_status, 1302 rb_node) : NULL; 1303 } 1304 while (es && es->es_lblk <= 1305 EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) { 1306 if (ext4_es_is_delayed(es)) { 1307 rc->ndelayed--; 1308 right_delayed = true; 1309 break; 1310 } 1311 node = rb_next(&es->rb_node); 1312 if (!node) 1313 break; 1314 es = rb_entry(node, struct extent_status, 1315 rb_node); 1316 } 1317 } 1318 1319 /* 1320 * Determine the block range that should be searched for 1321 * pending reservations, if any. Clusters on the ends of the 1322 * original removed range containing delayed blocks are 1323 * excluded. They've already been accounted for and it's not 1324 * possible to determine if an associated pending reservation 1325 * should be released with the information available in the 1326 * extents status tree. 1327 */ 1328 if (first_lclu == last_lclu) { 1329 if (left_delayed | right_delayed) 1330 count_pending = false; 1331 else 1332 count_pending = true; 1333 } else { 1334 if (left_delayed) 1335 first_lclu++; 1336 if (right_delayed) 1337 last_lclu--; 1338 if (first_lclu <= last_lclu) 1339 count_pending = true; 1340 else 1341 count_pending = false; 1342 } 1343 1344 /* 1345 * a pending reservation found between first_lclu and last_lclu 1346 * represents an allocated cluster that contained at least one 1347 * delayed block, so the delayed total must be reduced by one 1348 * for each pending reservation found and released 1349 */ 1350 if (count_pending) { 1351 pr = __pr_tree_search(&tree->root, first_lclu); 1352 while (pr && pr->lclu <= last_lclu) { 1353 rc->ndelayed--; 1354 node = rb_next(&pr->rb_node); 1355 rb_erase(&pr->rb_node, &tree->root); 1356 __free_pending(pr); 1357 if (!node) 1358 break; 1359 pr = rb_entry(node, struct pending_reservation, 1360 rb_node); 1361 } 1362 } 1363 } 1364 return rc->ndelayed; 1365 } 1366 1367 1368 /* 1369 * __es_remove_extent - removes block range from extent status tree 1370 * 1371 * @inode - file containing range 1372 * @lblk - first block in range 1373 * @end - last block in range 1374 * @reserved - number of cluster reservations released 1375 * @prealloc - pre-allocated es to avoid memory allocation failures 1376 * 1377 * If @reserved is not NULL and delayed allocation is enabled, counts 1378 * block/cluster reservations freed by removing range and if bigalloc 1379 * enabled cancels pending reservations as needed. Returns 0 on success, 1380 * error code on failure. 1381 */ 1382 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, 1383 ext4_lblk_t end, int *reserved, 1384 struct extent_status *prealloc) 1385 { 1386 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; 1387 struct rb_node *node; 1388 struct extent_status *es; 1389 struct extent_status orig_es; 1390 ext4_lblk_t len1, len2; 1391 ext4_fsblk_t block; 1392 int err = 0; 1393 bool count_reserved = true; 1394 struct rsvd_count rc; 1395 1396 if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC)) 1397 count_reserved = false; 1398 1399 es = __es_tree_search(&tree->root, lblk); 1400 if (!es) 1401 goto out; 1402 if (es->es_lblk > end) 1403 goto out; 1404 1405 /* Simply invalidate cache_es. */ 1406 tree->cache_es = NULL; 1407 if (count_reserved) 1408 init_rsvd(inode, lblk, es, &rc); 1409 1410 orig_es.es_lblk = es->es_lblk; 1411 orig_es.es_len = es->es_len; 1412 orig_es.es_pblk = es->es_pblk; 1413 1414 len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0; 1415 len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0; 1416 if (len1 > 0) 1417 es->es_len = len1; 1418 if (len2 > 0) { 1419 if (len1 > 0) { 1420 struct extent_status newes; 1421 1422 newes.es_lblk = end + 1; 1423 newes.es_len = len2; 1424 block = 0x7FDEADBEEFULL; 1425 if (ext4_es_is_written(&orig_es) || 1426 ext4_es_is_unwritten(&orig_es)) 1427 block = ext4_es_pblock(&orig_es) + 1428 orig_es.es_len - len2; 1429 ext4_es_store_pblock_status(&newes, block, 1430 ext4_es_status(&orig_es)); 1431 err = __es_insert_extent(inode, &newes, prealloc); 1432 if (err) { 1433 if (!ext4_es_must_keep(&newes)) 1434 return 0; 1435 1436 es->es_lblk = orig_es.es_lblk; 1437 es->es_len = orig_es.es_len; 1438 goto out; 1439 } 1440 } else { 1441 es->es_lblk = end + 1; 1442 es->es_len = len2; 1443 if (ext4_es_is_written(es) || 1444 ext4_es_is_unwritten(es)) { 1445 block = orig_es.es_pblk + orig_es.es_len - len2; 1446 ext4_es_store_pblock(es, block); 1447 } 1448 } 1449 if (count_reserved) 1450 count_rsvd(inode, orig_es.es_lblk + len1, 1451 orig_es.es_len - len1 - len2, &orig_es, &rc); 1452 goto out_get_reserved; 1453 } 1454 1455 if (len1 > 0) { 1456 if (count_reserved) 1457 count_rsvd(inode, lblk, orig_es.es_len - len1, 1458 &orig_es, &rc); 1459 node = rb_next(&es->rb_node); 1460 if (node) 1461 es = rb_entry(node, struct extent_status, rb_node); 1462 else 1463 es = NULL; 1464 } 1465 1466 while (es && ext4_es_end(es) <= end) { 1467 if (count_reserved) 1468 count_rsvd(inode, es->es_lblk, es->es_len, es, &rc); 1469 node = rb_next(&es->rb_node); 1470 rb_erase(&es->rb_node, &tree->root); 1471 ext4_es_free_extent(inode, es); 1472 if (!node) { 1473 es = NULL; 1474 break; 1475 } 1476 es = rb_entry(node, struct extent_status, rb_node); 1477 } 1478 1479 if (es && es->es_lblk < end + 1) { 1480 ext4_lblk_t orig_len = es->es_len; 1481 1482 len1 = ext4_es_end(es) - end; 1483 if (count_reserved) 1484 count_rsvd(inode, es->es_lblk, orig_len - len1, 1485 es, &rc); 1486 es->es_lblk = end + 1; 1487 es->es_len = len1; 1488 if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) { 1489 block = es->es_pblk + orig_len - len1; 1490 ext4_es_store_pblock(es, block); 1491 } 1492 } 1493 1494 out_get_reserved: 1495 if (count_reserved) 1496 *reserved = get_rsvd(inode, end, es, &rc); 1497 out: 1498 return err; 1499 } 1500 1501 /* 1502 * ext4_es_remove_extent - removes block range from extent status tree 1503 * 1504 * @inode - file containing range 1505 * @lblk - first block in range 1506 * @len - number of blocks to remove 1507 * 1508 * Reduces block/cluster reservation count and for bigalloc cancels pending 1509 * reservations as needed. 1510 */ 1511 void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, 1512 ext4_lblk_t len) 1513 { 1514 ext4_lblk_t end; 1515 int err = 0; 1516 int reserved = 0; 1517 struct extent_status *es = NULL; 1518 1519 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) 1520 return; 1521 1522 trace_ext4_es_remove_extent(inode, lblk, len); 1523 es_debug("remove [%u/%u) from extent status tree of inode %lu\n", 1524 lblk, len, inode->i_ino); 1525 1526 if (!len) 1527 return; 1528 1529 end = lblk + len - 1; 1530 BUG_ON(end < lblk); 1531 1532 retry: 1533 if (err && !es) 1534 es = __es_alloc_extent(true); 1535 /* 1536 * ext4_clear_inode() depends on us taking i_es_lock unconditionally 1537 * so that we are sure __es_shrink() is done with the inode before it 1538 * is reclaimed. 1539 */ 1540 write_lock(&EXT4_I(inode)->i_es_lock); 1541 err = __es_remove_extent(inode, lblk, end, &reserved, es); 1542 /* Free preallocated extent if it didn't get used. */ 1543 if (es) { 1544 if (!es->es_len) 1545 __es_free_extent(es); 1546 es = NULL; 1547 } 1548 write_unlock(&EXT4_I(inode)->i_es_lock); 1549 if (err) 1550 goto retry; 1551 1552 ext4_es_print_tree(inode); 1553 ext4_da_release_space(inode, reserved); 1554 return; 1555 } 1556 1557 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, 1558 struct ext4_inode_info *locked_ei) 1559 { 1560 struct ext4_inode_info *ei; 1561 struct ext4_es_stats *es_stats; 1562 ktime_t start_time; 1563 u64 scan_time; 1564 int nr_to_walk; 1565 int nr_shrunk = 0; 1566 int retried = 0, nr_skipped = 0; 1567 1568 es_stats = &sbi->s_es_stats; 1569 start_time = ktime_get(); 1570 1571 retry: 1572 spin_lock(&sbi->s_es_lock); 1573 nr_to_walk = sbi->s_es_nr_inode; 1574 while (nr_to_walk-- > 0) { 1575 if (list_empty(&sbi->s_es_list)) { 1576 spin_unlock(&sbi->s_es_lock); 1577 goto out; 1578 } 1579 ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info, 1580 i_es_list); 1581 /* Move the inode to the tail */ 1582 list_move_tail(&ei->i_es_list, &sbi->s_es_list); 1583 1584 /* 1585 * Normally we try hard to avoid shrinking precached inodes, 1586 * but we will as a last resort. 1587 */ 1588 if (!retried && ext4_test_inode_state(&ei->vfs_inode, 1589 EXT4_STATE_EXT_PRECACHED)) { 1590 nr_skipped++; 1591 continue; 1592 } 1593 1594 if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) { 1595 nr_skipped++; 1596 continue; 1597 } 1598 /* 1599 * Now we hold i_es_lock which protects us from inode reclaim 1600 * freeing inode under us 1601 */ 1602 spin_unlock(&sbi->s_es_lock); 1603 1604 nr_shrunk += es_reclaim_extents(ei, &nr_to_scan); 1605 write_unlock(&ei->i_es_lock); 1606 1607 if (nr_to_scan <= 0) 1608 goto out; 1609 spin_lock(&sbi->s_es_lock); 1610 } 1611 spin_unlock(&sbi->s_es_lock); 1612 1613 /* 1614 * If we skipped any inodes, and we weren't able to make any 1615 * forward progress, try again to scan precached inodes. 1616 */ 1617 if ((nr_shrunk == 0) && nr_skipped && !retried) { 1618 retried++; 1619 goto retry; 1620 } 1621 1622 if (locked_ei && nr_shrunk == 0) 1623 nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan); 1624 1625 out: 1626 scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); 1627 if (likely(es_stats->es_stats_scan_time)) 1628 es_stats->es_stats_scan_time = (scan_time + 1629 es_stats->es_stats_scan_time*3) / 4; 1630 else 1631 es_stats->es_stats_scan_time = scan_time; 1632 if (scan_time > es_stats->es_stats_max_scan_time) 1633 es_stats->es_stats_max_scan_time = scan_time; 1634 if (likely(es_stats->es_stats_shrunk)) 1635 es_stats->es_stats_shrunk = (nr_shrunk + 1636 es_stats->es_stats_shrunk*3) / 4; 1637 else 1638 es_stats->es_stats_shrunk = nr_shrunk; 1639 1640 trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, 1641 nr_skipped, retried); 1642 return nr_shrunk; 1643 } 1644 1645 static unsigned long ext4_es_count(struct shrinker *shrink, 1646 struct shrink_control *sc) 1647 { 1648 unsigned long nr; 1649 struct ext4_sb_info *sbi; 1650 1651 sbi = shrink->private_data; 1652 nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); 1653 trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr); 1654 return nr; 1655 } 1656 1657 static unsigned long ext4_es_scan(struct shrinker *shrink, 1658 struct shrink_control *sc) 1659 { 1660 struct ext4_sb_info *sbi = shrink->private_data; 1661 int nr_to_scan = sc->nr_to_scan; 1662 int ret, nr_shrunk; 1663 1664 ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); 1665 trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret); 1666 1667 nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL); 1668 1669 ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); 1670 trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret); 1671 return nr_shrunk; 1672 } 1673 1674 int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v) 1675 { 1676 struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private); 1677 struct ext4_es_stats *es_stats = &sbi->s_es_stats; 1678 struct ext4_inode_info *ei, *max = NULL; 1679 unsigned int inode_cnt = 0; 1680 1681 if (v != SEQ_START_TOKEN) 1682 return 0; 1683 1684 /* here we just find an inode that has the max nr. of objects */ 1685 spin_lock(&sbi->s_es_lock); 1686 list_for_each_entry(ei, &sbi->s_es_list, i_es_list) { 1687 inode_cnt++; 1688 if (max && max->i_es_all_nr < ei->i_es_all_nr) 1689 max = ei; 1690 else if (!max) 1691 max = ei; 1692 } 1693 spin_unlock(&sbi->s_es_lock); 1694 1695 seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n", 1696 percpu_counter_sum_positive(&es_stats->es_stats_all_cnt), 1697 percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt)); 1698 seq_printf(seq, " %lld/%lld cache hits/misses\n", 1699 percpu_counter_sum_positive(&es_stats->es_stats_cache_hits), 1700 percpu_counter_sum_positive(&es_stats->es_stats_cache_misses)); 1701 if (inode_cnt) 1702 seq_printf(seq, " %d inodes on list\n", inode_cnt); 1703 1704 seq_printf(seq, "average:\n %llu us scan time\n", 1705 div_u64(es_stats->es_stats_scan_time, 1000)); 1706 seq_printf(seq, " %lu shrunk objects\n", es_stats->es_stats_shrunk); 1707 if (inode_cnt) 1708 seq_printf(seq, 1709 "maximum:\n %lu inode (%u objects, %u reclaimable)\n" 1710 " %llu us max scan time\n", 1711 max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr, 1712 div_u64(es_stats->es_stats_max_scan_time, 1000)); 1713 1714 return 0; 1715 } 1716 1717 int ext4_es_register_shrinker(struct ext4_sb_info *sbi) 1718 { 1719 int err; 1720 1721 /* Make sure we have enough bits for physical block number */ 1722 BUILD_BUG_ON(ES_SHIFT < 48); 1723 INIT_LIST_HEAD(&sbi->s_es_list); 1724 sbi->s_es_nr_inode = 0; 1725 spin_lock_init(&sbi->s_es_lock); 1726 sbi->s_es_stats.es_stats_shrunk = 0; 1727 err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0, 1728 GFP_KERNEL); 1729 if (err) 1730 return err; 1731 err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0, 1732 GFP_KERNEL); 1733 if (err) 1734 goto err1; 1735 sbi->s_es_stats.es_stats_scan_time = 0; 1736 sbi->s_es_stats.es_stats_max_scan_time = 0; 1737 err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL); 1738 if (err) 1739 goto err2; 1740 err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL); 1741 if (err) 1742 goto err3; 1743 1744 sbi->s_es_shrinker = shrinker_alloc(0, "ext4-es:%s", sbi->s_sb->s_id); 1745 if (!sbi->s_es_shrinker) { 1746 err = -ENOMEM; 1747 goto err4; 1748 } 1749 1750 sbi->s_es_shrinker->scan_objects = ext4_es_scan; 1751 sbi->s_es_shrinker->count_objects = ext4_es_count; 1752 sbi->s_es_shrinker->private_data = sbi; 1753 1754 shrinker_register(sbi->s_es_shrinker); 1755 1756 return 0; 1757 err4: 1758 percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt); 1759 err3: 1760 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); 1761 err2: 1762 percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses); 1763 err1: 1764 percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits); 1765 return err; 1766 } 1767 1768 void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) 1769 { 1770 percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits); 1771 percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses); 1772 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); 1773 percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt); 1774 shrinker_free(sbi->s_es_shrinker); 1775 } 1776 1777 /* 1778 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at 1779 * most *nr_to_scan extents, update *nr_to_scan accordingly. 1780 * 1781 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan. 1782 * Increment *nr_shrunk by the number of reclaimed extents. Also update 1783 * ei->i_es_shrink_lblk to where we should continue scanning. 1784 */ 1785 static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end, 1786 int *nr_to_scan, int *nr_shrunk) 1787 { 1788 struct inode *inode = &ei->vfs_inode; 1789 struct ext4_es_tree *tree = &ei->i_es_tree; 1790 struct extent_status *es; 1791 struct rb_node *node; 1792 1793 es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk); 1794 if (!es) 1795 goto out_wrap; 1796 1797 while (*nr_to_scan > 0) { 1798 if (es->es_lblk > end) { 1799 ei->i_es_shrink_lblk = end + 1; 1800 return 0; 1801 } 1802 1803 (*nr_to_scan)--; 1804 node = rb_next(&es->rb_node); 1805 1806 if (ext4_es_must_keep(es)) 1807 goto next; 1808 if (ext4_es_is_referenced(es)) { 1809 ext4_es_clear_referenced(es); 1810 goto next; 1811 } 1812 1813 rb_erase(&es->rb_node, &tree->root); 1814 ext4_es_free_extent(inode, es); 1815 (*nr_shrunk)++; 1816 next: 1817 if (!node) 1818 goto out_wrap; 1819 es = rb_entry(node, struct extent_status, rb_node); 1820 } 1821 ei->i_es_shrink_lblk = es->es_lblk; 1822 return 1; 1823 out_wrap: 1824 ei->i_es_shrink_lblk = 0; 1825 return 0; 1826 } 1827 1828 static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan) 1829 { 1830 struct inode *inode = &ei->vfs_inode; 1831 int nr_shrunk = 0; 1832 ext4_lblk_t start = ei->i_es_shrink_lblk; 1833 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, 1834 DEFAULT_RATELIMIT_BURST); 1835 1836 if (ei->i_es_shk_nr == 0) 1837 return 0; 1838 1839 if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) && 1840 __ratelimit(&_rs)) 1841 ext4_warning(inode->i_sb, "forced shrink of precached extents"); 1842 1843 if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) && 1844 start != 0) 1845 es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk); 1846 1847 ei->i_es_tree.cache_es = NULL; 1848 return nr_shrunk; 1849 } 1850 1851 /* 1852 * Called to support EXT4_IOC_CLEAR_ES_CACHE. We can only remove 1853 * discretionary entries from the extent status cache. (Some entries 1854 * must be present for proper operations.) 1855 */ 1856 void ext4_clear_inode_es(struct inode *inode) 1857 { 1858 struct ext4_inode_info *ei = EXT4_I(inode); 1859 struct extent_status *es; 1860 struct ext4_es_tree *tree; 1861 struct rb_node *node; 1862 1863 write_lock(&ei->i_es_lock); 1864 tree = &EXT4_I(inode)->i_es_tree; 1865 tree->cache_es = NULL; 1866 node = rb_first(&tree->root); 1867 while (node) { 1868 es = rb_entry(node, struct extent_status, rb_node); 1869 node = rb_next(node); 1870 if (!ext4_es_must_keep(es)) { 1871 rb_erase(&es->rb_node, &tree->root); 1872 ext4_es_free_extent(inode, es); 1873 } 1874 } 1875 ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED); 1876 write_unlock(&ei->i_es_lock); 1877 } 1878 1879 #ifdef ES_DEBUG__ 1880 static void ext4_print_pending_tree(struct inode *inode) 1881 { 1882 struct ext4_pending_tree *tree; 1883 struct rb_node *node; 1884 struct pending_reservation *pr; 1885 1886 printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino); 1887 tree = &EXT4_I(inode)->i_pending_tree; 1888 node = rb_first(&tree->root); 1889 while (node) { 1890 pr = rb_entry(node, struct pending_reservation, rb_node); 1891 printk(KERN_DEBUG " %u", pr->lclu); 1892 node = rb_next(node); 1893 } 1894 printk(KERN_DEBUG "\n"); 1895 } 1896 #else 1897 #define ext4_print_pending_tree(inode) 1898 #endif 1899 1900 int __init ext4_init_pending(void) 1901 { 1902 ext4_pending_cachep = KMEM_CACHE(pending_reservation, SLAB_RECLAIM_ACCOUNT); 1903 if (ext4_pending_cachep == NULL) 1904 return -ENOMEM; 1905 return 0; 1906 } 1907 1908 void ext4_exit_pending(void) 1909 { 1910 kmem_cache_destroy(ext4_pending_cachep); 1911 } 1912 1913 void ext4_init_pending_tree(struct ext4_pending_tree *tree) 1914 { 1915 tree->root = RB_ROOT; 1916 } 1917 1918 /* 1919 * __get_pending - retrieve a pointer to a pending reservation 1920 * 1921 * @inode - file containing the pending cluster reservation 1922 * @lclu - logical cluster of interest 1923 * 1924 * Returns a pointer to a pending reservation if it's a member of 1925 * the set, and NULL if not. Must be called holding i_es_lock. 1926 */ 1927 static struct pending_reservation *__get_pending(struct inode *inode, 1928 ext4_lblk_t lclu) 1929 { 1930 struct ext4_pending_tree *tree; 1931 struct rb_node *node; 1932 struct pending_reservation *pr = NULL; 1933 1934 tree = &EXT4_I(inode)->i_pending_tree; 1935 node = (&tree->root)->rb_node; 1936 1937 while (node) { 1938 pr = rb_entry(node, struct pending_reservation, rb_node); 1939 if (lclu < pr->lclu) 1940 node = node->rb_left; 1941 else if (lclu > pr->lclu) 1942 node = node->rb_right; 1943 else if (lclu == pr->lclu) 1944 return pr; 1945 } 1946 return NULL; 1947 } 1948 1949 /* 1950 * __insert_pending - adds a pending cluster reservation to the set of 1951 * pending reservations 1952 * 1953 * @inode - file containing the cluster 1954 * @lblk - logical block in the cluster to be added 1955 * @prealloc - preallocated pending entry 1956 * 1957 * Returns 1 on successful insertion and -ENOMEM on failure. If the 1958 * pending reservation is already in the set, returns successfully. 1959 */ 1960 static int __insert_pending(struct inode *inode, ext4_lblk_t lblk, 1961 struct pending_reservation **prealloc) 1962 { 1963 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1964 struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree; 1965 struct rb_node **p = &tree->root.rb_node; 1966 struct rb_node *parent = NULL; 1967 struct pending_reservation *pr; 1968 ext4_lblk_t lclu; 1969 int ret = 0; 1970 1971 lclu = EXT4_B2C(sbi, lblk); 1972 /* search to find parent for insertion */ 1973 while (*p) { 1974 parent = *p; 1975 pr = rb_entry(parent, struct pending_reservation, rb_node); 1976 1977 if (lclu < pr->lclu) { 1978 p = &(*p)->rb_left; 1979 } else if (lclu > pr->lclu) { 1980 p = &(*p)->rb_right; 1981 } else { 1982 /* pending reservation already inserted */ 1983 goto out; 1984 } 1985 } 1986 1987 if (likely(*prealloc == NULL)) { 1988 pr = __alloc_pending(false); 1989 if (!pr) { 1990 ret = -ENOMEM; 1991 goto out; 1992 } 1993 } else { 1994 pr = *prealloc; 1995 *prealloc = NULL; 1996 } 1997 pr->lclu = lclu; 1998 1999 rb_link_node(&pr->rb_node, parent, p); 2000 rb_insert_color(&pr->rb_node, &tree->root); 2001 ret = 1; 2002 2003 out: 2004 return ret; 2005 } 2006 2007 /* 2008 * __remove_pending - removes a pending cluster reservation from the set 2009 * of pending reservations 2010 * 2011 * @inode - file containing the cluster 2012 * @lblk - logical block in the pending cluster reservation to be removed 2013 * 2014 * Returns successfully if pending reservation is not a member of the set. 2015 */ 2016 static void __remove_pending(struct inode *inode, ext4_lblk_t lblk) 2017 { 2018 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2019 struct pending_reservation *pr; 2020 struct ext4_pending_tree *tree; 2021 2022 pr = __get_pending(inode, EXT4_B2C(sbi, lblk)); 2023 if (pr != NULL) { 2024 tree = &EXT4_I(inode)->i_pending_tree; 2025 rb_erase(&pr->rb_node, &tree->root); 2026 __free_pending(pr); 2027 } 2028 } 2029 2030 /* 2031 * ext4_remove_pending - removes a pending cluster reservation from the set 2032 * of pending reservations 2033 * 2034 * @inode - file containing the cluster 2035 * @lblk - logical block in the pending cluster reservation to be removed 2036 * 2037 * Locking for external use of __remove_pending. 2038 */ 2039 void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk) 2040 { 2041 struct ext4_inode_info *ei = EXT4_I(inode); 2042 2043 write_lock(&ei->i_es_lock); 2044 __remove_pending(inode, lblk); 2045 write_unlock(&ei->i_es_lock); 2046 } 2047 2048 /* 2049 * ext4_is_pending - determine whether a cluster has a pending reservation 2050 * on it 2051 * 2052 * @inode - file containing the cluster 2053 * @lblk - logical block in the cluster 2054 * 2055 * Returns true if there's a pending reservation for the cluster in the 2056 * set of pending reservations, and false if not. 2057 */ 2058 bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk) 2059 { 2060 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2061 struct ext4_inode_info *ei = EXT4_I(inode); 2062 bool ret; 2063 2064 read_lock(&ei->i_es_lock); 2065 ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL); 2066 read_unlock(&ei->i_es_lock); 2067 2068 return ret; 2069 } 2070 2071 /* 2072 * ext4_es_insert_delayed_extent - adds some delayed blocks to the extents 2073 * status tree, adding a pending reservation 2074 * where needed 2075 * 2076 * @inode - file containing the newly added block 2077 * @lblk - start logical block to be added 2078 * @len - length of blocks to be added 2079 * @lclu_allocated/end_allocated - indicates whether a physical cluster has 2080 * been allocated for the logical cluster 2081 * that contains the start/end block. Note that 2082 * end_allocated should always be set to false 2083 * if the start and the end block are in the 2084 * same cluster 2085 */ 2086 void ext4_es_insert_delayed_extent(struct inode *inode, ext4_lblk_t lblk, 2087 ext4_lblk_t len, bool lclu_allocated, 2088 bool end_allocated) 2089 { 2090 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2091 struct extent_status newes; 2092 ext4_lblk_t end = lblk + len - 1; 2093 int err1 = 0, err2 = 0, err3 = 0; 2094 struct extent_status *es1 = NULL; 2095 struct extent_status *es2 = NULL; 2096 struct pending_reservation *pr1 = NULL; 2097 struct pending_reservation *pr2 = NULL; 2098 2099 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) 2100 return; 2101 2102 es_debug("add [%u/%u) delayed to extent status tree of inode %lu\n", 2103 lblk, len, inode->i_ino); 2104 if (!len) 2105 return; 2106 2107 WARN_ON_ONCE((EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) && 2108 end_allocated); 2109 2110 newes.es_lblk = lblk; 2111 newes.es_len = len; 2112 ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED); 2113 trace_ext4_es_insert_delayed_extent(inode, &newes, lclu_allocated, 2114 end_allocated); 2115 2116 ext4_es_insert_extent_check(inode, &newes); 2117 2118 retry: 2119 if (err1 && !es1) 2120 es1 = __es_alloc_extent(true); 2121 if ((err1 || err2) && !es2) 2122 es2 = __es_alloc_extent(true); 2123 if (err1 || err2 || err3 < 0) { 2124 if (lclu_allocated && !pr1) 2125 pr1 = __alloc_pending(true); 2126 if (end_allocated && !pr2) 2127 pr2 = __alloc_pending(true); 2128 } 2129 write_lock(&EXT4_I(inode)->i_es_lock); 2130 2131 err1 = __es_remove_extent(inode, lblk, end, NULL, es1); 2132 if (err1 != 0) 2133 goto error; 2134 /* Free preallocated extent if it didn't get used. */ 2135 if (es1) { 2136 if (!es1->es_len) 2137 __es_free_extent(es1); 2138 es1 = NULL; 2139 } 2140 2141 err2 = __es_insert_extent(inode, &newes, es2); 2142 if (err2 != 0) 2143 goto error; 2144 /* Free preallocated extent if it didn't get used. */ 2145 if (es2) { 2146 if (!es2->es_len) 2147 __es_free_extent(es2); 2148 es2 = NULL; 2149 } 2150 2151 if (lclu_allocated) { 2152 err3 = __insert_pending(inode, lblk, &pr1); 2153 if (err3 < 0) 2154 goto error; 2155 if (pr1) { 2156 __free_pending(pr1); 2157 pr1 = NULL; 2158 } 2159 } 2160 if (end_allocated) { 2161 err3 = __insert_pending(inode, end, &pr2); 2162 if (err3 < 0) 2163 goto error; 2164 if (pr2) { 2165 __free_pending(pr2); 2166 pr2 = NULL; 2167 } 2168 } 2169 error: 2170 write_unlock(&EXT4_I(inode)->i_es_lock); 2171 if (err1 || err2 || err3 < 0) 2172 goto retry; 2173 2174 ext4_es_print_tree(inode); 2175 ext4_print_pending_tree(inode); 2176 return; 2177 } 2178 2179 /* 2180 * __revise_pending - makes, cancels, or leaves unchanged pending cluster 2181 * reservations for a specified block range depending 2182 * upon the presence or absence of delayed blocks 2183 * outside the range within clusters at the ends of the 2184 * range 2185 * 2186 * @inode - file containing the range 2187 * @lblk - logical block defining the start of range 2188 * @len - length of range in blocks 2189 * @prealloc - preallocated pending entry 2190 * 2191 * Used after a newly allocated extent is added to the extents status tree. 2192 * Requires that the extents in the range have either written or unwritten 2193 * status. Must be called while holding i_es_lock. Returns number of new 2194 * inserts pending cluster on insert pendings, returns 0 on remove pendings, 2195 * return -ENOMEM on failure. 2196 */ 2197 static int __revise_pending(struct inode *inode, ext4_lblk_t lblk, 2198 ext4_lblk_t len, 2199 struct pending_reservation **prealloc) 2200 { 2201 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2202 ext4_lblk_t end = lblk + len - 1; 2203 ext4_lblk_t first, last; 2204 bool f_del = false, l_del = false; 2205 int pendings = 0; 2206 int ret = 0; 2207 2208 if (len == 0) 2209 return 0; 2210 2211 /* 2212 * Two cases - block range within single cluster and block range 2213 * spanning two or more clusters. Note that a cluster belonging 2214 * to a range starting and/or ending on a cluster boundary is treated 2215 * as if it does not contain a delayed extent. The new range may 2216 * have allocated space for previously delayed blocks out to the 2217 * cluster boundary, requiring that any pre-existing pending 2218 * reservation be canceled. Because this code only looks at blocks 2219 * outside the range, it should revise pending reservations 2220 * correctly even if the extent represented by the range can't be 2221 * inserted in the extents status tree due to ENOSPC. 2222 */ 2223 2224 if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) { 2225 first = EXT4_LBLK_CMASK(sbi, lblk); 2226 if (first != lblk) 2227 f_del = __es_scan_range(inode, &ext4_es_is_delayed, 2228 first, lblk - 1); 2229 if (f_del) { 2230 ret = __insert_pending(inode, first, prealloc); 2231 if (ret < 0) 2232 goto out; 2233 pendings += ret; 2234 } else { 2235 last = EXT4_LBLK_CMASK(sbi, end) + 2236 sbi->s_cluster_ratio - 1; 2237 if (last != end) 2238 l_del = __es_scan_range(inode, 2239 &ext4_es_is_delayed, 2240 end + 1, last); 2241 if (l_del) { 2242 ret = __insert_pending(inode, last, prealloc); 2243 if (ret < 0) 2244 goto out; 2245 pendings += ret; 2246 } else 2247 __remove_pending(inode, last); 2248 } 2249 } else { 2250 first = EXT4_LBLK_CMASK(sbi, lblk); 2251 if (first != lblk) 2252 f_del = __es_scan_range(inode, &ext4_es_is_delayed, 2253 first, lblk - 1); 2254 if (f_del) { 2255 ret = __insert_pending(inode, first, prealloc); 2256 if (ret < 0) 2257 goto out; 2258 pendings += ret; 2259 } else 2260 __remove_pending(inode, first); 2261 2262 last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1; 2263 if (last != end) 2264 l_del = __es_scan_range(inode, &ext4_es_is_delayed, 2265 end + 1, last); 2266 if (l_del) { 2267 ret = __insert_pending(inode, last, prealloc); 2268 if (ret < 0) 2269 goto out; 2270 pendings += ret; 2271 } else 2272 __remove_pending(inode, last); 2273 } 2274 out: 2275 return (ret < 0) ? ret : pendings; 2276 } 2277