// SPDX-License-Identifier: GPL-2.0
/*
 * fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion in the Ext4 Developer Workshop, we
 * will introduce a new structure called the io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warning), and to provide extent-level locking.
 * The delay extent tree is the first step to achieve this goal.  It was
 * originally built by Yongqiang Yang.  At that time it was called the
 * delay extent tree, whose only goal was to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it is still
 * called the delay extent tree in the first commit.  But to better
 * reflect what it does, it has been renamed to the extent status tree.
 *
 * Step1:
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  It maintains the delayed extent when a delayed
 * allocation is issued, and the delayed extent is written out or
 * invalidated.  Therefore the implementations of fiemap and bigalloc
 * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future works.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on demand, so the extent status tree
 * may not contain all of the extents in a file.  Meanwhile we define a
 * shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree will make the status tree cost too much
 * memory.  Written/unwritten/hole extents in the tree will be reclaimed
 * by this shrinker when we are under heavy memory pressure.  Delayed
 * extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement the extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies - complicated,
 * buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without the extent status tree.
 *   --	FIEMAP
 *	FIEMAP looks up the page cache to distinguish delayed allocations
 *	from holes.
 *
 *   --	SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   --	bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not, to determine whether
 *	quota reserving is needed for the cluster.
 *
 *   --	writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out if a block or a range of blocks
 * is under delayed allocation (belongs to a delayed extent) or not by
 * searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   --	extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike an extent in the extent tree, this extent is
 *	an in-memory struct; there is no corresponding on-disk data.
 *	There is no limit on the length of an extent, so it can contain
 *	as many blocks as are contiguous logically and physically.
 *
 *   --	extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with their status.  The extents in the tree
 *	are ordered by logical block number.
 *
 *   --	operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks),
 *	and removing an extent.
 *
 *   --	race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 *   --	memory consumption
 *	A fragmented extent tree will make the extent status tree cost
 *	too much memory.  Hence, we will reclaim written/unwritten/hole
 *	extents from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *   --	overhead
 *	There is a cached extent for write access, so if writes are not
 *	very random, adding-space operations run in O(1) time.
 *
 *   --	gain
 *	The code is much simpler, more readable, more maintainable and
 *	more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */

static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
			      struct extent_status *prealloc);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved,
			      struct extent_status *prealloc);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len);

int __init ext4_init_es(void)
{
	ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for an extent with a given offset.  If it
 * can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
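
/*
 * Example (illustrative only, not built): the search contract above -
 * "return the extent containing lblk, else the next extent beyond it,
 * else NULL" - is the same contract a userspace sketch over a sorted
 * array would implement.  The struct and function names below are made
 * up; only es_lblk/es_len mirror the real fields:
 *
 *	struct toy_es { unsigned int es_lblk, es_len; };
 *
 *	// Return the entry containing lblk, else the next one, else NULL.
 *	static struct toy_es *toy_search(struct toy_es *v, int n,
 *					 unsigned int lblk)
 *	{
 *		for (int i = 0; i < n; i++)
 *			if (lblk < v[i].es_lblk + v[i].es_len)
 *				return &v[i];	// containing or next
 *		return NULL;
 *	}
 *
 * With extents [0/3) and [10/5), looking up block 1 returns [0/3),
 * block 5 returns [10/5) (the next extent), and block 20 returns NULL.
 */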
/*
 * ext4_es_find_extent_range - find extent with specified status within block
 *                             range or next extent following block range in
 *                             extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 * @es - extent found, if any
 *
 * Find the first extent within the block range specified by @lblk and @end
 * in the extents status tree that satisfies @matching_fn.  If a match
 * is found, it's returned in @es.  If not, and a matching extent is found
 * beyond the block range, it's returned in @es.  If no match is found, an
 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
 * are 0.
 */
static void __es_find_extent_range(struct inode *inode,
				   int (*matching_fn)(struct extent_status *es),
				   ext4_lblk_t lblk, ext4_lblk_t end,
				   struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	WARN_ON(es == NULL);
	WARN_ON(end < lblk);

	tree = &EXT4_I(inode)->i_es_tree;

	/* see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u) %llu %x\n",
			 lblk, es1->es_lblk, es1->es_len,
			 ext4_es_pblock(es1), ext4_es_status(es1));
		goto out;
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !matching_fn(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (matching_fn(es1))
				break;
		}
	}

	if (es1 && matching_fn(es1)) {
		WRITE_ONCE(tree->cache_es, es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}
}

/*
 * Locking for __es_find_extent_range() for external use
 */
void ext4_es_find_extent_range(struct inode *inode,
			       int (*matching_fn)(struct extent_status *es),
			       ext4_lblk_t lblk, ext4_lblk_t end,
			       struct extent_status *es)
{
	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	trace_ext4_es_find_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	__es_find_extent_range(inode, matching_fn, lblk, end, es);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_extent_range_exit(inode, es);
}
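
/*
 * Example (illustrative only, not built): a minimal sketch of how a
 * caller might use ext4_es_find_extent_range() to locate the first
 * delayed extent at or after a given block, e.g. for a SEEK_DATA-style
 * scan.  The wrapper name es_is_delayed_fn is made up here; the rest
 * uses helpers visible in this file:
 *
 *	static int es_is_delayed_fn(struct extent_status *es)
 *	{
 *		return ext4_es_is_delayed(es);
 *	}
 *
 *	struct extent_status es;
 *
 *	ext4_es_find_extent_range(inode, &es_is_delayed_fn, lblk,
 *				  EXT_MAX_BLOCKS - 1, &es);
 *	if (es.es_len == 0)
 *		;	// no delayed extent in [lblk, EXT_MAX_BLOCKS)
 */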
/*
 * __es_scan_range - search block range for block with specified status
 *                   in extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns true if at least one block in the specified block range satisfies
 * the criterion specified by @matching_fn, and false if not.  If at least
 * one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_range(struct inode *inode,
			    int (*matching_fn)(struct extent_status *es),
			    ext4_lblk_t start, ext4_lblk_t end)
{
	struct extent_status es;

	__es_find_extent_range(inode, matching_fn, start, end, &es);
	if (es.es_len == 0)
		return false; /* no matching extent in the tree */
	else if (es.es_lblk <= start &&
		 start < es.es_lblk + es.es_len)
		return true;
	else if (start <= es.es_lblk && es.es_lblk <= end)
		return true;
	else
		return false;
}

/*
 * Locking for __es_scan_range() for external use
 */
bool ext4_es_scan_range(struct inode *inode,
			int (*matching_fn)(struct extent_status *es),
			ext4_lblk_t lblk, ext4_lblk_t end)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_range(inode, matching_fn, lblk, end);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

/*
 * __es_scan_clu - search cluster for block with specified status in
 *                 extents status tree
 *
 * @inode - file containing the cluster
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block in cluster to be searched
 *
 * Returns true if at least one extent in the cluster containing @lblk
 * satisfies the criterion specified by @matching_fn, and false if not.  If at
 * least one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_clu(struct inode *inode,
			  int (*matching_fn)(struct extent_status *es),
			  ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
}

/*
 * Locking for __es_scan_clu() for external use
 */
bool ext4_es_scan_clu(struct inode *inode,
		      int (*matching_fn)(struct extent_status *es),
		      ext4_lblk_t lblk)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_clu(inode, matching_fn, lblk);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}
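
/*
 * Example (illustrative only, not built): __es_scan_clu() above widens a
 * single block to the cluster containing it.  Assuming s_cluster_ratio is
 * a power of two (16 here, i.e. 4 cluster bits), the arithmetic can be
 * sketched in plain C; the names below are made up and only the
 * mask-and-widen logic mirrors EXT4_LBLK_CMASK():
 *
 *	unsigned int ratio = 16;			// blocks per cluster
 *	unsigned int lblk = 37;
 *	unsigned int lblk_start = lblk & ~(ratio - 1);	// 32
 *	unsigned int lblk_end = lblk_start + ratio - 1;	// 47
 *
 * so a status scan for block 37 inspects the whole block range [32, 47].
 */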
/*
 * Returns true if we cannot fail to allocate memory for this extent_status
 * entry and cannot reclaim it until its status changes.
 */
static inline bool ext4_es_must_keep(struct extent_status *es)
{
	/* fiemap, bigalloc, and seek_data/hole need to use it. */
	if (ext4_es_is_delayed(es))
		return true;

	return false;
}

static inline struct extent_status *__es_alloc_extent(bool nofail)
{
	if (!nofail)
		return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);

	return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
}

static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
		ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
{
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/* We never try to reclaim a must kept extent, so we don't count it. */
	if (!ext4_es_must_keep(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
}

static inline void __es_free_extent(struct extent_status *es)
{
	kmem_cache_free(ext4_es_cachep, es);
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when we can reclaim the extent. */
	if (!ext4_es_must_keep(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	__es_free_extent(es);
}
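
/*
 * Example (illustrative only, not built): __es_alloc_extent() above has
 * two modes - a non-sleeping GFP_ATOMIC attempt usable under i_es_lock,
 * and a GFP_KERNEL | __GFP_NOFAIL preallocation.  The insertion/removal
 * paths below combine them into a retry idiom: try without a
 * preallocated entry, and on failure drop the lock, preallocate with
 * nofail semantics, and retry.  A generic sketch of the pattern (all
 * names made up):
 *
 *	struct entry *prealloc = NULL;
 *	int err = 0;
 *
 *	retry:
 *	if (err && !prealloc)
 *		prealloc = alloc_entry_nofail();	// may sleep
 *	lock(&tree_lock);
 *	err = insert(tree, key, prealloc);	// consumes prealloc, or
 *						// falls back to atomic alloc
 *	unlock(&tree_lock);
 *	if (err)
 *		goto retry;
 */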
" 528 "The sum of lengths of es1 (%d) and es2 (%d) " 529 "is bigger than allowed file size (%d)\n", 530 es1->es_len, es2->es_len, EXT_MAX_BLOCKS); 531 WARN_ON(1); 532 return 0; 533 } 534 535 if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk) 536 return 0; 537 538 if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && 539 (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2))) 540 return 1; 541 542 if (ext4_es_is_hole(es1)) 543 return 1; 544 545 /* we need to check delayed extent is without unwritten status */ 546 if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1)) 547 return 1; 548 549 return 0; 550 } 551 552 static struct extent_status * 553 ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es) 554 { 555 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; 556 struct extent_status *es1; 557 struct rb_node *node; 558 559 node = rb_prev(&es->rb_node); 560 if (!node) 561 return es; 562 563 es1 = rb_entry(node, struct extent_status, rb_node); 564 if (ext4_es_can_be_merged(es1, es)) { 565 es1->es_len += es->es_len; 566 if (ext4_es_is_referenced(es)) 567 ext4_es_set_referenced(es1); 568 rb_erase(&es->rb_node, &tree->root); 569 ext4_es_free_extent(inode, es); 570 es = es1; 571 } 572 573 return es; 574 } 575 576 static struct extent_status * 577 ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es) 578 { 579 struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; 580 struct extent_status *es1; 581 struct rb_node *node; 582 583 node = rb_next(&es->rb_node); 584 if (!node) 585 return es; 586 587 es1 = rb_entry(node, struct extent_status, rb_node); 588 if (ext4_es_can_be_merged(es, es1)) { 589 es->es_len += es1->es_len; 590 if (ext4_es_is_referenced(es1)) 591 ext4_es_set_referenced(es); 592 rb_erase(node, &tree->root); 593 ext4_es_free_extent(inode, es1); 594 } 595 596 return es; 597 } 598 599 #ifdef ES_AGGRESSIVE_TEST 600 #include "ext4_extents.h" /* Needed when ES_AGGRESSIVE_TEST is defined */ 601 602 static void ext4_es_insert_extent_ext_check(struct inode *inode, 603 struct extent_status *es) 604 { 605 struct ext4_ext_path *path = NULL; 606 struct ext4_extent *ex; 607 ext4_lblk_t ee_block; 608 ext4_fsblk_t ee_start; 609 unsigned short ee_len; 610 int depth, ee_status, es_status; 611 612 path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE); 613 if (IS_ERR(path)) 614 return; 615 616 depth = ext_depth(inode); 617 ex = path[depth].p_ext; 618 619 if (ex) { 620 621 ee_block = le32_to_cpu(ex->ee_block); 622 ee_start = ext4_ext_pblock(ex); 623 ee_len = ext4_ext_get_actual_len(ex); 624 625 ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0; 626 es_status = ext4_es_is_unwritten(es) ? 1 : 0; 627 628 /* 629 * Make sure ex and es are not overlap when we try to insert 630 * a delayed/hole extent. 631 */ 632 if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) { 633 if (in_range(es->es_lblk, ee_block, ee_len)) { 634 pr_warn("ES insert assertion failed for " 635 "inode: %lu we can find an extent " 636 "at block [%d/%d/%llu/%c], but we " 637 "want to add a delayed/hole extent " 638 "[%d/%d/%llu/%x]\n", 639 inode->i_ino, ee_block, ee_len, 640 ee_start, ee_status ? 'u' : 'w', 641 es->es_lblk, es->es_len, 642 ext4_es_pblock(es), ext4_es_status(es)); 643 } 644 goto out; 645 } 646 647 /* 648 * We don't check ee_block == es->es_lblk, etc. because es 649 * might be a part of whole extent, vice versa. 
#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {
		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we are not adding a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_free_ext_path(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so
	 * we can't access the direct/indirect tree from outside of it.
	 * It would be too ugly to define this function in indirect.c.
	 */
	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG();
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
			      struct extent_status *prealloc)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG();
			return -EINVAL;
		}
	}

	if (prealloc)
		es = prealloc;
	else
		es = __es_alloc_extent(false);
	if (!es)
		return -ENOMEM;
	ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
			    newes->es_pblk);

	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}
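
/*
 * Example (illustrative only, not built): a hedged sketch of how a block
 * mapping path might record a freshly mapped range in the status tree
 * once the on-disk extent tree has been updated.  The surrounding
 * variable names are made up; ext4_es_insert_extent() below and the
 * status flags are the API used elsewhere in ext4:
 *
 *	// map->m_lblk/m_len/m_pblk describe the just-mapped range
 *	unsigned int status = unwritten ? EXTENT_STATUS_UNWRITTEN :
 *					  EXTENT_STATUS_WRITTEN;
 *
 *	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 *			      map->m_pblk, status);
 */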
/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 */
void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			   ext4_lblk_t len, ext4_fsblk_t pblk,
			   unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err1 = 0;
	int err2 = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct extent_status *es1 = NULL;
	struct extent_status *es2 = NULL;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				" delayed and written which can potentially "
				" cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

retry:
	if (err1 && !es1)
		es1 = __es_alloc_extent(true);
	if ((err1 || err2) && !es2)
		es2 = __es_alloc_extent(true);
	write_lock(&EXT4_I(inode)->i_es_lock);

	err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
	if (err1 != 0)
		goto error;
	/* Free preallocated extent if it didn't get used. */
	if (es1) {
		if (!es1->es_len)
			__es_free_extent(es1);
		es1 = NULL;
	}

	err2 = __es_insert_extent(inode, &newes, es2);
	if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
		err2 = 0;
	if (err2 != 0)
		goto error;
	/* Free preallocated extent if it didn't get used. */
	if (es2) {
		if (!es2->es_len)
			__es_free_extent(es2);
		es2 = NULL;
	}

	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
	    (status & EXTENT_STATUS_WRITTEN ||
	     status & EXTENT_STATUS_UNWRITTEN))
		__revise_pending(inode, lblk, len);
error:
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err1 || err2)
		goto retry;

	ext4_es_print_tree(inode);
}

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes, NULL);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}
/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t *next_lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find the extent in the cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
		es_debug("%u cached by [%u/%u)\n",
			 lblk, es1->es_lblk, es1->es_len);
		found = 1;
		goto out;
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		percpu_counter_inc(&stats->es_stats_cache_hits);
		if (next_lblk) {
			node = rb_next(&es1->rb_node);
			if (node) {
				es1 = rb_entry(node, struct extent_status,
					       rb_node);
				*next_lblk = es1->es_lblk;
			} else
				*next_lblk = 0;
		}
	} else {
		percpu_counter_inc(&stats->es_stats_cache_misses);
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}

struct rsvd_count {
	int ndelonly;
	bool first_do_lblk_found;
	ext4_lblk_t first_do_lblk;
	ext4_lblk_t last_do_lblk;
	struct extent_status *left_es;
	bool partial;
	ext4_lblk_t lclu;
};
/*
 * init_rsvd - initialize reserved count data before removing block range
 *	       in file from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @es - pointer to first extent in range
 * @rc - pointer to reserved count data
 *
 * Assumes es is not NULL
 */
static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
		      struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;

	rc->ndelonly = 0;

	/*
	 * for bigalloc, note that the first delonly block in the range has
	 * not been found, record the extent containing the block to the left
	 * of the region to be removed, if any, and note that there's no
	 * partial cluster to track
	 */
	if (sbi->s_cluster_ratio > 1) {
		rc->first_do_lblk_found = false;
		if (lblk > es->es_lblk) {
			rc->left_es = es;
		} else {
			node = rb_prev(&es->rb_node);
			rc->left_es = node ? rb_entry(node,
						      struct extent_status,
						      rb_node) : NULL;
		}
		rc->partial = false;
	}
}

/*
 * count_rsvd - count the clusters containing delayed and not unwritten
 *		(delonly) blocks in a range within an extent and add to
 *		the running tally in rsvd_count
 *
 * @inode - file containing extent
 * @lblk - first block in range
 * @len - length of range in blocks
 * @es - pointer to extent containing clusters to be counted
 * @rc - pointer to reserved count data
 *
 * Tracks partial clusters found at the beginning and end of extents so
 * they aren't overcounted when they span adjacent extents
 */
static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
		       struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t i, end, nclu;

	if (!ext4_es_is_delonly(es))
		return;

	WARN_ON(len <= 0);

	if (sbi->s_cluster_ratio == 1) {
		rc->ndelonly += (int) len;
		return;
	}

	/* bigalloc */

	i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
	end = lblk + (ext4_lblk_t) len - 1;
	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;

	/* record the first block of the first delonly extent seen */
	if (!rc->first_do_lblk_found) {
		rc->first_do_lblk = i;
		rc->first_do_lblk_found = true;
	}

	/* update the last lblk in the region seen so far */
	rc->last_do_lblk = end;

	/*
	 * if we're tracking a partial cluster and the current extent
	 * doesn't start with it, count it and stop tracking
	 */
	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
		rc->ndelonly++;
		rc->partial = false;
	}

	/*
	 * if the first cluster doesn't start on a cluster boundary but
	 * ends on one, count it
	 */
	if (EXT4_LBLK_COFF(sbi, i) != 0) {
		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
			rc->ndelonly++;
			rc->partial = false;
			i = EXT4_LBLK_CFILL(sbi, i) + 1;
		}
	}

	/*
	 * if the current cluster starts on a cluster boundary, count the
	 * number of whole delonly clusters in the extent
	 */
	if ((i + sbi->s_cluster_ratio - 1) <= end) {
		nclu = (end - i + 1) >> sbi->s_cluster_bits;
		rc->ndelonly += nclu;
		i += nclu << sbi->s_cluster_bits;
	}

	/*
	 * start tracking a partial cluster if there's a partial at the end
	 * of the current extent and we're not already tracking one
	 */
	if (!rc->partial && i <= end) {
		rc->partial = true;
		rc->lclu = EXT4_B2C(sbi, i);
	}
}
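
/*
 * Worked example (illustrative only): assume bigalloc with
 * s_cluster_ratio == 4 (s_cluster_bits == 2) and a single delonly extent
 * covering blocks 2..9 being removed in full.  count_rsvd() proceeds as:
 *
 *	i = 2, end = 9
 *	block 2 is not on a cluster boundary and its cluster ends at
 *	block 3 <= end, so the leading partial cluster 0 is counted:
 *	ndelonly = 1, i = 4
 *	one whole cluster fits in [4, 9]: ndelonly = 2, i = 8
 *	blocks 8..9 are a trailing partial of cluster 2, so tracking
 *	starts: partial = true, lclu = 2
 *
 * and get_rsvd() below counts the still-tracked trailing partial, for a
 * total of 3 clusters (0, 1 and 2), assuming no neighbouring delonly
 * extents and no pending reservations in the range.
 */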
/*
 * __pr_tree_search - search for a pending cluster reservation
 *
 * @root - root of pending reservation tree
 * @lclu - logical cluster to search for
 *
 * Returns the pending reservation for the cluster identified by @lclu
 * if found.  If not, returns a reservation for the next cluster if any,
 * and if not, returns NULL.
 */
static struct pending_reservation *__pr_tree_search(struct rb_root *root,
						    ext4_lblk_t lclu)
{
	struct rb_node *node = root->rb_node;
	struct pending_reservation *pr = NULL;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;
	}
	if (pr && lclu < pr->lclu)
		return pr;
	if (pr && lclu > pr->lclu) {
		node = rb_next(&pr->rb_node);
		return node ? rb_entry(node, struct pending_reservation,
				       rb_node) : NULL;
	}
	return NULL;
}
/*
 * get_rsvd - calculates and returns the number of cluster reservations to be
 *	      released when removing a block range from the extent status tree
 *	      and releases any pending reservations within the range
 *
 * @inode - file containing block range
 * @end - last block in range
 * @right_es - pointer to extent containing next block beyond end or NULL
 * @rc - pointer to reserved count data
 *
 * The number of reservations to be released is equal to the number of
 * clusters containing delayed and not unwritten (delonly) blocks within
 * the range, minus the number of clusters still containing delonly blocks
 * at the ends of the range, and minus the number of pending reservations
 * within the range.
 */
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
			     struct extent_status *right_es,
			     struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	bool left_delonly, right_delonly, count_pending;
	struct extent_status *es;

	if (sbi->s_cluster_ratio > 1) {
		/* count any remaining partial cluster */
		if (rc->partial)
			rc->ndelonly++;

		if (rc->ndelonly == 0)
			return 0;

		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);

		/*
		 * decrease the delonly count by the number of clusters at the
		 * ends of the range that still contain delonly blocks -
		 * these clusters still need to be reserved
		 */
		left_delonly = right_delonly = false;

		es = rc->left_es;
		while (es && ext4_es_end(es) >=
		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
			if (ext4_es_is_delonly(es)) {
				rc->ndelonly--;
				left_delonly = true;
				break;
			}
			node = rb_prev(&es->rb_node);
			if (!node)
				break;
			es = rb_entry(node, struct extent_status, rb_node);
		}
		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
			if (end < ext4_es_end(right_es)) {
				es = right_es;
			} else {
				node = rb_next(&right_es->rb_node);
				es = node ? rb_entry(node,
						     struct extent_status,
						     rb_node) : NULL;
			}
			while (es && es->es_lblk <=
			       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
				if (ext4_es_is_delonly(es)) {
					rc->ndelonly--;
					right_delonly = true;
					break;
				}
				node = rb_next(&es->rb_node);
				if (!node)
					break;
				es = rb_entry(node, struct extent_status,
					      rb_node);
			}
		}

		/*
		 * Determine the block range that should be searched for
		 * pending reservations, if any.  Clusters on the ends of the
		 * original removed range containing delonly blocks are
		 * excluded.  They've already been accounted for and it's not
		 * possible to determine if an associated pending reservation
		 * should be released with the information available in the
		 * extents status tree.
		 */
		if (first_lclu == last_lclu) {
			if (left_delonly | right_delonly)
				count_pending = false;
			else
				count_pending = true;
		} else {
			if (left_delonly)
				first_lclu++;
			if (right_delonly)
				last_lclu--;
			if (first_lclu <= last_lclu)
				count_pending = true;
			else
				count_pending = false;
		}

		/*
		 * a pending reservation found between first_lclu and last_lclu
		 * represents an allocated cluster that contained at least one
		 * delonly block, so the delonly total must be reduced by one
		 * for each pending reservation found and released
		 */
		if (count_pending) {
			pr = __pr_tree_search(&tree->root, first_lclu);
			while (pr && pr->lclu <= last_lclu) {
				rc->ndelonly--;
				node = rb_next(&pr->rb_node);
				rb_erase(&pr->rb_node, &tree->root);
				kmem_cache_free(ext4_pending_cachep, pr);
				if (!node)
					break;
				pr = rb_entry(node,
					      struct pending_reservation,
					      rb_node);
			}
		}
	}
	return rc->ndelonly;
}

/*
 * __es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @end - last block in range
 * @reserved - number of cluster reservations released
 * @prealloc - pre-allocated es to avoid memory allocation failures
 *
 * If @reserved is not NULL and delayed allocation is enabled, counts
 * block/cluster reservations freed by removing range and if bigalloc
 * enabled cancels pending reservations as needed.  Returns 0 on success,
 * error code on failure.
 */
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved,
			      struct extent_status *prealloc)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err = 0;
	bool count_reserved = true;
	struct rsvd_count rc;

	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
		count_reserved = false;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;
	if (count_reserved)
		init_rsvd(inode, lblk, es, &rc);

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes, prealloc);
			if (err) {
				if (!ext4_es_must_keep(&newes))
					return 0;

				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
				   &orig_es, &rc);
		goto out_get_reserved;
	}

	if (len1 > 0) {
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1,
				   &orig_es, &rc);
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, orig_len - len1,
				   es, &rc);
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out_get_reserved:
	if (count_reserved)
		*reserved = get_rsvd(inode, end, es, &rc);
out:
	return err;
}

/*
 * ext4_es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @len - number of blocks to remove
 *
 * Reduces block/cluster reservation count and for bigalloc cancels pending
 * reservations as needed.
 */
void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			   ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;
	int reserved = 0;
	struct extent_status *es = NULL;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

retry:
	if (err && !es)
		es = __es_alloc_extent(true);
	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, &reserved, es);
	/* Free preallocated extent if it didn't get used. */
	if (es) {
		if (!es->es_len)
			__es_free_extent(es);
		es = NULL;
	}
	write_unlock(&EXT4_I(inode)->i_es_lock);
	if (err)
		goto retry;

	ext4_es_print_tree(inode);
	ext4_da_release_space(inode, reserved);
}

static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing the inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);
	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}
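
/*
 * Worked example (illustrative only): the scan statistics kept by
 * __es_shrink() above are smoothed with a 1/4-weight moving average,
 * new_avg = (sample + 3 * old_avg) / 4.  With an old average scan time
 * of 800ns and a new sample of 400ns:
 *
 *	new_avg = (400 + 3 * 800) / 4 = 2800 / 4 = 700
 *
 * so a single outlier moves the reported average only a quarter of the
 * way toward the new sample.
 */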
int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find the inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lld/%lld cache hits/misses\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			"maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			"  %llu us max scan time\n",
			max->vfs_inode.i_ino, max->i_es_all_nr,
			max->i_es_shk_nr,
			div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for the physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
				  GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
				  GFP_KERNEL);
	if (err)
		goto err1;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0,
				  GFP_KERNEL);
	if (err)
		goto err2;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0,
				  GFP_KERNEL);
	if (err)
		goto err3;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker, "ext4-es:%s",
				sbi->s_sb->s_id);
	if (err)
		goto err4;

	return 0;
err4:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err3:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}
/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end.  Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents.  Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;

	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);

		if (ext4_es_must_keep(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}
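
/*
 * Example (illustrative only, not built): es_reclaim_extents() above
 * resumes scanning where the previous pass stopped and wraps around, so
 * every extent in the inode is eventually visited even when nr_to_scan
 * runs out mid-tree.  The pattern in plain C (names made up):
 *
 *	// First scan [start, max]; if that hit the end of the tree
 *	// rather than exhausting the budget, wrap and scan [0, start - 1].
 *	if (!scan_range(tree, start, max, &budget) && start != 0)
 *		scan_range(tree, 0, start - 1, &budget);
 */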
/*
 * Called to support EXT4_IOC_CLEAR_ES_CACHE.  We can only remove
 * discretionary entries from the extent status cache.  (Some entries
 * must be present for proper operations.)
 */
void ext4_clear_inode_es(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct extent_status *es;
	struct ext4_es_tree *tree;
	struct rb_node *node;

	write_lock(&ei->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;
	tree->cache_es = NULL;
	node = rb_first(&tree->root);
	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(node);
		if (!ext4_es_must_keep(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
		}
	}
	ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
	write_unlock(&ei->i_es_lock);
}

#ifdef ES_DEBUG__
static void ext4_print_pending_tree(struct inode *inode)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr;

	printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_pending_tree;
	node = rb_first(&tree->root);
	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		printk(KERN_DEBUG " %u", pr->lclu);
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_print_pending_tree(inode)
#endif

int __init ext4_init_pending(void)
{
	ext4_pending_cachep = KMEM_CACHE(pending_reservation,
					 SLAB_RECLAIM_ACCOUNT);
	if (ext4_pending_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pending(void)
{
	kmem_cache_destroy(ext4_pending_cachep);
}

void ext4_init_pending_tree(struct ext4_pending_tree *tree)
{
	tree->root = RB_ROOT;
}

/*
 * __get_pending - retrieve a pointer to a pending reservation
 *
 * @inode - file containing the pending cluster reservation
 * @lclu - logical cluster of interest
 *
 * Returns a pointer to a pending reservation if it's a member of
 * the set, and NULL if not.  Must be called holding i_es_lock.
 */
static struct pending_reservation *__get_pending(struct inode *inode,
						 ext4_lblk_t lclu)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr = NULL;

	tree = &EXT4_I(inode)->i_pending_tree;
	node = (&tree->root)->rb_node;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else if (lclu == pr->lclu)
			return pr;
	}
	return NULL;
}
/*
 * __insert_pending - adds a pending cluster reservation to the set of
 *                    pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster to be added
 *
 * Returns 0 on successful insertion and -ENOMEM on failure.  If the
 * pending reservation is already in the set, returns successfully.
 */
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
        struct rb_node **p = &tree->root.rb_node;
        struct rb_node *parent = NULL;
        struct pending_reservation *pr;
        ext4_lblk_t lclu;
        int ret = 0;

        lclu = EXT4_B2C(sbi, lblk);
        /* search to find parent for insertion */
        while (*p) {
                parent = *p;
                pr = rb_entry(parent, struct pending_reservation, rb_node);

                if (lclu < pr->lclu) {
                        p = &(*p)->rb_left;
                } else if (lclu > pr->lclu) {
                        p = &(*p)->rb_right;
                } else {
                        /* pending reservation already inserted */
                        goto out;
                }
        }

        pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
        if (pr == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        pr->lclu = lclu;

        rb_link_node(&pr->rb_node, parent, p);
        rb_insert_color(&pr->rb_node, &tree->root);

out:
        return ret;
}

/*
 * __remove_pending - removes a pending cluster reservation from the set
 *                    of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Does nothing if the pending reservation is not a member of the set.
 */
static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct pending_reservation *pr;
        struct ext4_pending_tree *tree;

        pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
        if (pr != NULL) {
                tree = &EXT4_I(inode)->i_pending_tree;
                rb_erase(&pr->rb_node, &tree->root);
                kmem_cache_free(ext4_pending_cachep, pr);
        }
}

/*
 * ext4_remove_pending - removes a pending cluster reservation from the set
 *                       of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Locking for external use of __remove_pending().
 */
void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
        struct ext4_inode_info *ei = EXT4_I(inode);

        write_lock(&ei->i_es_lock);
        __remove_pending(inode, lblk);
        write_unlock(&ei->i_es_lock);
}
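
#ifdef ES_DEBUG__
/*
 * Illustrative sketch only: a locked insertion wrapper mirroring
 * ext4_remove_pending() above.  No such ext4_add_pending() exists in
 * this file - callers such as ext4_es_insert_delayed_block() invoke
 * __insert_pending() directly while already holding i_es_lock - but
 * the locking pattern it shows is the same.
 */
static int __maybe_unused ext4_add_pending(struct inode *inode,
                                           ext4_lblk_t lblk)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        int ret;

        write_lock(&ei->i_es_lock);
        ret = __insert_pending(inode, lblk);
        write_unlock(&ei->i_es_lock);

        return ret;
}
#endif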
/*
 * ext4_is_pending - determine whether a cluster has a pending reservation
 *                   on it
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster
 *
 * Returns true if there's a pending reservation for the cluster in the
 * set of pending reservations, and false if not.
 */
bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
        bool ret;

        read_lock(&ei->i_es_lock);
        ret = (__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
        read_unlock(&ei->i_es_lock);

        return ret;
}

/*
 * ext4_es_insert_delayed_block - adds a delayed block to the extents status
 *                                tree, adding a pending reservation where
 *                                needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 * @allocated - indicates whether a physical cluster has been allocated for
 *              the logical cluster that contains the block
 */
void ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
                                  bool allocated)
{
        struct extent_status newes;
        int err1 = 0;
        int err2 = 0;
        struct extent_status *es1 = NULL;
        struct extent_status *es2 = NULL;

        if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
                return;

        es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
                 lblk, inode->i_ino);

        newes.es_lblk = lblk;
        newes.es_len = 1;
        ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
        trace_ext4_es_insert_delayed_block(inode, &newes, allocated);

        ext4_es_insert_extent_check(inode, &newes);

retry:
        if (err1 && !es1)
                es1 = __es_alloc_extent(true);
        if ((err1 || err2) && !es2)
                es2 = __es_alloc_extent(true);
        write_lock(&EXT4_I(inode)->i_es_lock);

        err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
        if (err1 != 0)
                goto error;
        /* Free preallocated extent if it didn't get used. */
        if (es1) {
                if (!es1->es_len)
                        __es_free_extent(es1);
                es1 = NULL;
        }

        err2 = __es_insert_extent(inode, &newes, es2);
        if (err2 != 0)
                goto error;
        /* Free preallocated extent if it didn't get used. */
        if (es2) {
                if (!es2->es_len)
                        __es_free_extent(es2);
                es2 = NULL;
        }

        if (allocated)
                __insert_pending(inode, lblk);
error:
        write_unlock(&EXT4_I(inode)->i_es_lock);
        if (err1 || err2)
                goto retry;

        ext4_es_print_tree(inode);
        ext4_print_pending_tree(inode);
}
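
#ifdef ES_DEBUG__
/*
 * Illustrative sketch only: how a delayed allocation path might record
 * a newly delayed block.  The @allocated decision belongs to the
 * caller's cluster mapping logic; example_clu_allocated() is a
 * hypothetical stand-in for that check, not a real kernel helper.
 */
static bool example_clu_allocated(struct inode *inode, ext4_lblk_t lblk)
{
        /* stand-in: a real caller consults its block/cluster mapping */
        return false;
}

static void __maybe_unused es_example_record_delayed(struct inode *inode,
                                                     ext4_lblk_t lblk)
{
        bool allocated = example_clu_allocated(inode, lblk);

        /*
         * Inserts a single-block delayed extent and, when the cluster
         * is already backed by an allocated physical cluster, a
         * pending reservation for it as well.
         */
        ext4_es_insert_delayed_block(inode, lblk, allocated);
}
#endif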
/*
 * __es_delayed_clu - count number of clusters containing blocks that
 *                    are delayed only
 *
 * @inode - file containing block range
 * @start - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns the number of clusters containing only delayed (not delayed
 * and unwritten) blocks in the range specified by @start and @end.  Any
 * cluster or part of a cluster within the range and containing a delayed
 * and not unwritten block within the range is counted as a whole cluster.
 */
static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
                                     ext4_lblk_t end)
{
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct extent_status *es;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct rb_node *node;
        ext4_lblk_t first_lclu, last_lclu;
        unsigned long long last_counted_lclu;
        unsigned int n = 0;

        /* guaranteed to be unequal to any ext4_lblk_t value */
        last_counted_lclu = ~0ULL;

        es = __es_tree_search(&tree->root, start);

        while (es && (es->es_lblk <= end)) {
                if (ext4_es_is_delonly(es)) {
                        if (es->es_lblk <= start)
                                first_lclu = EXT4_B2C(sbi, start);
                        else
                                first_lclu = EXT4_B2C(sbi, es->es_lblk);

                        if (ext4_es_end(es) >= end)
                                last_lclu = EXT4_B2C(sbi, end);
                        else
                                last_lclu = EXT4_B2C(sbi, ext4_es_end(es));

                        if (first_lclu == last_counted_lclu)
                                n += last_lclu - first_lclu;
                        else
                                n += last_lclu - first_lclu + 1;
                        last_counted_lclu = last_lclu;
                }
                node = rb_next(&es->rb_node);
                if (!node)
                        break;
                es = rb_entry(node, struct extent_status, rb_node);
        }

        return n;
}

/*
 * ext4_es_delayed_clu - count number of clusters containing blocks that
 *                       are delayed only
 *
 * @inode - file containing block range
 * @lblk - logical block defining start of range
 * @len - number of blocks in range
 *
 * Locking for external use of __es_delayed_clu().
 */
unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
                                 ext4_lblk_t len)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_lblk_t end;
        unsigned int n;

        if (len == 0)
                return 0;

        end = lblk + len - 1;
        WARN_ON(end < lblk);

        read_lock(&ei->i_es_lock);

        n = __es_delayed_clu(inode, lblk, end);

        read_unlock(&ei->i_es_lock);

        return n;
}
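
#ifdef ES_DEBUG__
/*
 * Illustrative sketch only: a debug dump of the delayed-only cluster
 * count for a block range.  Worked example, assuming a bigalloc
 * cluster ratio of 4: if blocks 2..9 are covered by delayed-only
 * extents, they touch clusters 0, 1 and 2, so the count is 3 even
 * though clusters 0 and 2 are only partially delayed.
 */
static void __maybe_unused es_print_delayed_clu(struct inode *inode,
                                                ext4_lblk_t lblk,
                                                ext4_lblk_t len)
{
        unsigned int n = ext4_es_delayed_clu(inode, lblk, len);

        printk(KERN_DEBUG "inode %lu: %u delayed-only clusters in [%u, %u]\n",
               inode->i_ino, n, lblk, lblk + len - 1);
}
#endif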
/*
 * __revise_pending - makes, cancels, or leaves unchanged pending cluster
 *                    reservations for a specified block range depending
 *                    upon the presence or absence of delayed blocks
 *                    outside the range within clusters at the ends of the
 *                    range
 *
 * @inode - file containing the range
 * @lblk - logical block defining the start of range
 * @len - length of range in blocks
 *
 * Used after a newly allocated extent is added to the extents status tree.
 * Requires that the extents in the range have either written or unwritten
 * status.  Must be called while holding i_es_lock.
 */
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
                             ext4_lblk_t len)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        ext4_lblk_t end = lblk + len - 1;
        ext4_lblk_t first, last;
        bool f_del = false, l_del = false;

        if (len == 0)
                return;

        /*
         * Two cases - block range within single cluster and block range
         * spanning two or more clusters.  Note that a cluster belonging
         * to a range starting and/or ending on a cluster boundary is treated
         * as if it does not contain a delayed extent.  The new range may
         * have allocated space for previously delayed blocks out to the
         * cluster boundary, requiring that any pre-existing pending
         * reservation be canceled.  Because this code only looks at blocks
         * outside the range, it should revise pending reservations
         * correctly even if the extent represented by the range can't be
         * inserted in the extents status tree due to ENOSPC.
         */

        if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
                first = EXT4_LBLK_CMASK(sbi, lblk);
                if (first != lblk)
                        f_del = __es_scan_range(inode, &ext4_es_is_delonly,
                                                first, lblk - 1);
                if (f_del) {
                        __insert_pending(inode, first);
                } else {
                        last = EXT4_LBLK_CMASK(sbi, end) +
                               sbi->s_cluster_ratio - 1;
                        if (last != end)
                                l_del = __es_scan_range(inode,
                                                        &ext4_es_is_delonly,
                                                        end + 1, last);
                        if (l_del)
                                __insert_pending(inode, last);
                        else
                                __remove_pending(inode, last);
                }
        } else {
                first = EXT4_LBLK_CMASK(sbi, lblk);
                if (first != lblk)
                        f_del = __es_scan_range(inode, &ext4_es_is_delonly,
                                                first, lblk - 1);
                if (f_del)
                        __insert_pending(inode, first);
                else
                        __remove_pending(inode, first);

                last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
                if (last != end)
                        l_del = __es_scan_range(inode, &ext4_es_is_delonly,
                                                end + 1, last);
                if (l_del)
                        __insert_pending(inode, last);
                else
                        __remove_pending(inode, last);
        }
}
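
#ifdef ES_DEBUG__
/*
 * Illustrative sketch only: __revise_pending() must run under
 * i_es_lock, so an external caller would wrap it as below (no such
 * wrapper exists in this file; the insert paths call it while already
 * holding the lock).  Worked example, assuming a cluster ratio of 4:
 * mapping blocks 5..6 gives first = 4 and last = 7, so a pending
 * reservation for the cluster is kept or made only if block 4 or
 * block 7 still holds a delayed-only block, and is canceled otherwise.
 */
static void __maybe_unused es_example_revise_pending(struct inode *inode,
                                                     ext4_lblk_t lblk,
                                                     ext4_lblk_t len)
{
        struct ext4_inode_info *ei = EXT4_I(inode);

        write_lock(&ei->i_es_lock);
        __revise_pending(inode, lblk, len);
        write_unlock(&ei->i_es_lock);
}
#endif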