// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "super.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or it returns the node it did find
 * in the tree.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree, and if it can't be found return the
 * first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}
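/*
 * A minimal sketch of how the helpers above resolve offsets, assuming a tree
 * that already holds a single ordered extent covering [4096, 8192):
 *
 *	struct rb_node *prev = NULL;
 *
 *	__tree_search(&root, 5000, &prev);  // returns the node: 4096 <= 5000 < 8192
 *	__tree_search(&root, 9000, &prev);  // returns NULL, *prev is [4096, 8192)
 *
 * Offsets are compared against the half-open range [file_offset, entry_end()),
 * so any offset inside an ordered extent resolves to that extent's node.
 */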
/*
 * Look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted,
 * and the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ERR_PTR(ret);
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->physical = (u64)-1;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
	entry->flags = flags;

	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
				 fs_info->delalloc_batch);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset, &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	/* One ref for the returned entry to match semantics of lookup. */
	refcount_inc(&entry->refs);

	return entry;
}
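/*
 * A minimal usage sketch (hypothetical caller, error handling elided; the
 * BTRFS_ORDERED_REGULAR flag and BTRFS_COMPRESS_NONE value are assumed from
 * the wider btrfs headers, not defined in this file).  The returned pointer
 * carries its own reference on top of the tree's one, so the caller must
 * drop it with btrfs_put_ordered_extent() when done:
 *
 *	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
 *					     disk_bytenr, len, 0,
 *					     1 << BTRFS_ORDERED_REGULAR,
 *					     BTRFS_COMPRESS_NONE);
 *	if (IS_ERR(ordered))
 *		return PTR_ERR(ordered);
 *	// ...submit the IO for the range...
 *	btrfs_put_ordered_extent(ordered);
 */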
/*
 * Add a new btrfs_ordered_extent for the range, but drop the reference
 * instead of returning it to the caller.
 */
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			     u64 disk_num_bytes, u64 offset, unsigned long flags,
			     int compress_type)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_alloc_ordered_extent(inode, file_offset, num_bytes,
					     ram_bytes, disk_bytenr,
					     disk_num_bytes, offset, flags,
					     compress_type);

	if (IS_ERR(ordered))
		return PTR_ERR(ordered);
	btrfs_put_ordered_extent(ordered);

	return 0;
}
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

/*
 * Mark all ordered extent IO inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	if (page)
		ASSERT(page->mapping && page_offset(page) <= file_offset &&
		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (page) {
			/*
			 * The Ordered (Private2) bit indicates whether we still
			 * have pending io unfinished for the ordered extent.
			 *
			 * If there's no such bit, we need to skip to the next
			 * range.
			 */
			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
				cur += len;
				continue;
			}
			btrfs_page_clear_ordered(fs_info, page, cur, len);
		}

		/* Now we're fine to update the accounting */
		if (unlikely(len > entry->bytes_left)) {
			WARN_ON(1);
			btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
				   inode->root->root_key.objectid,
				   btrfs_ino(inode),
				   entry->file_offset,
				   entry->num_bytes,
				   len, entry->bytes_left);
			entry->bytes_left = 0;
		} else {
			entry->bytes_left -= len;
		}

		if (!uptodate)
			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

		/*
		 * All the IO of the ordered extent is finished, we need to queue
		 * the finish_func to be executed.
		 */
		if (entry->bytes_left == 0) {
			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
			cond_wake_up(&entry->wait);
			refcount_inc(&entry->refs);
			trace_btrfs_ordered_extent_mark_finished(inode, entry);
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_init_work(&entry->work, finish_ordered_fn, NULL, NULL);
			btrfs_queue_work(wq, &entry->work);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}
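/*
 * A minimal sketch of the expected endio-side call (hypothetical caller; the
 * page and bio variables are assumptions): after writeback of one page range
 * of an uncompressed buffered write completes, the finished byte range is
 * reported so the covering ordered extents can have their bytes_left
 * decremented:
 *
 *	btrfs_mark_ordered_io_finished(inode, page, page_offset(page),
 *				       PAGE_SIZE, !bio->bi_status);
 *
 * Once an ordered extent's bytes_left reaches zero, it is queued on the endio
 * workqueue picked above, which runs btrfs_finish_ordered_io().
 */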
/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}
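/*
 * A minimal sketch, assuming the caller knows [start, start + len) sits
 * inside a single ordered extent (hypothetical caller):
 *
 *	struct btrfs_ordered_extent *cached = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &cached, start, len)) {
 *		// We finished the last pending IO; @cached now holds a ref.
 *		// ...complete the ordered extent...
 *		btrfs_put_ordered_extent(cached);
 *	}
 */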
/*
 * Used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
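/*
 * A minimal sketch of the reference pairing used throughout this file
 * (hypothetical caller): every lookup or alloc that returns an ordered
 * extent takes a reference that the caller must drop:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *	if (ordered) {
 *		// ...inspect ordered->file_offset / ordered->num_bytes...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */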
/*
 * Remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release, false);
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
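/*
 * A minimal sketch (hypothetical caller): wait for every ordered extent in a
 * root, regardless of where it sits on disk:
 *
 *	u64 done;
 *
 *	done = btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 *
 * Passing U64_MAX for @nr means "no limit"; @range_start/@range_len bound the
 * *disk* bytenr range, which is what balancing between drives cares about.
 */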
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback. We start IO on
	 * any dirty ones so the wait doesn't stall waiting for the flusher
	 * thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}
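/*
 * A minimal sketch (hypothetical caller): force a single ordered extent to
 * reach completion before proceeding:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, offset);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(ordered);	// flush + wait for COMPLETE
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */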
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to @file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&tree->lock);
	return entry;
}
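/*
 * A minimal sketch (hypothetical caller), e.g. before issuing direct IO
 * against [start, start + len): bail out if any ordered extent overlaps the
 * range, not just its first byte:
 *
 *	ordered = btrfs_lookup_ordered_range(inode, start, len);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(ordered);
 *		btrfs_put_ordered_extent(ordered);
 *		// retry the lookup, more ordered extents may remain
 *	}
 */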
/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&tree->lock);
}
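/*
 * A minimal sketch (hypothetical caller): consumers of the logging list must
 * drop the reference taken on each ordered extent above once they are done:
 *
 *	LIST_HEAD(ordered_list);
 *	struct btrfs_ordered_extent *ordered, *tmp;
 *
 *	btrfs_get_ordered_extents_for_logging(inode, &ordered_list);
 *	list_for_each_entry_safe(ordered, tmp, &ordered_list, log_list) {
 *		// ...log the extent...
 *		list_del_init(&ordered->log_list);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */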
/*
 * Lookup and return any extent before 'file_offset'. NULL is returned if
 * none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the
 * range. And the difference against btrfs_lookup_ordered_extent() is that
 * this function ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset.
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring after it's called no
 * ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}
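/*
 * A minimal sketch of choosing between btrfs_lock_and_flush_ordered_range()
 * above and its nowait counterpart btrfs_try_lock_ordered_range() below
 * (hypothetical caller and nowait flag):
 *
 *	if (nowait) {
 *		if (!btrfs_try_lock_ordered_range(inode, start, end, &cached))
 *			return -EAGAIN;
 *	} else {
 *		btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	}
 *
 * Either way, success leaves [start, end] locked in the io_tree with no
 * ordered extent pending inside it.
 */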
/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if btrfs_lock_ordered_range does not return any extents,
 * otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}

/* Split out a new ordered extent for the first @len bytes of @ordered. */
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 len)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;
	struct rb_node *node;

	trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return -EINVAL;
	/* We cannot split once the ordered extent is past end_bio. */
	if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
		return -EINVAL;
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return -EINVAL;
	/* Checksum list should be empty. */
	if (WARN_ON_ONCE(!list_empty(&ordered->list)))
		return -EINVAL;

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;
	ordered->bytes_left -= len;

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	/*
	 * The splitting extent is already counted and will be added again in
	 * btrfs_add_ordered_extent(). Subtract len to avoid double counting.
	 */
	percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
				 fs_info->delalloc_batch);

	return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
					disk_bytenr, len, 0, flags,
					ordered->compress_type);
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}