/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

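/*
 * helper to check if a given file range overlaps a given entry
 */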
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}

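/*
 * wrappers around __btrfs_add_ordered_extent() for the buffered, direct IO
 * and compressed write paths
 */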
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_inode *inode,
			      struct list_head *logged_list,
			      const loff_t start,
			      const loff_t end)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	struct rb_node *prev;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	n = __tree_search(&tree->tree, end, &prev);
	if (!n)
		n = prev;
	for (; n; n = rb_prev(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (ordered->file_offset > end)
			continue;
		if (entry_end(ordered) <= start)
			break;
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		list_add(&ordered->log_list, logged_list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

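/*
 * drop the references taken by btrfs_get_logged_extents()
 */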
void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

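/*
 * splice the collected extents onto the list of the current log transaction,
 * where btrfs_wait_logged_extents() will find them
 */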
void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		struct inode *inode;

		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		inode = ordered->inode;
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		/*
		 * In order to keep us from losing our ordered extent
		 * information when committing the transaction we have to make
		 * sure that any logged extents are completed when we go to
		 * commit the transaction.  To do this we simply increase the
		 * current transaction's pending_ordered counter and decrement
		 * it when the ordered extent completes.
		 */
		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
			struct btrfs_ordered_inode_tree *tree;

			tree = &BTRFS_I(inode)->ordered_tree;
			spin_lock_irq(&tree->lock);
			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
				atomic_inc(&trans->transaction->pending_ordered);
			}
			spin_unlock_irq(&tree->lock);
		}
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

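/*
 * drop any extents still queued on a log tree's list without waiting for
 * them to complete
 */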
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but any waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;
	bool dec_pending_ordered = false;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->len);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
		dec_pending_ordered = true;
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (dec_pending_ordered) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

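/*
 * btrfs_work callback: wait for a single ordered extent to complete and
 * signal the waiter in btrfs_wait_ordered_extents()
 */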
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

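/*
 * walk all the roots that currently have ordered extents and wait on up to
 * nr of them in total, restricted to the given range
 */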
u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 total_done = 0;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

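/*
 * return true if any ordered extent overlaps the given file range
 */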
bool btrfs_have_ordered_extents_in_range(struct inode *inode,
					 u64 file_offset,
					 u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
	if (oe) {
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * truncate file.
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this undealt
		 * i_size.  Or we will not know the real i_size.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

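/*
 * create the slab cache used for ordered extent allocations; called once at
 * module init time
 */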
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}