/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * helper to check if a given file range overlaps a given entry
 */
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
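 *
 * If 'cached' is provided and the ordered extent is found to be complete,
 * '*cached' is set to it and an extra reference is taken that the caller
 * must drop with btrfs_put_ordered_extent().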
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
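 *
 * If '*cached' is already set when called, the rbtree lookup is skipped and
 * that ordered extent is used directly; on completion it is returned to the
 * caller with an extra reference that must be dropped with
 * btrfs_put_ordered_extent().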
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_inode *inode,
			      struct list_head *logged_list,
			      const loff_t start,
			      const loff_t end)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	struct rb_node *prev;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	n = __tree_search(&tree->tree, end, &prev);
	if (!n)
		n = prev;
	for (; n; n = rb_prev(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (ordered->file_offset > end)
			continue;
		if (entry_end(ordered) <= start)
			break;
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		list_add(&ordered->log_list, logged_list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		struct inode *inode;
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		inode = ordered->inode;
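		/*
		 * Drop the log extents lock before flushing and waiting
		 * below, since both may sleep.  It is re-taken before
		 * looking at the next entry on the list.
		 */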
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		/*
		 * To keep us from losing our ordered extent information when
		 * committing the transaction, we have to make sure that any
		 * logged extents have completed by the time we commit.  To do
		 * this we simply increase the current transaction's
		 * pending_ordered counter and decrement it when the ordered
		 * extent completes.
		 */
		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
			struct btrfs_ordered_inode_tree *tree;

			tree = &BTRFS_I(inode)->ordered_tree;
			spin_lock_irq(&tree->lock);
			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
				atomic_inc(&trans->transaction->pending_ordered);
			}
			spin_unlock_irq(&tree->lock);
		}
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but any waiters are woken up.
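 * The BTRFS_ORDERED_COMPLETE bit is set here, which is what waiters in
 * btrfs_start_ordered_extent() test for.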
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;
	bool dec_pending_ordered = false;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
		dec_pending_ordered = true;
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, so we need to
	 * let it know that we're complete and wake it up.
	 */
	if (dec_pending_ordered) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
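 *
 * 'nr' limits how many ordered extents are processed; -1 means no limit.
 * Only ordered extents whose disk byte range overlaps
 * [range_start, range_start + range_len) are considered, and the number of
 * extents waited on is returned.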
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;
	int total_done = 0;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.
 * Return NULL if nothing is found, otherwise take a reference on the extent
 * and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

bool btrfs_have_ordered_extents_in_range(struct inode *inode,
					 u64 file_offset,
					 u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
	if (oe) {
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
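 *
 * Returns 0 when disk_i_size was updated and 1 when it was left unchanged,
 * either because it is already current or because ordered extents below this
 * one have not completed yet.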
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * truncate file.
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this pending
		 * i_size here, or we will lose track of the real i_size.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}