/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

#define BTRFS_DELAYED_WRITEBACK		400
#define BTRFS_DELAYED_BACKGROUND	100

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
							struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

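/*
 * Look up the delayed node for @inode; if none is cached yet, allocate a new
 * one and insert it into the root's delayed_nodes_tree.
 */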
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the rb-root of delayed items to search
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

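	/*
	 * Track the highest dir index offset queued in this node;
	 * btrfs_inode_delayed_dir_index_count() reports it back to the
	 * in-memory inode.
	 */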
	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;
	atomic_dec(&delayed_root->items);
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
					struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
						   u64 root_id)
{
	struct btrfs_key root_key;

	if (root->objectid == root_id)
		return root;

	root_key.objectid = root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

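	/*
	 * Reserve space for one metadata item; the reservation is carried by
	 * the delayed item and released when the item is flushed or dropped.
	 */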
	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret)
		item->bytes_reserved = num_bytes;

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which, for speed, doesn't reserve space. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!trans->bytes_reserved &&
	    src_rsv != &root->fs_info->delalloc_block_rsv) {
		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret)
			node->bytes_reserved = num_bytes;
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret)
		node->bytes_reserved = num_bytes;

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	ret = setup_items_for_insert(trans, root, path, keys, data_size,
				     total_data_size, total_size, nitems);
	if (ret)
		goto error;

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(trans, root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr)
			goto do_again;
		else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		atomic_dec(&delayed_root->items);
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND &&
		    waitqueue_active(&delayed_root->wait))
			wake_up(&delayed_root->wait);
	}
}

static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		mutex_unlock(&node->mutex);
		return -ENOENT;
	} else if (ret < 0) {
		mutex_unlock(&node->mutex);
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);
	mutex_unlock(&node->mutex);

	return 0;
}

/* Called when committing the transaction. */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		root = curr_node->root;
		ret = btrfs_insert_delayed_items(trans, path, root,
						 curr_node);
		if (!ret)
			ret = btrfs_delete_delayed_items(trans, path, root,
							 curr_node);
		if (!ret)
			ret = btrfs_update_delayed_inode(trans, root, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
	return ret;
}

static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
					      struct btrfs_delayed_node *node)
{
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &node->root->fs_info->delayed_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	btrfs_free_path(path);

	trans->block_rsv = block_rsv;
	return ret;
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_node {
	struct btrfs_root *root;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_work work;
};

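/*
 * Worker callback: flush the insertion/deletion items and the dirty inode
 * item of one prepared delayed node inside a joined transaction.
 */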
static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	unsigned long nr = 0;
	int need_requeue = 0;
	int ret;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	path->leave_spinning = 1;

	delayed_node = async_node->delayed_node;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto free_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, root,
						 delayed_node);

	if (!ret)
		btrfs_update_delayed_inode(trans, root, path, delayed_node);

	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between delayed items balance and the worker.
	 * The race is like this:
	 *	Task1				Worker thread
	 *					count == 0, needn't requeue
	 *					  also needn't insert the
	 *					  delayed node into prepare
	 *					  list again.
	 *	add lots of delayed items
	 *	queue the delayed node
	 *	  already in the list,
	 *	  and not in the prepare
	 *	  list, it means the delayed
	 *	  node is being dealt with
	 *	  by the worker.
	 *	do delayed items balance
	 *	  the delayed node is being
	 *	  dealt with by the worker
	 *	  now, just wait.
	 *					the worker goes idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		need_requeue = 1;
	else
		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
					   delayed_node);
	mutex_unlock(&delayed_node->mutex);

	nr = trans->blocks_used;

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	__btrfs_btree_balance_dirty(root, nr);
free_path:
	btrfs_free_path(path);
out:
	if (need_requeue)
		btrfs_requeue_work(&async_node->work);
	else {
		btrfs_release_prepared_delayed_node(delayed_node);
		kfree(async_node);
	}
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int all)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_delayed_node *curr;
	int count = 0;

again:
	curr = btrfs_first_prepared_delayed_node(delayed_root);
	if (!curr)
		return 0;

	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
	if (!async_node) {
		btrfs_release_prepared_delayed_node(curr);
		return -ENOMEM;
	}

	async_node->root = root;
	async_node->delayed_node = curr;

	async_node->work.func = btrfs_async_run_delayed_node_done;
	async_node->work.flags = 0;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
	count++;

	if (all || count < 4)
		goto again;

	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
		if (ret)
			return;

		wait_event_interruptible_timeout(
				delayed_root->wait,
				(atomic_read(&delayed_root->items) <
				 BTRFS_DELAYED_BACKGROUND),
				HZ);
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, root, 0);
}

int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
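	/*
	 * The dir item is stored in the delayed item's data area in on-disk
	 * (little-endian) format, so it can later be copied into the leaf
	 * with a plain write_extent_buffer().
	 */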
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(name: %s) into "
				"the insertion tree of the delayed node"
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
				"into the deletion tree of the delayed node"
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				(unsigned long long)index,
				(unsigned long long)node->root->objectid,
				(unsigned long long)node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
				    filldir_t filldir,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < filp->f_pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		filp->f_pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = le16_to_cpu(di->name_len);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = filldir(dirent, name, name_len, curr->key.offset,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
	btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}

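/*
 * Fill the VFS inode with the inode item cached in the delayed node, which
 * may be newer than the copy on disk. Returns -ENOENT if there is no dirty
 * delayed inode for this inode.
 */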
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	inode->i_uid = btrfs_stack_inode_uid(inode_item);
	inode->i_gid = btrfs_stack_inode_gid(inode_item);
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	delayed_node->inode_dirty = 1;
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->inode_dirty) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}