// SPDX-License-Identifier: GPL-2.0-only
/*
 * refcounttree.c
 *
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sort.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "suballoc.h"
#include "journal.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "blockcheck.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "aops.h"
#include "xattr.h"
#include "namei.h"
#include "ocfs2_trace.h"
#include "file.h"
#include "symlink.h"

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>

struct ocfs2_cow_context {
	struct inode *inode;
	u32 cow_start;
	u32 cow_len;
	struct ocfs2_extent_tree data_et;
	struct ocfs2_refcount_tree *ref_tree;
	struct buffer_head *ref_root_bh;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	void *cow_object;
	struct ocfs2_post_refcount *post_refcount;
	int extra_credits;
	int (*get_clusters)(struct ocfs2_cow_context *context,
			    u32 v_cluster, u32 *p_cluster,
			    u32 *num_clusters,
			    unsigned int *extent_flags);
	int (*cow_duplicate_clusters)(handle_t *handle,
				      struct inode *inode,
				      u32 cpos, u32 old_cluster,
				      u32 new_cluster, u32 new_len);
};

static inline struct ocfs2_refcount_tree *
cache_info_to_refcount(struct ocfs2_caching_info *ci)
{
	return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
}

static int ocfs2_validate_refcount_block(struct super_block *sb,
					 struct buffer_head *bh)
{
	int rc;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)bh->b_data;

	trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
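	 *
	 * The structural checks below (signature, rf_blkno and filesystem
	 * generation) then reject blocks that are intact on disk but do
	 * not belong at this location.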
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
		     (unsigned long long)bh->b_blocknr);
		return rc;
	}

	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
		rc = ocfs2_error(sb,
				 "Refcount block #%llu has bad signature %.*s\n",
				 (unsigned long long)bh->b_blocknr, 7,
				 rb->rf_signature);
		goto out;
	}

	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
		rc = ocfs2_error(sb,
				 "Refcount block #%llu has an invalid rf_blkno of %llu\n",
				 (unsigned long long)bh->b_blocknr,
				 (unsigned long long)le64_to_cpu(rb->rf_blkno));
		goto out;
	}

	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		rc = ocfs2_error(sb,
				 "Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
				 (unsigned long long)bh->b_blocknr,
				 le32_to_cpu(rb->rf_fs_generation));
		goto out;
	}
out:
	return rc;
}

static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
				     u64 rb_blkno,
				     struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(ci, rb_blkno, &tmp,
			      ocfs2_validate_refcount_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}

static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_blkno;
}

static struct super_block *
ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_sb;
}

static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
__acquires(&rf->rf_lock)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_lock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
__releases(&rf->rf_lock)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_unlock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_lock(&rf->rf_io_mutex);
}

static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_unlock(&rf->rf_io_mutex);
}

static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
	.co_owner		= ocfs2_refcount_cache_owner,
	.co_get_super		= ocfs2_refcount_cache_get_super,
	.co_cache_lock		= ocfs2_refcount_cache_lock,
	.co_cache_unlock	= ocfs2_refcount_cache_unlock,
	.co_io_lock		= ocfs2_refcount_cache_io_lock,
	.co_io_unlock		= ocfs2_refcount_cache_io_unlock,
};

static struct ocfs2_refcount_tree *
ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
{
	struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
	struct ocfs2_refcount_tree *tree = NULL;

	while (n) {
		tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);

		if (blkno < tree->rf_blkno)
			n = n->rb_left;
		else if (blkno > tree->rf_blkno)
			n = n->rb_right;
		else
			return tree;
	}

	return NULL;
}

/* osb_lock is already locked.
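 * Duplicate block numbers must never be inserted; the BUG() below
 * fires if a caller tries.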
 */
static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
				       struct ocfs2_refcount_tree *new)
{
	u64 rf_blkno = new->rf_blkno;
	struct rb_node *parent = NULL;
	struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
	struct ocfs2_refcount_tree *tmp;

	while (*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_refcount_tree,
			       rf_node);

		if (rf_blkno < tmp->rf_blkno)
			p = &(*p)->rb_left;
		else if (rf_blkno > tmp->rf_blkno)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
			     (unsigned long long)rf_blkno);
			BUG();
		}
	}

	rb_link_node(&new->rf_node, parent, p);
	rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
}

static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
{
	ocfs2_metadata_cache_exit(&tree->rf_ci);
	ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
	ocfs2_lock_res_free(&tree->rf_lockres);
	kfree(tree);
}

static inline void
ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
	if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
		osb->osb_ref_tree_lru = NULL;
}

static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	spin_lock(&osb->osb_lock);
	ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
	spin_unlock(&osb->osb_lock);
}

static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
{
	struct ocfs2_refcount_tree *tree =
		container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);

	ocfs2_free_refcount_tree(tree);
}

static inline void
ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
{
	kref_get(&tree->rf_getcnt);
}

static inline void
ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
{
	kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
}

static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
					       struct super_block *sb)
{
	ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
	mutex_init(&new->rf_io_mutex);
	new->rf_sb = sb;
	spin_lock_init(&new->rf_lock);
}

static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *new,
					u64 rf_blkno, u32 generation)
{
	init_rwsem(&new->rf_sem);
	ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
				     rf_blkno, generation);
}

static struct ocfs2_refcount_tree*
ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
{
	struct ocfs2_refcount_tree *new;

	new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
	if (!new)
		return NULL;

	new->rf_blkno = rf_blkno;
	kref_init(&new->rf_getcnt);
	ocfs2_init_refcount_tree_ci(new, osb->sb);

	return new;
}

static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
				   struct ocfs2_refcount_tree **ret_tree)
{
	int ret = 0;
	struct ocfs2_refcount_tree *tree, *new = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *ref_rb;

	spin_lock(&osb->osb_lock);
	if (osb->osb_ref_tree_lru &&
	    osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
		tree = osb->osb_ref_tree_lru;
	else
		tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	spin_unlock(&osb->osb_lock);

	new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
	if (!new) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}
	/*
	 * We need the generation to create the refcount tree lock and since
	 * it isn't changed during the tree modification, we are safe here to
	 * read without protection.
	 * We also have to purge the cache after we create the lock since the
	 * refcount block may have stale data. It can only be trusted when
	 * we hold the refcount lock.
	 */
	ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_metadata_cache_exit(&new->rf_ci);
		kfree(new);
		return ret;
	}

	ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
	ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
				      new->rf_generation);
	ocfs2_metadata_cache_purge(&new->rf_ci);

	spin_lock(&osb->osb_lock);
	tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	ocfs2_insert_refcount_tree(osb, new);

	tree = new;
	new = NULL;

out:
	*ret_tree = tree;

	osb->osb_ref_tree_lru = tree;

	spin_unlock(&osb->osb_lock);

	if (new)
		ocfs2_free_refcount_tree(new);

	brelse(ref_root_bh);
	return ret;
}

static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	BUG_ON(!ocfs2_is_refcount_inode(inode));

	di = (struct ocfs2_dinode *)di_bh->b_data;
	*ref_blkno = le64_to_cpu(di->i_refcount_loc);
	brelse(di_bh);
out:
	return ret;
}

static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
				      struct ocfs2_refcount_tree *tree, int rw)
{
	int ret;

	ret = ocfs2_refcount_lock(tree, rw);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (rw)
		down_write(&tree->rf_sem);
	else
		down_read(&tree->rf_sem);

out:
	return ret;
}

/*
 * Lock the refcount tree pointed to by ref_blkno and return the tree.
 * In most cases, we lock the tree and read the refcount block.
 * So read it here if the caller really needs it.
 *
 * If the tree has been re-created by another node, it will free the
 * old one and re-create it.
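 *
 * On success the caller holds both the cluster lock and an extra
 * reference on the tree; ocfs2_unlock_refcount_tree() drops both.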
 */
int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
			     u64 ref_blkno, int rw,
			     struct ocfs2_refcount_tree **ret_tree,
			     struct buffer_head **ref_bh)
{
	int ret, delete_tree = 0;
	struct ocfs2_refcount_tree *tree = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;

again:
	ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	ocfs2_refcount_tree_get(tree);

	ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
	if (ret) {
		mlog_errno(ret);
		ocfs2_refcount_tree_put(tree);
		goto out;
	}

	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
					&ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_unlock_refcount_tree(osb, tree, rw);
		goto out;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	/*
	 * If the refcount block has been freed and re-created, we may need
	 * to recreate the refcount tree also.
	 *
	 * Here we just remove the tree from the rb-tree, and the last
	 * kref holder will unlock and delete this refcount_tree.
	 * Then we goto "again" and ocfs2_get_refcount_tree will create
	 * the new refcount tree for us.
	 */
	if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
		if (!tree->rf_removed) {
			ocfs2_erase_refcount_tree_from_list(osb, tree);
			tree->rf_removed = 1;
			delete_tree = 1;
		}

		ocfs2_unlock_refcount_tree(osb, tree, rw);
		/*
		 * We get an extra reference when we create the refcount
		 * tree, so another put will destroy it.
		 */
		if (delete_tree)
			ocfs2_refcount_tree_put(tree);
		brelse(ref_root_bh);
		ref_root_bh = NULL;
		goto again;
	}

	*ret_tree = tree;
	if (ref_bh) {
		*ref_bh = ref_root_bh;
		ref_root_bh = NULL;
	}
out:
	brelse(ref_root_bh);
	return ret;
}

void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
				struct ocfs2_refcount_tree *tree, int rw)
{
	if (rw)
		up_write(&tree->rf_sem);
	else
		up_read(&tree->rf_sem);

	ocfs2_refcount_unlock(tree, rw);
	ocfs2_refcount_tree_put(tree);
}

void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
{
	struct rb_node *node;
	struct ocfs2_refcount_tree *tree;
	struct rb_root *root = &osb->osb_rf_lock_tree;

	while ((node = rb_last(root)) != NULL) {
		tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);

		trace_ocfs2_purge_refcount_trees(
				(unsigned long long) tree->rf_blkno);

		rb_erase(&tree->rf_node, root);
		ocfs2_free_refcount_tree(tree);
	}
}

/*
 * Create a refcount tree for an inode.
 * We take for granted that the inode is already locked.
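 * A refcount block is claimed from the metadata allocator and
 * di->i_refcount_loc is pointed at it within a single transaction.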
 */
static int ocfs2_create_refcount_tree(struct inode *inode,
				      struct buffer_head *di_bh)
{
	int ret;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 suballoc_loc, first_blkno;

	BUG_ON(ocfs2_is_refcount_inode(inode));

	trace_ocfs2_create_refcount_tree(
		(unsigned long long)oi->ip_blkno);

	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &first_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
	if (!new_tree) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit;
	}

	new_bh = sb_getblk(inode->i_sb, first_blkno);
	if (!new_bh) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit;
	}
	ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Initialize ocfs2_refcount_block. */
	rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	memset(rb, 0, inode->i_sb->s_blocksize);
	strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
	rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
	rb->rf_blkno = cpu_to_le64(first_blkno);
	rb->rf_count = cpu_to_le32(1);
	rb->rf_records.rl_count =
			cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
	spin_lock(&osb->osb_lock);
	rb->rf_generation = cpu_to_le32(osb->s_next_generation++);
	spin_unlock(&osb->osb_lock);

	ocfs2_journal_dirty(handle, new_bh);

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = cpu_to_le64(first_blkno);
	spin_unlock(&oi->ip_lock);

	trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);

	ocfs2_journal_dirty(handle, di_bh);

	/*
	 * We have to init the tree lock here since it will use
	 * the generation number to create it.
	 */
	new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
	ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
				      new_tree->rf_generation);

	spin_lock(&osb->osb_lock);
	tree = ocfs2_find_refcount_tree(osb, first_blkno);

	/*
	 * We've just created a new refcount tree in this block. If
	 * we found a refcount tree on the ocfs2_super, it must be
	 * one we just deleted.
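	 * (Its generation cannot match the one we just assigned, hence
	 * the BUG_ON below.)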
	 * We free the old tree before inserting the new tree.
	 */
	BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
	if (tree)
		ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
	ocfs2_insert_refcount_tree(osb, new_tree);
	spin_unlock(&osb->osb_lock);
	new_tree = NULL;
	if (tree)
		ocfs2_refcount_tree_put(tree);

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	if (new_tree) {
		ocfs2_metadata_cache_exit(&new_tree->rf_ci);
		kfree(new_tree);
	}

	brelse(new_bh);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

	return ret;
}

static int ocfs2_set_refcount_tree(struct inode *inode,
				   struct buffer_head *di_bh,
				   u64 refcount_loc)
{
	int ret;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_tree *ref_tree;

	BUG_ON(ocfs2_is_refcount_inode(inode));

	ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
				       &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	le32_add_cpu(&rb->rf_count, 1);

	ocfs2_journal_dirty(handle, ref_root_bh);

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = cpu_to_le64(refcount_loc);
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);

	return ret;
}

int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
{
	int ret, delete_tree = 0;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_refcount_block *rb;
	struct inode *alloc_inode = NULL;
	struct buffer_head *alloc_bh = NULL;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_refcount_tree *ref_tree;
	int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
	u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
	u16 bit = 0;

	if (!ocfs2_is_refcount_inode(inode))
		return 0;

	BUG_ON(!ref_blkno);
	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	rb = (struct ocfs2_refcount_block *)blk_bh->b_data;

	/*
	 * If we are the last user, we need to free the block.
	 * So lock the allocator ahead.
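	 * The allocator inode is locked before the transaction is started
	 * so that cluster locks are always taken ahead of journal handles.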
	 */
	if (le32_to_cpu(rb->rf_count) == 1) {
		blk = le64_to_cpu(rb->rf_blkno);
		bit = le16_to_cpu(rb->rf_suballoc_bit);
		if (rb->rf_suballoc_loc)
			bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
		else
			bg_blkno = ocfs2_which_suballoc_group(blk, bit);

		alloc_inode = ocfs2_get_system_file_inode(osb,
					EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(rb->rf_suballoc_slot));
		if (!alloc_inode) {
			ret = -ENOMEM;
			mlog_errno(ret);
			goto out;
		}
		inode_lock(alloc_inode);

		ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
		if (ret) {
			mlog_errno(ret);
			goto out_mutex;
		}

		credits += OCFS2_SUBALLOC_FREE;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = 0;
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

	le32_add_cpu(&rb->rf_count, -1);
	ocfs2_journal_dirty(handle, blk_bh);

	if (!rb->rf_count) {
		delete_tree = 1;
		ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
		ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
					       alloc_bh, bit, bg_blkno, 1);
		if (ret)
			mlog_errno(ret);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	if (alloc_inode) {
		ocfs2_inode_unlock(alloc_inode, 1);
		brelse(alloc_bh);
	}
out_mutex:
	if (alloc_inode) {
		inode_unlock(alloc_inode);
		iput(alloc_inode);
	}
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	if (delete_tree)
		ocfs2_refcount_tree_put(ref_tree);
	brelse(blk_bh);

	return ret;
}

static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
					  struct buffer_head *ref_leaf_bh,
					  u64 cpos, unsigned int len,
					  struct ocfs2_refcount_rec *ret_rec,
					  int *index)
{
	int i = 0;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_rec *rec = NULL;

	for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
		rec = &rb->rf_records.rl_recs[i];

		if (le64_to_cpu(rec->r_cpos) +
		    le32_to_cpu(rec->r_clusters) <= cpos)
			continue;
		else if (le64_to_cpu(rec->r_cpos) > cpos)
			break;

		/* ok, cpos falls in this rec. Just return. */
		if (ret_rec)
			*ret_rec = *rec;
		goto out;
	}

	if (ret_rec) {
		/* We meet with a hole here, so fake the rec. */
		ret_rec->r_cpos = cpu_to_le64(cpos);
		ret_rec->r_refcount = 0;
		if (i < le16_to_cpu(rb->rf_records.rl_used) &&
		    le64_to_cpu(rec->r_cpos) < cpos + len)
			ret_rec->r_clusters =
				cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
		else
			ret_rec->r_clusters = cpu_to_le32(len);
	}

out:
	*index = i;
}

/*
 * Try to remove the refcount tree. The mechanism is:
 * 1) Check whether i_clusters == 0; if not, exit.
 * 2) Check whether we have i_xattr_loc in the dinode; if yes, exit.
 * 3) Check whether we have inline xattr values stored outside; if yes, exit.
 * 4) Remove the tree.
 */
int ocfs2_try_remove_refcount_tree(struct inode *inode,
				   struct buffer_head *di_bh)
{
	int ret;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	down_write(&oi->ip_xattr_sem);
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_clusters)
		goto out;

	if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
		goto out;

	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
	    ocfs2_has_inline_xattr_value_outside(inode, di))
		goto out;

	ret = ocfs2_remove_refcount_tree(inode, di_bh);
	if (ret)
		mlog_errno(ret);
out:
	up_write(&oi->ip_alloc_sem);
	up_write(&oi->ip_xattr_sem);
	return 0;
}

/*
 * Find the end range for a leaf refcount block indicated by
 * el->l_recs[index].e_blkno.
 */
static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
				       struct buffer_head *ref_root_bh,
				       struct ocfs2_extent_block *eb,
				       struct ocfs2_extent_list *el,
				       int index, u32 *cpos_end)
{
	int ret, i, subtree_root;
	u32 cpos;
	u64 blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_path *left_path = NULL, *right_path = NULL;
	struct ocfs2_extent_tree et;
	struct ocfs2_extent_list *tmp_el;

	if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
		/*
		 * We have an extent rec after index, so just use the e_cpos
		 * of the next extent rec.
		 */
		*cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
		return 0;
	}

	if (!eb || !eb->h_next_leaf_blk) {
		/*
		 * We are the last extent rec, so any high cpos should
		 * be stored in this leaf refcount block.
		 */
		*cpos_end = UINT_MAX;
		return 0;
	}

	/*
	 * If the extent block isn't the last one, we have to find
	 * the subtree root between this extent block and the next
	 * leaf extent block and get the corresponding e_cpos from
	 * the subroot. Otherwise we may corrupt the b-tree.
	 */
	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

	left_path = ocfs2_new_path_from_et(&et);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
	ret = ocfs2_find_path(ci, left_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	right_path = ocfs2_new_path_from_path(left_path);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(ci, right_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	subtree_root = ocfs2_find_subtree_root(&et, left_path,
					       right_path);

	tmp_el = left_path->p_node[subtree_root].el;
	blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
	for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
		if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
			*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
			break;
		}
	}

	BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));

out:
	ocfs2_free_path(left_path);
	ocfs2_free_path(right_path);
	return ret;
}

/*
 * Given a cpos and len, try to find the refcount record which contains cpos.
 * 1. If cpos can be found in one refcount record, return the record.
 * 2. If cpos can't be found, return a fake record which starts from cpos
 *    and ends at a small value between cpos+len and the start of the next
 *    record. This fake record has r_refcount = 0.
 */
static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
				  struct buffer_head *ref_root_bh,
				  u64 cpos, unsigned int len,
				  struct ocfs2_refcount_rec *ret_rec,
				  int *index,
				  struct buffer_head **ret_bh)
{
	int ret = 0, i, found;
	u32 low_cpos, cpos_end;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_extent_block *eb = NULL;
	struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
		ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
					      ret_rec, index);
		*ret_bh = ref_root_bh;
		get_bh(ref_root_bh);
		return 0;
	}

	el = &rb->rf_list;
	low_cpos = cpos & OCFS2_32BIT_POS_MASK;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ret = ocfs2_error(sb,
					  "refcount tree %llu has non zero tree depth in leaf btree tree block %llu\n",
					  (unsigned long long)ocfs2_metadata_cache_owner(ci),
					  (unsigned long long)eb_bh->b_blocknr);
			goto out;
		}
	}

	found = 0;
	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
			found = 1;
			break;
		}
	}

	if (found) {
		ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
						  eb, el, i, &cpos_end);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (cpos_end < low_cpos + len)
			len = cpos_end - low_cpos;
	}

	ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
					&ref_leaf_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
				      ret_rec, index);
	*ret_bh = ref_leaf_bh;
out:
	brelse(eb_bh);
	return ret;
}

enum ocfs2_ref_rec_contig {
	REF_CONTIG_NONE = 0,
	REF_CONTIG_LEFT,
	REF_CONTIG_RIGHT,
	REF_CONTIG_LEFTRIGHT,
};

static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
				    int index)
{
	if ((rb->rf_records.rl_recs[index].r_refcount ==
	    rb->rf_records.rl_recs[index + 1].r_refcount) &&
	    (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
	    le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
	    le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
		return REF_CONTIG_RIGHT;

	return REF_CONTIG_NONE;
}

static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
				  int index)
{
	enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
		ret = ocfs2_refcount_rec_adjacent(rb, index);

	if (index > 0) {
		enum ocfs2_ref_rec_contig tmp;

		tmp = ocfs2_refcount_rec_adjacent(rb,
						  index - 1);

		if (tmp == REF_CONTIG_RIGHT) {
			if (ret == REF_CONTIG_RIGHT)
				ret = REF_CONTIG_LEFTRIGHT;
			else
				ret = REF_CONTIG_LEFT;
		}
	}

	return ret;
}

static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
					   int index)
{
	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
	       rb->rf_records.rl_recs[index+1].r_refcount);

	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
		memmove(&rb->rf_records.rl_recs[index + 1],
			&rb->rf_records.rl_recs[index + 2],
			sizeof(struct ocfs2_refcount_rec) *
			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));

	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
	       0, sizeof(struct ocfs2_refcount_rec));
	le16_add_cpu(&rb->rf_records.rl_used, -1);
}

/*
 * Merge the refcount rec if we are contiguous with the adjacent recs.
 */
static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
				     int index)
{
	enum ocfs2_ref_rec_contig contig =
				ocfs2_refcount_rec_contig(rb, index);

	if (contig == REF_CONTIG_NONE)
		return;

	if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
		BUG_ON(index == 0);
		index--;
	}

	ocfs2_rotate_refcount_rec_left(rb, index);

	if (contig == REF_CONTIG_LEFTRIGHT)
		ocfs2_rotate_refcount_rec_left(rb, index);
}

/*
 * Change the refcount indexed by "index" in ref_bh.
 * If refcount reaches 0, remove it.
 */
static int ocfs2_change_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_leaf_bh,
				     int index, int merge, int change)
{
	int ret;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rl = &rb->rf_records;
	struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_change_refcount_rec(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		index, le32_to_cpu(rec->r_refcount), change);
	le32_add_cpu(&rec->r_refcount, change);

	if (!rec->r_refcount) {
		if (index != le16_to_cpu(rl->rl_used) - 1) {
			memmove(rec, rec + 1,
				(le16_to_cpu(rl->rl_used) - index - 1) *
				sizeof(struct ocfs2_refcount_rec));
			memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
			       0, sizeof(struct ocfs2_refcount_rec));
		}

		le16_add_cpu(&rl->rl_used, -1);
	} else if (merge)
		ocfs2_refcount_rec_merge(rb, index);

	ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
	return ret;
}

static int ocfs2_expand_inline_ref_root(handle_t *handle,
					struct ocfs2_caching_info *ci,
					struct buffer_head *ref_root_bh,
					struct buffer_head **ref_leaf_bh,
					struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 suballoc_loc, blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *new_rb;
	struct ocfs2_refcount_block *root_rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	new_bh = sb_getblk(sb, blkno);
	if (new_bh == NULL) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Initialize ocfs2_refcount_block.
	 * It should contain the same information as the old root,
	 * so just memcpy it and change the corresponding fields.
	 */
	memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);

	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	new_rb->rf_blkno = cpu_to_le64(blkno);
	new_rb->rf_cpos = cpu_to_le32(0);
	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
	ocfs2_journal_dirty(handle, new_bh);

	/* Now change the root. */
	memset(&root_rb->rf_list, 0, sb->s_blocksize -
	       offsetof(struct ocfs2_refcount_block, rf_list));
	root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
	root_rb->rf_clusters = cpu_to_le32(1);
	root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
	root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
	root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
	root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);

	ocfs2_journal_dirty(handle, ref_root_bh);

	trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
					   le16_to_cpu(new_rb->rf_records.rl_used));

	*ref_leaf_bh = new_bh;
	new_bh = NULL;
out:
	brelse(new_bh);
	return ret;
}

static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
					   struct ocfs2_refcount_rec *next)
{
	if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
	    ocfs2_get_ref_rec_low_cpos(next))
		return 1;

	return 0;
}

static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *l = a, *r = b;
	u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
	u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);

	if (l_cpos > r_cpos)
		return 1;
	if (l_cpos < r_cpos)
		return -1;
	return 0;
}

static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *l = a, *r = b;
	u64 l_cpos = le64_to_cpu(l->r_cpos);
	u64 r_cpos = le64_to_cpu(r->r_cpos);

	if (l_cpos > r_cpos)
		return 1;
	if (l_cpos < r_cpos)
		return -1;
	return 0;
}

/*
 * The refcount cpos are ordered by their 64bit cpos,
 * but we will use the low 32 bit as the e_cpos in the b-tree.
 * So we need to make sure that this pos isn't intersected with others.
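 * (Records sharing the same low 32 bit cpos must stay in one leaf
 * block, otherwise a later lookup by e_cpos could miss some of them.)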
 *
 * Note: The refcount block is already sorted by their low 32 bit cpos,
 * so just try the middle pos first, and we will exit when we find
 * the good position.
 */
static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
					 u32 *split_pos, int *split_index)
{
	int num_used = le16_to_cpu(rl->rl_used);
	int delta, middle = num_used / 2;

	for (delta = 0; delta < middle; delta++) {
		/* Let's check delta earlier than middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle - delta - 1],
					&rl->rl_recs[middle - delta])) {
			*split_index = middle - delta;
			break;
		}

		/* For even counts, don't walk off the end */
		if ((middle + delta + 1) == num_used)
			continue;

		/* Now try delta past middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle + delta],
					&rl->rl_recs[middle + delta + 1])) {
			*split_index = middle + delta + 1;
			break;
		}
	}

	if (delta >= middle)
		return -ENOSPC;

	*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
	return 0;
}

static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
					    struct buffer_head *new_bh,
					    u32 *split_cpos)
{
	int split_index = 0, num_moved, ret;
	u32 cpos = 0;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rl = &rb->rf_records;
	struct ocfs2_refcount_block *new_rb =
		(struct ocfs2_refcount_block *)new_bh->b_data;
	struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;

	trace_ocfs2_divide_leaf_refcount_block(
		(unsigned long long)ref_leaf_bh->b_blocknr,
		le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));

	/*
	 * XXX: Improvement later.
	 * If we know all the high 32 bit cpos is the same, no need to sort.
	 *
	 * In order to make the whole process safe, we do:
	 * 1. sort the entries by their low 32 bit cpos first so that we can
	 *    find the split cpos easily.
	 * 2. call ocfs2_insert_extent to insert the new refcount block.
	 * 3. move the refcount rec to the new block.
	 * 4. sort the entries by their 64 bit cpos.
	 * 5. dirty the new_rb and rb.
	 */
	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_low_cpos, NULL);

	ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	new_rb->rf_cpos = cpu_to_le32(cpos);

	/* move refcount records starting from split_index to the new block. */
	num_moved = le16_to_cpu(rl->rl_used) - split_index;
	memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* ok, remove the entries we just moved over to the other block. */
	memset(&rl->rl_recs[split_index], 0,
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* change old and new rl_used accordingly. */
	le16_add_cpu(&rl->rl_used, -num_moved);
	new_rl->rl_used = cpu_to_le16(num_moved);

	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_cpos, NULL);

	sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_cpos, NULL);

	*split_cpos = cpos;
	return 0;
}

static int ocfs2_new_leaf_refcount_block(handle_t *handle,
					 struct ocfs2_caching_info *ci,
					 struct buffer_head *ref_root_bh,
					 struct buffer_head *ref_leaf_bh,
					 struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got, new_cpos;
	u64 suballoc_loc, blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *root_rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *new_rb;
	struct ocfs2_extent_tree ref_et;

	BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	new_bh = sb_getblk(sb, blkno);
	if (new_bh == NULL) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Initialize ocfs2_refcount_block. */
	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	memset(new_rb, 0, sb->s_blocksize);
	strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
	new_rb->rf_blkno = cpu_to_le64(blkno);
	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
	new_rb->rf_records.rl_count =
				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
	new_rb->rf_generation = root_rb->rf_generation;

	ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_journal_dirty(handle, ref_leaf_bh);
	ocfs2_journal_dirty(handle, new_bh);

	ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);

	trace_ocfs2_new_leaf_refcount_block(
			(unsigned long long)new_bh->b_blocknr, new_cpos);

	/* Insert the new leaf block with the specific offset cpos.
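	 * ocfs2_insert_extent() links it into the refcount b-tree rooted
	 * at ref_root_bh.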
	 */
	ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
				  1, 0, meta_ac);
	if (ret)
		mlog_errno(ret);

out:
	brelse(new_bh);
	return ret;
}

static int ocfs2_expand_refcount_tree(handle_t *handle,
				      struct ocfs2_caching_info *ci,
				      struct buffer_head *ref_root_bh,
				      struct buffer_head *ref_leaf_bh,
				      struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	struct buffer_head *expand_bh = NULL;

	if (ref_root_bh == ref_leaf_bh) {
		/*
		 * the old root bh hasn't been expanded to a b-tree,
		 * so expand it first.
		 */
		ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
						   &expand_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	} else {
		expand_bh = ref_leaf_bh;
		get_bh(expand_bh);
	}

	/* Now add a new refcount block into the tree. */
	ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
					    expand_bh, meta_ac);
	if (ret)
		mlog_errno(ret);
out:
	brelse(expand_bh);
	return ret;
}

/*
 * Adjust the extent rec in the b-tree representing ref_leaf_bh.
 *
 * Only called when we have inserted a new refcount rec at index 0
 * which means ocfs2_extent_rec.e_cpos may need some change.
 */
static int ocfs2_adjust_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec)
{
	int ret = 0, i;
	u32 new_cpos, old_cpos;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_tree et;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
	struct ocfs2_extent_list *el;

	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
		goto out;

	rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	old_cpos = le32_to_cpu(rb->rf_cpos);
	new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
	if (old_cpos <= new_cpos)
		goto out;

	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(ci, path, old_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * 2 more credits, one for the leaf refcount block, one for
	 * the extent block containing the extent rec.
	 */
	ret = ocfs2_extend_trans(handle, 2);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* change the leaf extent block first. */
	el = path_leaf_el(path);

	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
		if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
			break;

	BUG_ON(i == le16_to_cpu(el->l_next_free_rec));

	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);

	/* change the r_cpos in the leaf block.
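	 * keeping it consistent with the e_cpos we just updated above.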
	 */
	rb->rf_cpos = cpu_to_le32(new_cpos);

	ocfs2_journal_dirty(handle, path_leaf_bh(path));
	ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
	ocfs2_free_path(path);
	return ret;
}

static int ocfs2_insert_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec,
				     int index, int merge,
				     struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
	struct buffer_head *new_bh = NULL;

	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

	if (rf_list->rl_used == rf_list->rl_count) {
		u64 cpos = le64_to_cpu(rec->r_cpos);
		u32 len = le32_to_cpu(rec->r_clusters);

		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
						 ref_leaf_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, NULL, &index,
					     &new_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ref_leaf_bh = new_bh;
		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
		rf_list = &rb->rf_records;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (index < le16_to_cpu(rf_list->rl_used))
		memmove(&rf_list->rl_recs[index + 1],
			&rf_list->rl_recs[index],
			(le16_to_cpu(rf_list->rl_used) - index) *
			 sizeof(struct ocfs2_refcount_rec));

	trace_ocfs2_insert_refcount_rec(
		(unsigned long long)ref_leaf_bh->b_blocknr, index,
		(unsigned long long)le64_to_cpu(rec->r_cpos),
		le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));

	rf_list->rl_recs[index] = *rec;

	le16_add_cpu(&rf_list->rl_used, 1);

	if (merge)
		ocfs2_refcount_rec_merge(rb, index);

	ocfs2_journal_dirty(handle, ref_leaf_bh);

	if (index == 0) {
		ret = ocfs2_adjust_refcount_rec(handle, ci,
						ref_root_bh,
						ref_leaf_bh, rec);
		if (ret)
			mlog_errno(ret);
	}
out:
	brelse(new_bh);
	return ret;
}

/*
 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
 * This is much simpler than our b-tree code.
 * split_rec is the new refcount rec we want to insert.
 * If split_rec->r_refcount > 0, we are changing the refcount (in case we
 * increase a refcount or decrease a refcount to non-zero).
 * If split_rec->r_refcount == 0, we are punching a hole in the current
 * refcount rec (in case we decrease a refcount to zero).
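 *
 * For example, with an existing rec covering clusters [100, 108) at
 * refcount 2 and a split_rec of [102, 105) at refcount 3, the result is
 * three recs: [100, 102) at 2, [102, 105) at 3 and [105, 108) at 2.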
 */
static int ocfs2_split_refcount_rec(handle_t *handle,
				    struct ocfs2_caching_info *ci,
				    struct buffer_head *ref_root_bh,
				    struct buffer_head *ref_leaf_bh,
				    struct ocfs2_refcount_rec *split_rec,
				    int index, int merge,
				    struct ocfs2_alloc_context *meta_ac,
				    struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret, recs_need;
	u32 len;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
	struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
	struct ocfs2_refcount_rec *tail_rec = NULL;
	struct buffer_head *new_bh = NULL;

	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

	trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
		le32_to_cpu(orig_rec->r_clusters),
		le32_to_cpu(orig_rec->r_refcount),
		le64_to_cpu(split_rec->r_cpos),
		le32_to_cpu(split_rec->r_clusters),
		le32_to_cpu(split_rec->r_refcount));

	/*
	 * If we just need to split the header or tail clusters,
	 * no more recs are needed; just the split is OK.
	 * Otherwise we need at least one new rec.
	 */
	if (!split_rec->r_refcount &&
	    (split_rec->r_cpos == orig_rec->r_cpos ||
	     le64_to_cpu(split_rec->r_cpos) +
	     le32_to_cpu(split_rec->r_clusters) ==
	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
		recs_need = 0;
	else
		recs_need = 1;

	/*
	 * We need one more rec if we split in the middle and the new rec has
	 * some refcount in it.
	 */
	if (split_rec->r_refcount &&
	    (split_rec->r_cpos != orig_rec->r_cpos &&
	     le64_to_cpu(split_rec->r_cpos) +
	     le32_to_cpu(split_rec->r_clusters) !=
	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
		recs_need++;

	/* If the leaf block doesn't have enough records, expand it. */
	if (le16_to_cpu(rf_list->rl_used) + recs_need >
	    le16_to_cpu(rf_list->rl_count)) {
		struct ocfs2_refcount_rec tmp_rec;
		u64 cpos = le64_to_cpu(orig_rec->r_cpos);
		len = le32_to_cpu(orig_rec->r_clusters);
		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
						 ref_leaf_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * We have to re-get it since now cpos may be moved to
		 * another leaf block.
		 */
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &tmp_rec, &index,
					     &new_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ref_leaf_bh = new_bh;
		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
		rf_list = &rb->rf_records;
		orig_rec = &rf_list->rl_recs[index];
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We have calculated how many new records we need and stored
	 * that in recs_need, so spare enough space first by moving the
	 * records after "index" to the end.
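	 * The slots opened up are filled below by the tail piece and,
	 * when needed, the new split_rec.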
	 */
	if (index != le16_to_cpu(rf_list->rl_used) - 1)
		memmove(&rf_list->rl_recs[index + 1 + recs_need],
			&rf_list->rl_recs[index + 1],
			(le16_to_cpu(rf_list->rl_used) - index - 1) *
			 sizeof(struct ocfs2_refcount_rec));

	len = (le64_to_cpu(orig_rec->r_cpos) +
	      le32_to_cpu(orig_rec->r_clusters)) -
	      (le64_to_cpu(split_rec->r_cpos) +
	      le32_to_cpu(split_rec->r_clusters));

	/*
	 * If we have "len", then we will split in the tail and move it
	 * to the end of the space we have just spared.
	 */
	if (len) {
		tail_rec = &rf_list->rl_recs[index + recs_need];

		memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
		le64_add_cpu(&tail_rec->r_cpos,
			     le32_to_cpu(tail_rec->r_clusters) - len);
		tail_rec->r_clusters = cpu_to_le32(len);
	}

	/*
	 * If the split pos isn't the same as the original one, we need to
	 * split in the head.
	 *
	 * Note: We have the chance that split_rec.r_refcount = 0,
	 * recs_need = 0 and len > 0, which means we just cut the head from
	 * the orig_rec and in that case we have done some modification in
	 * orig_rec above, so the check for r_cpos is faked.
	 */
	if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
		len = le64_to_cpu(split_rec->r_cpos) -
		      le64_to_cpu(orig_rec->r_cpos);
		orig_rec->r_clusters = cpu_to_le32(len);
		index++;
	}

	le16_add_cpu(&rf_list->rl_used, recs_need);

	if (split_rec->r_refcount) {
		rf_list->rl_recs[index] = *split_rec;
		trace_ocfs2_split_refcount_rec_insert(
			(unsigned long long)ref_leaf_bh->b_blocknr, index,
			(unsigned long long)le64_to_cpu(split_rec->r_cpos),
			le32_to_cpu(split_rec->r_clusters),
			le32_to_cpu(split_rec->r_refcount));

		if (merge)
			ocfs2_refcount_rec_merge(rb, index);
	}

	ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
	brelse(new_bh);
	return ret;
}

static int __ocfs2_increase_refcount(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     u64 cpos, u32 len, int merge,
				     struct ocfs2_alloc_context *meta_ac,
				     struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0, index;
	struct buffer_head *ref_leaf_bh = NULL;
	struct ocfs2_refcount_rec rec;
	unsigned int set_len = 0;

	trace_ocfs2_increase_refcount_begin(
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long)cpos, len);

	while (len) {
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &rec, &index,
					     &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		set_len = le32_to_cpu(rec.r_clusters);

		/*
		 * Here we may meet with 3 situations:
		 *
		 * 1. If we find an already existing record, and the length
		 *    is the same, cool, we just need to increase the
		 *    r_refcount and it is OK.
		 * 2. If we find a hole, just insert it with r_refcount = 1.
		 * 3. If we are in the middle of one extent record, split
		 *    it.
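		 *
		 * For example, increasing the refcount of clusters [5, 10)
		 * while one record covers [0, 20) at refcount 1 takes case
		 * 3: the record is split and only the [5, 10) piece ends up
		 * at refcount 2.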
2008 */ 2009 if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos && 2010 set_len <= len) { 2011 trace_ocfs2_increase_refcount_change( 2012 (unsigned long long)cpos, set_len, 2013 le32_to_cpu(rec.r_refcount)); 2014 ret = ocfs2_change_refcount_rec(handle, ci, 2015 ref_leaf_bh, index, 2016 merge, 1); 2017 if (ret) { 2018 mlog_errno(ret); 2019 goto out; 2020 } 2021 } else if (!rec.r_refcount) { 2022 rec.r_refcount = cpu_to_le32(1); 2023 2024 trace_ocfs2_increase_refcount_insert( 2025 (unsigned long long)le64_to_cpu(rec.r_cpos), 2026 set_len); 2027 ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh, 2028 ref_leaf_bh, 2029 &rec, index, 2030 merge, meta_ac); 2031 if (ret) { 2032 mlog_errno(ret); 2033 goto out; 2034 } 2035 } else { 2036 set_len = min((u64)(cpos + len), 2037 le64_to_cpu(rec.r_cpos) + set_len) - cpos; 2038 rec.r_cpos = cpu_to_le64(cpos); 2039 rec.r_clusters = cpu_to_le32(set_len); 2040 le32_add_cpu(&rec.r_refcount, 1); 2041 2042 trace_ocfs2_increase_refcount_split( 2043 (unsigned long long)le64_to_cpu(rec.r_cpos), 2044 set_len, le32_to_cpu(rec.r_refcount)); 2045 ret = ocfs2_split_refcount_rec(handle, ci, 2046 ref_root_bh, ref_leaf_bh, 2047 &rec, index, merge, 2048 meta_ac, dealloc); 2049 if (ret) { 2050 mlog_errno(ret); 2051 goto out; 2052 } 2053 } 2054 2055 cpos += set_len; 2056 len -= set_len; 2057 brelse(ref_leaf_bh); 2058 ref_leaf_bh = NULL; 2059 } 2060 2061 out: 2062 brelse(ref_leaf_bh); 2063 return ret; 2064 } 2065 2066 static int ocfs2_remove_refcount_extent(handle_t *handle, 2067 struct ocfs2_caching_info *ci, 2068 struct buffer_head *ref_root_bh, 2069 struct buffer_head *ref_leaf_bh, 2070 struct ocfs2_alloc_context *meta_ac, 2071 struct ocfs2_cached_dealloc_ctxt *dealloc) 2072 { 2073 int ret; 2074 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2075 struct ocfs2_refcount_block *rb = 2076 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 2077 struct ocfs2_extent_tree et; 2078 2079 BUG_ON(rb->rf_records.rl_used); 2080 2081 trace_ocfs2_remove_refcount_extent( 2082 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2083 (unsigned long long)ref_leaf_bh->b_blocknr, 2084 le32_to_cpu(rb->rf_cpos)); 2085 2086 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 2087 ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos), 2088 1, meta_ac, dealloc); 2089 if (ret) { 2090 mlog_errno(ret); 2091 goto out; 2092 } 2093 2094 ocfs2_remove_from_cache(ci, ref_leaf_bh); 2095 2096 /* 2097 * add the freed block to the dealloc so that it will be freed 2098 * when we run dealloc. 2099 */ 2100 ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE, 2101 le16_to_cpu(rb->rf_suballoc_slot), 2102 le64_to_cpu(rb->rf_suballoc_loc), 2103 le64_to_cpu(rb->rf_blkno), 2104 le16_to_cpu(rb->rf_suballoc_bit)); 2105 if (ret) { 2106 mlog_errno(ret); 2107 goto out; 2108 } 2109 2110 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, 2111 OCFS2_JOURNAL_ACCESS_WRITE); 2112 if (ret) { 2113 mlog_errno(ret); 2114 goto out; 2115 } 2116 2117 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 2118 2119 le32_add_cpu(&rb->rf_clusters, -1); 2120 2121 /* 2122 * check whether we need to restore the root refcount block if 2123 * there is no leaf extent block at atll. 
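 * (i.e. once rf_list.l_next_free_rec drops to zero the root block is
 * converted back to an inline-record block below, with rf_flags,
 * rf_parent and rf_cpos cleared and rl_count reset)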
2124 */ 2125 if (!rb->rf_list.l_next_free_rec) { 2126 BUG_ON(rb->rf_clusters); 2127 2128 trace_ocfs2_restore_refcount_block( 2129 (unsigned long long)ref_root_bh->b_blocknr); 2130 2131 rb->rf_flags = 0; 2132 rb->rf_parent = 0; 2133 rb->rf_cpos = 0; 2134 memset(&rb->rf_records, 0, sb->s_blocksize - 2135 offsetof(struct ocfs2_refcount_block, rf_records)); 2136 rb->rf_records.rl_count = 2137 cpu_to_le16(ocfs2_refcount_recs_per_rb(sb)); 2138 } 2139 2140 ocfs2_journal_dirty(handle, ref_root_bh); 2141 2142 out: 2143 return ret; 2144 } 2145 2146 int ocfs2_increase_refcount(handle_t *handle, 2147 struct ocfs2_caching_info *ci, 2148 struct buffer_head *ref_root_bh, 2149 u64 cpos, u32 len, 2150 struct ocfs2_alloc_context *meta_ac, 2151 struct ocfs2_cached_dealloc_ctxt *dealloc) 2152 { 2153 return __ocfs2_increase_refcount(handle, ci, ref_root_bh, 2154 cpos, len, 1, 2155 meta_ac, dealloc); 2156 } 2157 2158 static int ocfs2_decrease_refcount_rec(handle_t *handle, 2159 struct ocfs2_caching_info *ci, 2160 struct buffer_head *ref_root_bh, 2161 struct buffer_head *ref_leaf_bh, 2162 int index, u64 cpos, unsigned int len, 2163 struct ocfs2_alloc_context *meta_ac, 2164 struct ocfs2_cached_dealloc_ctxt *dealloc) 2165 { 2166 int ret; 2167 struct ocfs2_refcount_block *rb = 2168 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 2169 struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index]; 2170 2171 BUG_ON(cpos < le64_to_cpu(rec->r_cpos)); 2172 BUG_ON(cpos + len > 2173 le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters)); 2174 2175 trace_ocfs2_decrease_refcount_rec( 2176 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2177 (unsigned long long)cpos, len); 2178 2179 if (cpos == le64_to_cpu(rec->r_cpos) && 2180 len == le32_to_cpu(rec->r_clusters)) 2181 ret = ocfs2_change_refcount_rec(handle, ci, 2182 ref_leaf_bh, index, 1, -1); 2183 else { 2184 struct ocfs2_refcount_rec split = *rec; 2185 split.r_cpos = cpu_to_le64(cpos); 2186 split.r_clusters = cpu_to_le32(len); 2187 2188 le32_add_cpu(&split.r_refcount, -1); 2189 2190 ret = ocfs2_split_refcount_rec(handle, ci, 2191 ref_root_bh, ref_leaf_bh, 2192 &split, index, 1, 2193 meta_ac, dealloc); 2194 } 2195 2196 if (ret) { 2197 mlog_errno(ret); 2198 goto out; 2199 } 2200 2201 /* Remove the leaf refcount block if it contains no refcount record. 
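 * (only when the leaf is a separate block; the root block itself is
 * never removed)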
*/ 2202 if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) { 2203 ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh, 2204 ref_leaf_bh, meta_ac, 2205 dealloc); 2206 if (ret) 2207 mlog_errno(ret); 2208 } 2209 2210 out: 2211 return ret; 2212 } 2213 2214 static int __ocfs2_decrease_refcount(handle_t *handle, 2215 struct ocfs2_caching_info *ci, 2216 struct buffer_head *ref_root_bh, 2217 u64 cpos, u32 len, 2218 struct ocfs2_alloc_context *meta_ac, 2219 struct ocfs2_cached_dealloc_ctxt *dealloc, 2220 int delete) 2221 { 2222 int ret = 0, index = 0; 2223 struct ocfs2_refcount_rec rec; 2224 unsigned int r_count = 0, r_len; 2225 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2226 struct buffer_head *ref_leaf_bh = NULL; 2227 2228 trace_ocfs2_decrease_refcount( 2229 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2230 (unsigned long long)cpos, len, delete); 2231 2232 while (len) { 2233 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2234 cpos, len, &rec, &index, 2235 &ref_leaf_bh); 2236 if (ret) { 2237 mlog_errno(ret); 2238 goto out; 2239 } 2240 2241 r_count = le32_to_cpu(rec.r_refcount); 2242 BUG_ON(r_count == 0); 2243 if (!delete) 2244 BUG_ON(r_count > 1); 2245 2246 r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) + 2247 le32_to_cpu(rec.r_clusters)) - cpos; 2248 2249 ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh, 2250 ref_leaf_bh, index, 2251 cpos, r_len, 2252 meta_ac, dealloc); 2253 if (ret) { 2254 mlog_errno(ret); 2255 goto out; 2256 } 2257 2258 if (le32_to_cpu(rec.r_refcount) == 1 && delete) { 2259 ret = ocfs2_cache_cluster_dealloc(dealloc, 2260 ocfs2_clusters_to_blocks(sb, cpos), 2261 r_len); 2262 if (ret) { 2263 mlog_errno(ret); 2264 goto out; 2265 } 2266 } 2267 2268 cpos += r_len; 2269 len -= r_len; 2270 brelse(ref_leaf_bh); 2271 ref_leaf_bh = NULL; 2272 } 2273 2274 out: 2275 brelse(ref_leaf_bh); 2276 return ret; 2277 } 2278 2279 /* Caller must hold refcount tree lock. */ 2280 int ocfs2_decrease_refcount(struct inode *inode, 2281 handle_t *handle, u32 cpos, u32 len, 2282 struct ocfs2_alloc_context *meta_ac, 2283 struct ocfs2_cached_dealloc_ctxt *dealloc, 2284 int delete) 2285 { 2286 int ret; 2287 u64 ref_blkno; 2288 struct buffer_head *ref_root_bh = NULL; 2289 struct ocfs2_refcount_tree *tree; 2290 2291 BUG_ON(!ocfs2_is_refcount_inode(inode)); 2292 2293 ret = ocfs2_get_refcount_block(inode, &ref_blkno); 2294 if (ret) { 2295 mlog_errno(ret); 2296 goto out; 2297 } 2298 2299 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree); 2300 if (ret) { 2301 mlog_errno(ret); 2302 goto out; 2303 } 2304 2305 ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno, 2306 &ref_root_bh); 2307 if (ret) { 2308 mlog_errno(ret); 2309 goto out; 2310 } 2311 2312 ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh, 2313 cpos, len, meta_ac, dealloc, delete); 2314 if (ret) 2315 mlog_errno(ret); 2316 out: 2317 brelse(ref_root_bh); 2318 return ret; 2319 } 2320 2321 /* 2322 * Mark the already-existing extent at cpos as refcounted for len clusters. 2323 * This adds the refcount extent flag. 2324 * 2325 * If the existing extent is larger than the request, initiate a 2326 * split. An attempt will be made at merging with adjacent extents. 2327 * 2328 * The caller is responsible for passing down meta_ac if we'll need it. 
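 * (in this file, ocfs2_add_refcount_flag() reserves meta_ac according
 * to what ocfs2_calc_refcount_meta_credits() reports before calling in
 * here)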
2329 */ 2330 static int ocfs2_mark_extent_refcounted(struct inode *inode, 2331 struct ocfs2_extent_tree *et, 2332 handle_t *handle, u32 cpos, 2333 u32 len, u32 phys, 2334 struct ocfs2_alloc_context *meta_ac, 2335 struct ocfs2_cached_dealloc_ctxt *dealloc) 2336 { 2337 int ret; 2338 2339 trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno, 2340 cpos, len, phys); 2341 2342 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { 2343 ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n", 2344 inode->i_ino); 2345 goto out; 2346 } 2347 2348 ret = ocfs2_change_extent_flag(handle, et, cpos, 2349 len, phys, meta_ac, dealloc, 2350 OCFS2_EXT_REFCOUNTED, 0); 2351 if (ret) 2352 mlog_errno(ret); 2353 2354 out: 2355 return ret; 2356 } 2357 2358 /* 2359 * Given some contiguous physical clusters, calculate what we need 2360 * for modifying their refcount. 2361 */ 2362 static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, 2363 struct ocfs2_caching_info *ci, 2364 struct buffer_head *ref_root_bh, 2365 u64 start_cpos, 2366 u32 clusters, 2367 int *meta_add, 2368 int *credits) 2369 { 2370 int ret = 0, index, ref_blocks = 0, recs_add = 0; 2371 u64 cpos = start_cpos; 2372 struct ocfs2_refcount_block *rb; 2373 struct ocfs2_refcount_rec rec; 2374 struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL; 2375 u32 len; 2376 2377 while (clusters) { 2378 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2379 cpos, clusters, &rec, 2380 &index, &ref_leaf_bh); 2381 if (ret) { 2382 mlog_errno(ret); 2383 goto out; 2384 } 2385 2386 if (ref_leaf_bh != prev_bh) { 2387 /* 2388 * Now we encounter a new leaf block, so calculate 2389 * whether we need to extend the old leaf. 2390 */ 2391 if (prev_bh) { 2392 rb = (struct ocfs2_refcount_block *) 2393 prev_bh->b_data; 2394 2395 if (le16_to_cpu(rb->rf_records.rl_used) + 2396 recs_add > 2397 le16_to_cpu(rb->rf_records.rl_count)) 2398 ref_blocks++; 2399 } 2400 2401 recs_add = 0; 2402 *credits += 1; 2403 brelse(prev_bh); 2404 prev_bh = ref_leaf_bh; 2405 get_bh(prev_bh); 2406 } 2407 2408 trace_ocfs2_calc_refcount_meta_credits_iterate( 2409 recs_add, (unsigned long long)cpos, clusters, 2410 (unsigned long long)le64_to_cpu(rec.r_cpos), 2411 le32_to_cpu(rec.r_clusters), 2412 le32_to_cpu(rec.r_refcount), index); 2413 2414 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + 2415 le32_to_cpu(rec.r_clusters)) - cpos; 2416 /* 2417 * We record all the records which will be inserted to the 2418 * same refcount block, so that we can tell exactly whether 2419 * we need a new refcount block or not. 2420 * 2421 * If we will insert a new one, this is easy and only happens 2422 * during adding refcounted flag to the extent, so we don't 2423 * have a chance of spliting. We just need one record. 2424 * 2425 * If the refcount rec already exists, that would be a little 2426 * complicated. we may have to: 2427 * 1) split at the beginning if the start pos isn't aligned. 2428 * we need 1 more record in this case. 2429 * 2) split int the end if the end pos isn't aligned. 2430 * we need 1 more record in this case. 2431 * 3) split in the middle because of file system fragmentation. 2432 * we need 2 more records in this case(we can't detect this 2433 * beforehand, so always think of the worst case). 2434 */ 2435 if (rec.r_refcount) { 2436 recs_add += 2; 2437 /* Check whether we need a split at the beginning. 
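 * (i.e. start_cpos falls inside the existing record rather than on
 * its r_cpos)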
*/ 2438 if (cpos == start_cpos && 2439 cpos != le64_to_cpu(rec.r_cpos)) 2440 recs_add++; 2441 2442 /* Check whether we need a split at the end. */ 2443 if (cpos + clusters < le64_to_cpu(rec.r_cpos) + 2444 le32_to_cpu(rec.r_clusters)) 2445 recs_add++; 2446 } else 2447 recs_add++; 2448 2449 brelse(ref_leaf_bh); 2450 ref_leaf_bh = NULL; 2451 clusters -= len; 2452 cpos += len; 2453 } 2454 2455 if (prev_bh) { 2456 rb = (struct ocfs2_refcount_block *)prev_bh->b_data; 2457 2458 if (le16_to_cpu(rb->rf_records.rl_used) + recs_add > 2459 le16_to_cpu(rb->rf_records.rl_count)) 2460 ref_blocks++; 2461 2462 *credits += 1; 2463 } 2464 2465 if (!ref_blocks) 2466 goto out; 2467 2468 *meta_add += ref_blocks; 2469 *credits += ref_blocks; 2470 2471 /* 2472 * So we may need to insert ref_blocks new leaf blocks into the tree. 2473 * That also means we need to change the b-tree and add that number 2474 * of records, since we never merge them. 2475 * We need one more block for the expansion, since the newly created 2476 * leaf block may also be full and need a split. 2477 */ 2478 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 2479 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) { 2480 struct ocfs2_extent_tree et; 2481 2482 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 2483 *meta_add += ocfs2_extend_meta_needed(et.et_root_el); 2484 *credits += ocfs2_calc_extend_credits(sb, 2485 et.et_root_el); 2486 } else { 2487 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; 2488 *meta_add += 1; 2489 } 2490 2491 out: 2492 2493 trace_ocfs2_calc_refcount_meta_credits( 2494 (unsigned long long)start_cpos, clusters, 2495 *meta_add, *credits); 2496 brelse(ref_leaf_bh); 2497 brelse(prev_bh); 2498 return ret; 2499 } 2500 2501 /* 2502 * We are going to decrease the refcount of some contiguous clusters, 2503 * so just go through the refcount tree to see how many blocks 2504 * we are going to touch and whether we need to create new blocks. 2505 * 2506 * Normally the refcount blocks storing these refcounts should be 2507 * contiguous as well, so that we can get the number easily. 2508 * We will at most split 2 refcount records and add 2 more 2509 * refcount blocks, so just check it in a rough way. 2510 * 2511 * Caller must hold refcount tree lock.
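 *
 * On success, *ref_blocks and *credits are increased by the extra
 * refcount-tree blocks and journal credits the caller should reserve.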
2512 */ 2513 int ocfs2_prepare_refcount_change_for_del(struct inode *inode, 2514 u64 refcount_loc, 2515 u64 phys_blkno, 2516 u32 clusters, 2517 int *credits, 2518 int *ref_blocks) 2519 { 2520 int ret; 2521 struct buffer_head *ref_root_bh = NULL; 2522 struct ocfs2_refcount_tree *tree; 2523 u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno); 2524 2525 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { 2526 ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n", 2527 inode->i_ino); 2528 goto out; 2529 } 2530 2531 BUG_ON(!ocfs2_is_refcount_inode(inode)); 2532 2533 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), 2534 refcount_loc, &tree); 2535 if (ret) { 2536 mlog_errno(ret); 2537 goto out; 2538 } 2539 2540 ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc, 2541 &ref_root_bh); 2542 if (ret) { 2543 mlog_errno(ret); 2544 goto out; 2545 } 2546 2547 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, 2548 &tree->rf_ci, 2549 ref_root_bh, 2550 start_cpos, clusters, 2551 ref_blocks, credits); 2552 if (ret) { 2553 mlog_errno(ret); 2554 goto out; 2555 } 2556 2557 trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits); 2558 2559 out: 2560 brelse(ref_root_bh); 2561 return ret; 2562 } 2563 2564 #define MAX_CONTIG_BYTES 1048576 2565 2566 static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb) 2567 { 2568 return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES); 2569 } 2570 2571 static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb) 2572 { 2573 return ~(ocfs2_cow_contig_clusters(sb) - 1); 2574 } 2575 2576 /* 2577 * Given an extent that starts at 'start' and an I/O that starts at 'cpos', 2578 * find an offset (start + (n * contig_clusters)) that is closest to cpos 2579 * while still being less than or equal to it. 2580 * 2581 * The goal is to break the extent at a multiple of contig_clusters. 2582 */ 2583 static inline unsigned int ocfs2_cow_align_start(struct super_block *sb, 2584 unsigned int start, 2585 unsigned int cpos) 2586 { 2587 BUG_ON(start > cpos); 2588 2589 return start + ((cpos - start) & ocfs2_cow_contig_mask(sb)); 2590 } 2591 2592 /* 2593 * Given a cluster count of len, pad it out so that it is a multiple 2594 * of contig_clusters. 2595 */ 2596 static inline unsigned int ocfs2_cow_align_length(struct super_block *sb, 2597 unsigned int len) 2598 { 2599 unsigned int padded = 2600 (len + (ocfs2_cow_contig_clusters(sb) - 1)) & 2601 ocfs2_cow_contig_mask(sb); 2602 2603 /* Did we wrap? */ 2604 if (padded < len) 2605 padded = UINT_MAX; 2606 2607 return padded; 2608 } 2609 2610 /* 2611 * Calculate out the start and number of virtual clusters we need to CoW. 2612 * 2613 * cpos is vitual start cluster position we want to do CoW in a 2614 * file and write_len is the cluster length. 2615 * max_cpos is the place where we want to stop CoW intentionally. 2616 * 2617 * Normal we will start CoW from the beginning of extent record cotaining cpos. 2618 * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we 2619 * get good I/O from the resulting extent tree. 
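 *
 * For example (illustrative, 4K clusters so contig_clusters = 256):
 * ocfs2_cow_align_start(sb, 512, 1000) returns 768, and
 * ocfs2_cow_align_length(sb, 8) rounds up to 256.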
2620 */ 2621 static int ocfs2_refcount_cal_cow_clusters(struct inode *inode, 2622 struct ocfs2_extent_list *el, 2623 u32 cpos, 2624 u32 write_len, 2625 u32 max_cpos, 2626 u32 *cow_start, 2627 u32 *cow_len) 2628 { 2629 int ret = 0; 2630 int tree_height = le16_to_cpu(el->l_tree_depth), i; 2631 struct buffer_head *eb_bh = NULL; 2632 struct ocfs2_extent_block *eb = NULL; 2633 struct ocfs2_extent_rec *rec; 2634 unsigned int want_clusters, rec_end = 0; 2635 int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb); 2636 int leaf_clusters; 2637 2638 BUG_ON(cpos + write_len > max_cpos); 2639 2640 if (tree_height > 0) { 2641 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh); 2642 if (ret) { 2643 mlog_errno(ret); 2644 goto out; 2645 } 2646 2647 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 2648 el = &eb->h_list; 2649 2650 if (el->l_tree_depth) { 2651 ret = ocfs2_error(inode->i_sb, 2652 "Inode %lu has non zero tree depth in leaf block %llu\n", 2653 inode->i_ino, 2654 (unsigned long long)eb_bh->b_blocknr); 2655 goto out; 2656 } 2657 } 2658 2659 *cow_len = 0; 2660 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { 2661 rec = &el->l_recs[i]; 2662 2663 if (ocfs2_is_empty_extent(rec)) { 2664 mlog_bug_on_msg(i != 0, "Inode %lu has empty record in " 2665 "index %d\n", inode->i_ino, i); 2666 continue; 2667 } 2668 2669 if (le32_to_cpu(rec->e_cpos) + 2670 le16_to_cpu(rec->e_leaf_clusters) <= cpos) 2671 continue; 2672 2673 if (*cow_len == 0) { 2674 /* 2675 * We should find a refcounted record in the 2676 * first pass. 2677 */ 2678 BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED)); 2679 *cow_start = le32_to_cpu(rec->e_cpos); 2680 } 2681 2682 /* 2683 * If we encounter a hole, a non-refcounted record or 2684 * pass the max_cpos, stop the search. 2685 */ 2686 if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) || 2687 (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) || 2688 (max_cpos <= le32_to_cpu(rec->e_cpos))) 2689 break; 2690 2691 leaf_clusters = le16_to_cpu(rec->e_leaf_clusters); 2692 rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters; 2693 if (rec_end > max_cpos) { 2694 rec_end = max_cpos; 2695 leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos); 2696 } 2697 2698 /* 2699 * How many clusters do we actually need from 2700 * this extent? First we see how many we actually 2701 * need to complete the write. If that's smaller 2702 * than contig_clusters, we try for contig_clusters. 2703 */ 2704 if (!*cow_len) 2705 want_clusters = write_len; 2706 else 2707 want_clusters = (cpos + write_len) - 2708 (*cow_start + *cow_len); 2709 if (want_clusters < contig_clusters) 2710 want_clusters = contig_clusters; 2711 2712 /* 2713 * If the write does not cover the whole extent, we 2714 * need to calculate how we're going to split the extent. 2715 * We try to do it on contig_clusters boundaries. 2716 * 2717 * Any extent smaller than contig_clusters will be 2718 * CoWed in its entirety. 2719 */ 2720 if (leaf_clusters <= contig_clusters) 2721 *cow_len += leaf_clusters; 2722 else if (*cow_len || (*cow_start == cpos)) { 2723 /* 2724 * This extent needs to be CoW'd from its 2725 * beginning, so all we have to do is compute 2726 * how many clusters to grab. We align 2727 * want_clusters to the edge of contig_clusters 2728 * to get better I/O. 
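 * (e.g. want_clusters = 300 with contig_clusters = 256 is padded to
 * 512 by ocfs2_cow_align_length(); illustrative numbers)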
2729 */ 2730 want_clusters = ocfs2_cow_align_length(inode->i_sb, 2731 want_clusters); 2732 2733 if (leaf_clusters < want_clusters) 2734 *cow_len += leaf_clusters; 2735 else 2736 *cow_len += want_clusters; 2737 } else if ((*cow_start + contig_clusters) >= 2738 (cpos + write_len)) { 2739 /* 2740 * Breaking off contig_clusters at the front 2741 * of the extent will cover our write. That's 2742 * easy. 2743 */ 2744 *cow_len = contig_clusters; 2745 } else if ((rec_end - cpos) <= contig_clusters) { 2746 /* 2747 * Breaking off contig_clusters at the tail of 2748 * this extent will cover cpos. 2749 */ 2750 *cow_start = rec_end - contig_clusters; 2751 *cow_len = contig_clusters; 2752 } else if ((rec_end - cpos) <= want_clusters) { 2753 /* 2754 * While we can't fit the entire write in this 2755 * extent, we know that the write goes from cpos 2756 * to the end of the extent. Break that off. 2757 * We try to break it at some multiple of 2758 * contig_clusters from the front of the extent. 2759 * Failing that (ie, cpos is within 2760 * contig_clusters of the front), we'll CoW the 2761 * entire extent. 2762 */ 2763 *cow_start = ocfs2_cow_align_start(inode->i_sb, 2764 *cow_start, cpos); 2765 *cow_len = rec_end - *cow_start; 2766 } else { 2767 /* 2768 * Ok, the entire write lives in the middle of 2769 * this extent. Let's try to slice the extent up 2770 * nicely. Optimally, our CoW region starts at 2771 * m*contig_clusters from the beginning of the 2772 * extent and goes for n*contig_clusters, 2773 * covering the entire write. 2774 */ 2775 *cow_start = ocfs2_cow_align_start(inode->i_sb, 2776 *cow_start, cpos); 2777 2778 want_clusters = (cpos + write_len) - *cow_start; 2779 want_clusters = ocfs2_cow_align_length(inode->i_sb, 2780 want_clusters); 2781 if (*cow_start + want_clusters <= rec_end) 2782 *cow_len = want_clusters; 2783 else 2784 *cow_len = rec_end - *cow_start; 2785 } 2786 2787 /* Have we covered our entire write yet? */ 2788 if ((*cow_start + *cow_len) >= (cpos + write_len)) 2789 break; 2790 2791 /* 2792 * If we reach the end of the extent block and don't get enough 2793 * clusters, continue with the next extent block if possible. 2794 */ 2795 if (i + 1 == le16_to_cpu(el->l_next_free_rec) && 2796 eb && eb->h_next_leaf_blk) { 2797 brelse(eb_bh); 2798 eb_bh = NULL; 2799 2800 ret = ocfs2_read_extent_block(INODE_CACHE(inode), 2801 le64_to_cpu(eb->h_next_leaf_blk), 2802 &eb_bh); 2803 if (ret) { 2804 mlog_errno(ret); 2805 goto out; 2806 } 2807 2808 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 2809 el = &eb->h_list; 2810 i = -1; 2811 } 2812 } 2813 2814 out: 2815 brelse(eb_bh); 2816 return ret; 2817 } 2818 2819 /* 2820 * Prepare meta_ac, data_ac and calculate credits when we want to add some 2821 * num_clusters in data_tree "et" and change the refcount for the old 2822 * clusters(starting form p_cluster) in the refcount tree. 2823 * 2824 * Note: 2825 * 1. since we may split the old tree, so we at most will need num_clusters + 2 2826 * more new leaf records. 2827 * 2. In some case, we may not need to reserve new clusters(e.g, reflink), so 2828 * just give data_ac = NULL. 
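 * 3. On failure, any *meta_ac reserved here is freed again before
 * returning.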
2829 */ 2830 static int ocfs2_lock_refcount_allocators(struct super_block *sb, 2831 u32 p_cluster, u32 num_clusters, 2832 struct ocfs2_extent_tree *et, 2833 struct ocfs2_caching_info *ref_ci, 2834 struct buffer_head *ref_root_bh, 2835 struct ocfs2_alloc_context **meta_ac, 2836 struct ocfs2_alloc_context **data_ac, 2837 int *credits) 2838 { 2839 int ret = 0, meta_add = 0; 2840 int num_free_extents = ocfs2_num_free_extents(et); 2841 2842 if (num_free_extents < 0) { 2843 ret = num_free_extents; 2844 mlog_errno(ret); 2845 goto out; 2846 } 2847 2848 if (num_free_extents < num_clusters + 2) 2849 meta_add = 2850 ocfs2_extend_meta_needed(et->et_root_el); 2851 2852 *credits += ocfs2_calc_extend_credits(sb, et->et_root_el); 2853 2854 ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh, 2855 p_cluster, num_clusters, 2856 &meta_add, credits); 2857 if (ret) { 2858 mlog_errno(ret); 2859 goto out; 2860 } 2861 2862 trace_ocfs2_lock_refcount_allocators(meta_add, *credits); 2863 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add, 2864 meta_ac); 2865 if (ret) { 2866 mlog_errno(ret); 2867 goto out; 2868 } 2869 2870 if (data_ac) { 2871 ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters, 2872 data_ac); 2873 if (ret) 2874 mlog_errno(ret); 2875 } 2876 2877 out: 2878 if (ret) { 2879 if (*meta_ac) { 2880 ocfs2_free_alloc_context(*meta_ac); 2881 *meta_ac = NULL; 2882 } 2883 } 2884 2885 return ret; 2886 } 2887 2888 static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh) 2889 { 2890 BUG_ON(buffer_dirty(bh)); 2891 2892 clear_buffer_mapped(bh); 2893 2894 return 0; 2895 } 2896 2897 int ocfs2_duplicate_clusters_by_page(handle_t *handle, 2898 struct inode *inode, 2899 u32 cpos, u32 old_cluster, 2900 u32 new_cluster, u32 new_len) 2901 { 2902 int ret = 0, partial; 2903 struct super_block *sb = inode->i_sb; 2904 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 2905 struct page *page; 2906 pgoff_t page_index; 2907 unsigned int from, to; 2908 loff_t offset, end, map_end; 2909 struct address_space *mapping = inode->i_mapping; 2910 2911 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 2912 new_cluster, new_len); 2913 2914 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; 2915 end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits); 2916 /* 2917 * We only duplicate pages until we reach the page contains i_size - 1. 2918 * So trim 'end' to i_size. 2919 */ 2920 if (end > i_size_read(inode)) 2921 end = i_size_read(inode); 2922 2923 while (offset < end) { 2924 page_index = offset >> PAGE_SHIFT; 2925 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; 2926 if (map_end > end) 2927 map_end = end; 2928 2929 /* from, to is the offset within the page. */ 2930 from = offset & (PAGE_SIZE - 1); 2931 to = PAGE_SIZE; 2932 if (map_end & (PAGE_SIZE - 1)) 2933 to = map_end & (PAGE_SIZE - 1); 2934 2935 retry: 2936 page = find_or_create_page(mapping, page_index, GFP_NOFS); 2937 if (!page) { 2938 ret = -ENOMEM; 2939 mlog_errno(ret); 2940 break; 2941 } 2942 2943 /* 2944 * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty 2945 * page, so write it back. 
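 * (after filemap_write_and_wait_range() returns we retry the page
 * lookup from the top)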
2946 */ 2947 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) { 2948 if (PageDirty(page)) { 2949 unlock_page(page); 2950 put_page(page); 2951 2952 ret = filemap_write_and_wait_range(mapping, 2953 offset, map_end - 1); 2954 goto retry; 2955 } 2956 } 2957 2958 if (!PageUptodate(page)) { 2959 struct folio *folio = page_folio(page); 2960 2961 ret = block_read_full_folio(folio, ocfs2_get_block); 2962 if (ret) { 2963 mlog_errno(ret); 2964 goto unlock; 2965 } 2966 folio_lock(folio); 2967 } 2968 2969 if (page_has_buffers(page)) { 2970 ret = walk_page_buffers(handle, page_buffers(page), 2971 from, to, &partial, 2972 ocfs2_clear_cow_buffer); 2973 if (ret) { 2974 mlog_errno(ret); 2975 goto unlock; 2976 } 2977 } 2978 2979 ocfs2_map_and_dirty_page(inode, 2980 handle, from, to, 2981 page, 0, &new_block); 2982 mark_page_accessed(page); 2983 unlock: 2984 unlock_page(page); 2985 put_page(page); 2986 page = NULL; 2987 offset = map_end; 2988 if (ret) 2989 break; 2990 } 2991 2992 return ret; 2993 } 2994 2995 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, 2996 struct inode *inode, 2997 u32 cpos, u32 old_cluster, 2998 u32 new_cluster, u32 new_len) 2999 { 3000 int ret = 0; 3001 struct super_block *sb = inode->i_sb; 3002 struct ocfs2_caching_info *ci = INODE_CACHE(inode); 3003 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); 3004 u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster); 3005 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 3006 struct ocfs2_super *osb = OCFS2_SB(sb); 3007 struct buffer_head *old_bh = NULL; 3008 struct buffer_head *new_bh = NULL; 3009 3010 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 3011 new_cluster, new_len); 3012 3013 for (i = 0; i < blocks; i++, old_block++, new_block++) { 3014 new_bh = sb_getblk(osb->sb, new_block); 3015 if (new_bh == NULL) { 3016 ret = -ENOMEM; 3017 mlog_errno(ret); 3018 break; 3019 } 3020 3021 ocfs2_set_new_buffer_uptodate(ci, new_bh); 3022 3023 ret = ocfs2_read_block(ci, old_block, &old_bh, NULL); 3024 if (ret) { 3025 mlog_errno(ret); 3026 break; 3027 } 3028 3029 ret = ocfs2_journal_access(handle, ci, new_bh, 3030 OCFS2_JOURNAL_ACCESS_CREATE); 3031 if (ret) { 3032 mlog_errno(ret); 3033 break; 3034 } 3035 3036 memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize); 3037 ocfs2_journal_dirty(handle, new_bh); 3038 3039 brelse(new_bh); 3040 brelse(old_bh); 3041 new_bh = NULL; 3042 old_bh = NULL; 3043 } 3044 3045 brelse(new_bh); 3046 brelse(old_bh); 3047 return ret; 3048 } 3049 3050 static int ocfs2_clear_ext_refcount(handle_t *handle, 3051 struct ocfs2_extent_tree *et, 3052 u32 cpos, u32 p_cluster, u32 len, 3053 unsigned int ext_flags, 3054 struct ocfs2_alloc_context *meta_ac, 3055 struct ocfs2_cached_dealloc_ctxt *dealloc) 3056 { 3057 int ret, index; 3058 struct ocfs2_extent_rec replace_rec; 3059 struct ocfs2_path *path = NULL; 3060 struct ocfs2_extent_list *el; 3061 struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); 3062 u64 ino = ocfs2_metadata_cache_owner(et->et_ci); 3063 3064 trace_ocfs2_clear_ext_refcount((unsigned long long)ino, 3065 cpos, len, p_cluster, ext_flags); 3066 3067 memset(&replace_rec, 0, sizeof(replace_rec)); 3068 replace_rec.e_cpos = cpu_to_le32(cpos); 3069 replace_rec.e_leaf_clusters = cpu_to_le16(len); 3070 replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb, 3071 p_cluster)); 3072 replace_rec.e_flags = ext_flags; 3073 replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED; 3074 3075 path = ocfs2_new_path_from_et(et); 3076 if (!path) { 3077 ret = -ENOMEM; 3078 mlog_errno(ret); 3079 goto 
out; 3080 } 3081 3082 ret = ocfs2_find_path(et->et_ci, path, cpos); 3083 if (ret) { 3084 mlog_errno(ret); 3085 goto out; 3086 } 3087 3088 el = path_leaf_el(path); 3089 3090 index = ocfs2_search_extent_list(el, cpos); 3091 if (index == -1) { 3092 ret = ocfs2_error(sb, 3093 "Inode %llu has an extent at cpos %u which can no longer be found\n", 3094 (unsigned long long)ino, cpos); 3095 goto out; 3096 } 3097 3098 ret = ocfs2_split_extent(handle, et, path, index, 3099 &replace_rec, meta_ac, dealloc); 3100 if (ret) 3101 mlog_errno(ret); 3102 3103 out: 3104 ocfs2_free_path(path); 3105 return ret; 3106 } 3107 3108 static int ocfs2_replace_clusters(handle_t *handle, 3109 struct ocfs2_cow_context *context, 3110 u32 cpos, u32 old, 3111 u32 new, u32 len, 3112 unsigned int ext_flags) 3113 { 3114 int ret; 3115 struct ocfs2_caching_info *ci = context->data_et.et_ci; 3116 u64 ino = ocfs2_metadata_cache_owner(ci); 3117 3118 trace_ocfs2_replace_clusters((unsigned long long)ino, 3119 cpos, old, new, len, ext_flags); 3120 3121 /*If the old clusters is unwritten, no need to duplicate. */ 3122 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { 3123 ret = context->cow_duplicate_clusters(handle, context->inode, 3124 cpos, old, new, len); 3125 if (ret) { 3126 mlog_errno(ret); 3127 goto out; 3128 } 3129 } 3130 3131 ret = ocfs2_clear_ext_refcount(handle, &context->data_et, 3132 cpos, new, len, ext_flags, 3133 context->meta_ac, &context->dealloc); 3134 if (ret) 3135 mlog_errno(ret); 3136 out: 3137 return ret; 3138 } 3139 3140 int ocfs2_cow_sync_writeback(struct super_block *sb, 3141 struct inode *inode, 3142 u32 cpos, u32 num_clusters) 3143 { 3144 int ret; 3145 loff_t start, end; 3146 3147 if (ocfs2_should_order_data(inode)) 3148 return 0; 3149 3150 start = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; 3151 end = start + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits) - 1; 3152 3153 ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 3154 if (ret < 0) 3155 mlog_errno(ret); 3156 3157 return ret; 3158 } 3159 3160 static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context, 3161 u32 v_cluster, u32 *p_cluster, 3162 u32 *num_clusters, 3163 unsigned int *extent_flags) 3164 { 3165 return ocfs2_get_clusters(context->inode, v_cluster, p_cluster, 3166 num_clusters, extent_flags); 3167 } 3168 3169 static int ocfs2_make_clusters_writable(struct super_block *sb, 3170 struct ocfs2_cow_context *context, 3171 u32 cpos, u32 p_cluster, 3172 u32 num_clusters, unsigned int e_flags) 3173 { 3174 int ret, delete, index, credits = 0; 3175 u32 new_bit, new_len, orig_num_clusters; 3176 unsigned int set_len; 3177 struct ocfs2_super *osb = OCFS2_SB(sb); 3178 handle_t *handle; 3179 struct buffer_head *ref_leaf_bh = NULL; 3180 struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; 3181 struct ocfs2_refcount_rec rec; 3182 3183 trace_ocfs2_make_clusters_writable(cpos, p_cluster, 3184 num_clusters, e_flags); 3185 3186 ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters, 3187 &context->data_et, 3188 ref_ci, 3189 context->ref_root_bh, 3190 &context->meta_ac, 3191 &context->data_ac, &credits); 3192 if (ret) { 3193 mlog_errno(ret); 3194 return ret; 3195 } 3196 3197 if (context->post_refcount) 3198 credits += context->post_refcount->credits; 3199 3200 credits += context->extra_credits; 3201 handle = ocfs2_start_trans(osb, credits); 3202 if (IS_ERR(handle)) { 3203 ret = PTR_ERR(handle); 3204 mlog_errno(ret); 3205 goto out; 3206 } 3207 3208 orig_num_clusters = num_clusters; 3209 3210 while (num_clusters) { 3211 ret 
= ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, 3212 p_cluster, num_clusters, 3213 &rec, &index, &ref_leaf_bh); 3214 if (ret) { 3215 mlog_errno(ret); 3216 goto out_commit; 3217 } 3218 3219 BUG_ON(!rec.r_refcount); 3220 set_len = min((u64)p_cluster + num_clusters, 3221 le64_to_cpu(rec.r_cpos) + 3222 le32_to_cpu(rec.r_clusters)) - p_cluster; 3223 3224 /* 3225 * There are many different situation here. 3226 * 1. If refcount == 1, remove the flag and don't COW. 3227 * 2. If refcount > 1, allocate clusters. 3228 * Here we may not allocate r_len once at a time, so continue 3229 * until we reach num_clusters. 3230 */ 3231 if (le32_to_cpu(rec.r_refcount) == 1) { 3232 delete = 0; 3233 ret = ocfs2_clear_ext_refcount(handle, 3234 &context->data_et, 3235 cpos, p_cluster, 3236 set_len, e_flags, 3237 context->meta_ac, 3238 &context->dealloc); 3239 if (ret) { 3240 mlog_errno(ret); 3241 goto out_commit; 3242 } 3243 } else { 3244 delete = 1; 3245 3246 ret = __ocfs2_claim_clusters(handle, 3247 context->data_ac, 3248 1, set_len, 3249 &new_bit, &new_len); 3250 if (ret) { 3251 mlog_errno(ret); 3252 goto out_commit; 3253 } 3254 3255 ret = ocfs2_replace_clusters(handle, context, 3256 cpos, p_cluster, new_bit, 3257 new_len, e_flags); 3258 if (ret) { 3259 mlog_errno(ret); 3260 goto out_commit; 3261 } 3262 set_len = new_len; 3263 } 3264 3265 ret = __ocfs2_decrease_refcount(handle, ref_ci, 3266 context->ref_root_bh, 3267 p_cluster, set_len, 3268 context->meta_ac, 3269 &context->dealloc, delete); 3270 if (ret) { 3271 mlog_errno(ret); 3272 goto out_commit; 3273 } 3274 3275 cpos += set_len; 3276 p_cluster += set_len; 3277 num_clusters -= set_len; 3278 brelse(ref_leaf_bh); 3279 ref_leaf_bh = NULL; 3280 } 3281 3282 /* handle any post_cow action. */ 3283 if (context->post_refcount && context->post_refcount->func) { 3284 ret = context->post_refcount->func(context->inode, handle, 3285 context->post_refcount->para); 3286 if (ret) { 3287 mlog_errno(ret); 3288 goto out_commit; 3289 } 3290 } 3291 3292 /* 3293 * Here we should write the new page out first if we are 3294 * in write-back mode. 
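 * (context->get_clusters == ocfs2_di_get_clusters means regular file
 * data; the xattr CoW path duplicates through the journal instead and
 * skips this)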
3295 */ 3296 if (context->get_clusters == ocfs2_di_get_clusters) { 3297 ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos, 3298 orig_num_clusters); 3299 if (ret) 3300 mlog_errno(ret); 3301 } 3302 3303 out_commit: 3304 ocfs2_commit_trans(osb, handle); 3305 3306 out: 3307 if (context->data_ac) { 3308 ocfs2_free_alloc_context(context->data_ac); 3309 context->data_ac = NULL; 3310 } 3311 if (context->meta_ac) { 3312 ocfs2_free_alloc_context(context->meta_ac); 3313 context->meta_ac = NULL; 3314 } 3315 brelse(ref_leaf_bh); 3316 3317 return ret; 3318 } 3319 3320 static int ocfs2_replace_cow(struct ocfs2_cow_context *context) 3321 { 3322 int ret = 0; 3323 struct inode *inode = context->inode; 3324 u32 cow_start = context->cow_start, cow_len = context->cow_len; 3325 u32 p_cluster, num_clusters; 3326 unsigned int ext_flags; 3327 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3328 3329 if (!ocfs2_refcount_tree(osb)) { 3330 return ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n", 3331 inode->i_ino); 3332 } 3333 3334 ocfs2_init_dealloc_ctxt(&context->dealloc); 3335 3336 while (cow_len) { 3337 ret = context->get_clusters(context, cow_start, &p_cluster, 3338 &num_clusters, &ext_flags); 3339 if (ret) { 3340 mlog_errno(ret); 3341 break; 3342 } 3343 3344 BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED)); 3345 3346 if (cow_len < num_clusters) 3347 num_clusters = cow_len; 3348 3349 ret = ocfs2_make_clusters_writable(inode->i_sb, context, 3350 cow_start, p_cluster, 3351 num_clusters, ext_flags); 3352 if (ret) { 3353 mlog_errno(ret); 3354 break; 3355 } 3356 3357 cow_len -= num_clusters; 3358 cow_start += num_clusters; 3359 } 3360 3361 if (ocfs2_dealloc_has_cluster(&context->dealloc)) { 3362 ocfs2_schedule_truncate_log_flush(osb, 1); 3363 ocfs2_run_deallocs(osb, &context->dealloc); 3364 } 3365 3366 return ret; 3367 } 3368 3369 /* 3370 * Starting at cpos, try to CoW write_len clusters. Don't CoW 3371 * past max_cpos. This will stop when it runs into a hole or an 3372 * unrefcounted extent. 
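 *
 * The CoW range is computed by ocfs2_refcount_cal_cow_clusters() and
 * the clusters are then rewritten under the refcount tree lock.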
3373 */ 3374 static int ocfs2_refcount_cow_hunk(struct inode *inode, 3375 struct buffer_head *di_bh, 3376 u32 cpos, u32 write_len, u32 max_cpos) 3377 { 3378 int ret; 3379 u32 cow_start = 0, cow_len = 0; 3380 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3381 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3382 struct buffer_head *ref_root_bh = NULL; 3383 struct ocfs2_refcount_tree *ref_tree; 3384 struct ocfs2_cow_context *context = NULL; 3385 3386 BUG_ON(!ocfs2_is_refcount_inode(inode)); 3387 3388 ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list, 3389 cpos, write_len, max_cpos, 3390 &cow_start, &cow_len); 3391 if (ret) { 3392 mlog_errno(ret); 3393 goto out; 3394 } 3395 3396 trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno, 3397 cpos, write_len, max_cpos, 3398 cow_start, cow_len); 3399 3400 BUG_ON(cow_len == 0); 3401 3402 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3403 if (!context) { 3404 ret = -ENOMEM; 3405 mlog_errno(ret); 3406 goto out; 3407 } 3408 3409 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 3410 1, &ref_tree, &ref_root_bh); 3411 if (ret) { 3412 mlog_errno(ret); 3413 goto out; 3414 } 3415 3416 context->inode = inode; 3417 context->cow_start = cow_start; 3418 context->cow_len = cow_len; 3419 context->ref_tree = ref_tree; 3420 context->ref_root_bh = ref_root_bh; 3421 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; 3422 context->get_clusters = ocfs2_di_get_clusters; 3423 3424 ocfs2_init_dinode_extent_tree(&context->data_et, 3425 INODE_CACHE(inode), di_bh); 3426 3427 ret = ocfs2_replace_cow(context); 3428 if (ret) 3429 mlog_errno(ret); 3430 3431 /* 3432 * truncate the extent map here since no matter whether we meet with 3433 * any error during the action, we shouldn't trust cached extent map 3434 * any more. 3435 */ 3436 ocfs2_extent_map_trunc(inode, cow_start); 3437 3438 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 3439 brelse(ref_root_bh); 3440 out: 3441 kfree(context); 3442 return ret; 3443 } 3444 3445 /* 3446 * CoW any and all clusters between cpos and cpos+write_len. 3447 * Don't CoW past max_cpos. If this returns successfully, all 3448 * clusters between cpos and cpos+write_len are safe to modify. 
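 *
 * Non-refcounted extents inside the range are simply skipped; only
 * OCFS2_EXT_REFCOUNTED extents are handed to ocfs2_refcount_cow_hunk().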
3449 */ 3450 int ocfs2_refcount_cow(struct inode *inode, 3451 struct buffer_head *di_bh, 3452 u32 cpos, u32 write_len, u32 max_cpos) 3453 { 3454 int ret = 0; 3455 u32 p_cluster, num_clusters; 3456 unsigned int ext_flags; 3457 3458 while (write_len) { 3459 ret = ocfs2_get_clusters(inode, cpos, &p_cluster, 3460 &num_clusters, &ext_flags); 3461 if (ret) { 3462 mlog_errno(ret); 3463 break; 3464 } 3465 3466 if (write_len < num_clusters) 3467 num_clusters = write_len; 3468 3469 if (ext_flags & OCFS2_EXT_REFCOUNTED) { 3470 ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, 3471 num_clusters, max_cpos); 3472 if (ret) { 3473 mlog_errno(ret); 3474 break; 3475 } 3476 } 3477 3478 write_len -= num_clusters; 3479 cpos += num_clusters; 3480 } 3481 3482 return ret; 3483 } 3484 3485 static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context, 3486 u32 v_cluster, u32 *p_cluster, 3487 u32 *num_clusters, 3488 unsigned int *extent_flags) 3489 { 3490 struct inode *inode = context->inode; 3491 struct ocfs2_xattr_value_root *xv = context->cow_object; 3492 3493 return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster, 3494 num_clusters, &xv->xr_list, 3495 extent_flags); 3496 } 3497 3498 /* 3499 * Given a xattr value root, calculate the most meta/credits we need for 3500 * refcount tree change if we truncate it to 0. 3501 */ 3502 int ocfs2_refcounted_xattr_delete_need(struct inode *inode, 3503 struct ocfs2_caching_info *ref_ci, 3504 struct buffer_head *ref_root_bh, 3505 struct ocfs2_xattr_value_root *xv, 3506 int *meta_add, int *credits) 3507 { 3508 int ret = 0, index, ref_blocks = 0; 3509 u32 p_cluster, num_clusters; 3510 u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters); 3511 struct ocfs2_refcount_block *rb; 3512 struct ocfs2_refcount_rec rec; 3513 struct buffer_head *ref_leaf_bh = NULL; 3514 3515 while (cpos < clusters) { 3516 ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, 3517 &num_clusters, &xv->xr_list, 3518 NULL); 3519 if (ret) { 3520 mlog_errno(ret); 3521 goto out; 3522 } 3523 3524 cpos += num_clusters; 3525 3526 while (num_clusters) { 3527 ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh, 3528 p_cluster, num_clusters, 3529 &rec, &index, 3530 &ref_leaf_bh); 3531 if (ret) { 3532 mlog_errno(ret); 3533 goto out; 3534 } 3535 3536 BUG_ON(!rec.r_refcount); 3537 3538 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 3539 3540 /* 3541 * We really don't know whether the other clusters is in 3542 * this refcount block or not, so just take the worst 3543 * case that all the clusters are in this block and each 3544 * one will split a refcount rec, so totally we need 3545 * clusters * 2 new refcount rec. 
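 * For example (illustrative): rl_used = 100, rl_count = 126 and
 * clusters = 20 assumes up to 40 new records, so one extra refcount
 * block is counted.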
3546 */ 3547 if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 > 3548 le16_to_cpu(rb->rf_records.rl_count)) 3549 ref_blocks++; 3550 3551 *credits += 1; 3552 brelse(ref_leaf_bh); 3553 ref_leaf_bh = NULL; 3554 3555 if (num_clusters <= le32_to_cpu(rec.r_clusters)) 3556 break; 3557 else 3558 num_clusters -= le32_to_cpu(rec.r_clusters); 3559 p_cluster += num_clusters; 3560 } 3561 } 3562 3563 *meta_add += ref_blocks; 3564 if (!ref_blocks) 3565 goto out; 3566 3567 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 3568 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) 3569 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; 3570 else { 3571 struct ocfs2_extent_tree et; 3572 3573 ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh); 3574 *credits += ocfs2_calc_extend_credits(inode->i_sb, 3575 et.et_root_el); 3576 } 3577 3578 out: 3579 brelse(ref_leaf_bh); 3580 return ret; 3581 } 3582 3583 /* 3584 * Do CoW for xattr. 3585 */ 3586 int ocfs2_refcount_cow_xattr(struct inode *inode, 3587 struct ocfs2_dinode *di, 3588 struct ocfs2_xattr_value_buf *vb, 3589 struct ocfs2_refcount_tree *ref_tree, 3590 struct buffer_head *ref_root_bh, 3591 u32 cpos, u32 write_len, 3592 struct ocfs2_post_refcount *post) 3593 { 3594 int ret; 3595 struct ocfs2_xattr_value_root *xv = vb->vb_xv; 3596 struct ocfs2_cow_context *context = NULL; 3597 u32 cow_start, cow_len; 3598 3599 BUG_ON(!ocfs2_is_refcount_inode(inode)); 3600 3601 ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list, 3602 cpos, write_len, UINT_MAX, 3603 &cow_start, &cow_len); 3604 if (ret) { 3605 mlog_errno(ret); 3606 goto out; 3607 } 3608 3609 BUG_ON(cow_len == 0); 3610 3611 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3612 if (!context) { 3613 ret = -ENOMEM; 3614 mlog_errno(ret); 3615 goto out; 3616 } 3617 3618 context->inode = inode; 3619 context->cow_start = cow_start; 3620 context->cow_len = cow_len; 3621 context->ref_tree = ref_tree; 3622 context->ref_root_bh = ref_root_bh; 3623 context->cow_object = xv; 3624 3625 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd; 3626 /* We need the extra credits for duplicate_clusters by jbd. */ 3627 context->extra_credits = 3628 ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len; 3629 context->get_clusters = ocfs2_xattr_value_get_clusters; 3630 context->post_refcount = post; 3631 3632 ocfs2_init_xattr_value_extent_tree(&context->data_et, 3633 INODE_CACHE(inode), vb); 3634 3635 ret = ocfs2_replace_cow(context); 3636 if (ret) 3637 mlog_errno(ret); 3638 3639 out: 3640 kfree(context); 3641 return ret; 3642 } 3643 3644 /* 3645 * Insert a new extent into refcount tree and mark a extent rec 3646 * as refcounted in the dinode tree. 3647 */ 3648 int ocfs2_add_refcount_flag(struct inode *inode, 3649 struct ocfs2_extent_tree *data_et, 3650 struct ocfs2_caching_info *ref_ci, 3651 struct buffer_head *ref_root_bh, 3652 u32 cpos, u32 p_cluster, u32 num_clusters, 3653 struct ocfs2_cached_dealloc_ctxt *dealloc, 3654 struct ocfs2_post_refcount *post) 3655 { 3656 int ret; 3657 handle_t *handle; 3658 int credits = 1, ref_blocks = 0; 3659 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3660 struct ocfs2_alloc_context *meta_ac = NULL; 3661 3662 /* We need to be able to handle at least an extent tree split. 
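 * (hence ref_blocks starts at ocfs2_extend_meta_needed() for the data
 * tree before the refcount-tree requirements are added in)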
*/ 3663 ref_blocks = ocfs2_extend_meta_needed(data_et->et_root_el); 3664 3665 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, 3666 ref_ci, ref_root_bh, 3667 p_cluster, num_clusters, 3668 &ref_blocks, &credits); 3669 if (ret) { 3670 mlog_errno(ret); 3671 goto out; 3672 } 3673 3674 trace_ocfs2_add_refcount_flag(ref_blocks, credits); 3675 3676 if (ref_blocks) { 3677 ret = ocfs2_reserve_new_metadata_blocks(osb, 3678 ref_blocks, &meta_ac); 3679 if (ret) { 3680 mlog_errno(ret); 3681 goto out; 3682 } 3683 } 3684 3685 if (post) 3686 credits += post->credits; 3687 3688 handle = ocfs2_start_trans(osb, credits); 3689 if (IS_ERR(handle)) { 3690 ret = PTR_ERR(handle); 3691 mlog_errno(ret); 3692 goto out; 3693 } 3694 3695 ret = ocfs2_mark_extent_refcounted(inode, data_et, handle, 3696 cpos, num_clusters, p_cluster, 3697 meta_ac, dealloc); 3698 if (ret) { 3699 mlog_errno(ret); 3700 goto out_commit; 3701 } 3702 3703 ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, 3704 p_cluster, num_clusters, 0, 3705 meta_ac, dealloc); 3706 if (ret) { 3707 mlog_errno(ret); 3708 goto out_commit; 3709 } 3710 3711 if (post && post->func) { 3712 ret = post->func(inode, handle, post->para); 3713 if (ret) 3714 mlog_errno(ret); 3715 } 3716 3717 out_commit: 3718 ocfs2_commit_trans(osb, handle); 3719 out: 3720 if (meta_ac) 3721 ocfs2_free_alloc_context(meta_ac); 3722 return ret; 3723 } 3724 3725 static int ocfs2_change_ctime(struct inode *inode, 3726 struct buffer_head *di_bh) 3727 { 3728 int ret; 3729 handle_t *handle; 3730 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3731 3732 handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb), 3733 OCFS2_INODE_UPDATE_CREDITS); 3734 if (IS_ERR(handle)) { 3735 ret = PTR_ERR(handle); 3736 mlog_errno(ret); 3737 goto out; 3738 } 3739 3740 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, 3741 OCFS2_JOURNAL_ACCESS_WRITE); 3742 if (ret) { 3743 mlog_errno(ret); 3744 goto out_commit; 3745 } 3746 3747 inode_set_ctime_current(inode); 3748 di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode)); 3749 di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode)); 3750 3751 ocfs2_journal_dirty(handle, di_bh); 3752 3753 out_commit: 3754 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); 3755 out: 3756 return ret; 3757 } 3758 3759 static int ocfs2_attach_refcount_tree(struct inode *inode, 3760 struct buffer_head *di_bh) 3761 { 3762 int ret, data_changed = 0; 3763 struct buffer_head *ref_root_bh = NULL; 3764 struct ocfs2_inode_info *oi = OCFS2_I(inode); 3765 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3766 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3767 struct ocfs2_refcount_tree *ref_tree; 3768 unsigned int ext_flags; 3769 loff_t size; 3770 u32 cpos, num_clusters, clusters, p_cluster; 3771 struct ocfs2_cached_dealloc_ctxt dealloc; 3772 struct ocfs2_extent_tree di_et; 3773 3774 ocfs2_init_dealloc_ctxt(&dealloc); 3775 3776 if (!ocfs2_is_refcount_inode(inode)) { 3777 ret = ocfs2_create_refcount_tree(inode, di_bh); 3778 if (ret) { 3779 mlog_errno(ret); 3780 goto out; 3781 } 3782 } 3783 3784 BUG_ON(!di->i_refcount_loc); 3785 ret = ocfs2_lock_refcount_tree(osb, 3786 le64_to_cpu(di->i_refcount_loc), 1, 3787 &ref_tree, &ref_root_bh); 3788 if (ret) { 3789 mlog_errno(ret); 3790 goto out; 3791 } 3792 3793 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) 3794 goto attach_xattr; 3795 3796 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh); 3797 3798 size = i_size_read(inode); 3799 clusters = ocfs2_clusters_for_bytes(inode->i_sb, size); 3800 
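	/* Walk every allocated extent and tag it refcounted; holes and
	 * extents already carrying OCFS2_EXT_REFCOUNTED are skipped. */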
3801 cpos = 0; 3802 while (cpos < clusters) { 3803 ret = ocfs2_get_clusters(inode, cpos, &p_cluster, 3804 &num_clusters, &ext_flags); 3805 if (ret) { 3806 mlog_errno(ret); 3807 goto unlock; 3808 } 3809 if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) { 3810 ret = ocfs2_add_refcount_flag(inode, &di_et, 3811 &ref_tree->rf_ci, 3812 ref_root_bh, cpos, 3813 p_cluster, num_clusters, 3814 &dealloc, NULL); 3815 if (ret) { 3816 mlog_errno(ret); 3817 goto unlock; 3818 } 3819 3820 data_changed = 1; 3821 } 3822 cpos += num_clusters; 3823 } 3824 3825 attach_xattr: 3826 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) { 3827 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh, 3828 &ref_tree->rf_ci, 3829 ref_root_bh, 3830 &dealloc); 3831 if (ret) { 3832 mlog_errno(ret); 3833 goto unlock; 3834 } 3835 } 3836 3837 if (data_changed) { 3838 ret = ocfs2_change_ctime(inode, di_bh); 3839 if (ret) 3840 mlog_errno(ret); 3841 } 3842 3843 unlock: 3844 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 3845 brelse(ref_root_bh); 3846 3847 if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) { 3848 ocfs2_schedule_truncate_log_flush(osb, 1); 3849 ocfs2_run_deallocs(osb, &dealloc); 3850 } 3851 out: 3852 /* 3853 * Empty the extent map so that we may get the right extent 3854 * record from the disk. 3855 */ 3856 ocfs2_extent_map_trunc(inode, 0); 3857 3858 return ret; 3859 } 3860 3861 static int ocfs2_add_refcounted_extent(struct inode *inode, 3862 struct ocfs2_extent_tree *et, 3863 struct ocfs2_caching_info *ref_ci, 3864 struct buffer_head *ref_root_bh, 3865 u32 cpos, u32 p_cluster, u32 num_clusters, 3866 unsigned int ext_flags, 3867 struct ocfs2_cached_dealloc_ctxt *dealloc) 3868 { 3869 int ret; 3870 handle_t *handle; 3871 int credits = 0; 3872 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3873 struct ocfs2_alloc_context *meta_ac = NULL; 3874 3875 ret = ocfs2_lock_refcount_allocators(inode->i_sb, 3876 p_cluster, num_clusters, 3877 et, ref_ci, 3878 ref_root_bh, &meta_ac, 3879 NULL, &credits); 3880 if (ret) { 3881 mlog_errno(ret); 3882 goto out; 3883 } 3884 3885 handle = ocfs2_start_trans(osb, credits); 3886 if (IS_ERR(handle)) { 3887 ret = PTR_ERR(handle); 3888 mlog_errno(ret); 3889 goto out; 3890 } 3891 3892 ret = ocfs2_insert_extent(handle, et, cpos, 3893 ocfs2_clusters_to_blocks(inode->i_sb, p_cluster), 3894 num_clusters, ext_flags, meta_ac); 3895 if (ret) { 3896 mlog_errno(ret); 3897 goto out_commit; 3898 } 3899 3900 ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, 3901 p_cluster, num_clusters, 3902 meta_ac, dealloc); 3903 if (ret) { 3904 mlog_errno(ret); 3905 goto out_commit; 3906 } 3907 3908 ret = dquot_alloc_space_nodirty(inode, 3909 ocfs2_clusters_to_bytes(osb->sb, num_clusters)); 3910 if (ret) 3911 mlog_errno(ret); 3912 3913 out_commit: 3914 ocfs2_commit_trans(osb, handle); 3915 out: 3916 if (meta_ac) 3917 ocfs2_free_alloc_context(meta_ac); 3918 return ret; 3919 } 3920 3921 static int ocfs2_duplicate_inline_data(struct inode *s_inode, 3922 struct buffer_head *s_bh, 3923 struct inode *t_inode, 3924 struct buffer_head *t_bh) 3925 { 3926 int ret; 3927 handle_t *handle; 3928 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb); 3929 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data; 3930 struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data; 3931 3932 BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); 3933 3934 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); 3935 if (IS_ERR(handle)) { 3936 ret = PTR_ERR(handle); 3937 mlog_errno(ret); 3938 goto out; 3939 } 3940 
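	/* Copy the inline data payload into the target dinode and mark
	 * the target inline as well. */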
3941 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh, 3942 OCFS2_JOURNAL_ACCESS_WRITE); 3943 if (ret) { 3944 mlog_errno(ret); 3945 goto out_commit; 3946 } 3947 3948 t_di->id2.i_data.id_count = s_di->id2.i_data.id_count; 3949 memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data, 3950 le16_to_cpu(s_di->id2.i_data.id_count)); 3951 spin_lock(&OCFS2_I(t_inode)->ip_lock); 3952 OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL; 3953 t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features); 3954 spin_unlock(&OCFS2_I(t_inode)->ip_lock); 3955 3956 ocfs2_journal_dirty(handle, t_bh); 3957 3958 out_commit: 3959 ocfs2_commit_trans(osb, handle); 3960 out: 3961 return ret; 3962 } 3963 3964 static int ocfs2_duplicate_extent_list(struct inode *s_inode, 3965 struct inode *t_inode, 3966 struct buffer_head *t_bh, 3967 struct ocfs2_caching_info *ref_ci, 3968 struct buffer_head *ref_root_bh, 3969 struct ocfs2_cached_dealloc_ctxt *dealloc) 3970 { 3971 int ret = 0; 3972 u32 p_cluster, num_clusters, clusters, cpos; 3973 loff_t size; 3974 unsigned int ext_flags; 3975 struct ocfs2_extent_tree et; 3976 3977 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh); 3978 3979 size = i_size_read(s_inode); 3980 clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size); 3981 3982 cpos = 0; 3983 while (cpos < clusters) { 3984 ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster, 3985 &num_clusters, &ext_flags); 3986 if (ret) { 3987 mlog_errno(ret); 3988 goto out; 3989 } 3990 if (p_cluster) { 3991 ret = ocfs2_add_refcounted_extent(t_inode, &et, 3992 ref_ci, ref_root_bh, 3993 cpos, p_cluster, 3994 num_clusters, 3995 ext_flags, 3996 dealloc); 3997 if (ret) { 3998 mlog_errno(ret); 3999 goto out; 4000 } 4001 } 4002 4003 cpos += num_clusters; 4004 } 4005 4006 out: 4007 return ret; 4008 } 4009 4010 /* 4011 * change the new file's attributes to the src. 4012 * 4013 * reflink creates a snapshot of a file, that means the attributes 4014 * must be identical except for three exceptions - nlink, ino, and ctime. 
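 *
 * When preserve is false, uid/gid/mode are not copied here;
 * ocfs2_reflink() reinitializes security and ACLs for the new inode
 * instead.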
4015 */ 4016 static int ocfs2_complete_reflink(struct inode *s_inode, 4017 struct buffer_head *s_bh, 4018 struct inode *t_inode, 4019 struct buffer_head *t_bh, 4020 bool preserve) 4021 { 4022 int ret; 4023 handle_t *handle; 4024 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data; 4025 struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data; 4026 loff_t size = i_size_read(s_inode); 4027 4028 handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb), 4029 OCFS2_INODE_UPDATE_CREDITS); 4030 if (IS_ERR(handle)) { 4031 ret = PTR_ERR(handle); 4032 mlog_errno(ret); 4033 return ret; 4034 } 4035 4036 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh, 4037 OCFS2_JOURNAL_ACCESS_WRITE); 4038 if (ret) { 4039 mlog_errno(ret); 4040 goto out_commit; 4041 } 4042 4043 spin_lock(&OCFS2_I(t_inode)->ip_lock); 4044 OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters; 4045 OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr; 4046 OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features; 4047 spin_unlock(&OCFS2_I(t_inode)->ip_lock); 4048 i_size_write(t_inode, size); 4049 t_inode->i_blocks = s_inode->i_blocks; 4050 4051 di->i_xattr_inline_size = s_di->i_xattr_inline_size; 4052 di->i_clusters = s_di->i_clusters; 4053 di->i_size = s_di->i_size; 4054 di->i_dyn_features = s_di->i_dyn_features; 4055 di->i_attr = s_di->i_attr; 4056 4057 if (preserve) { 4058 t_inode->i_uid = s_inode->i_uid; 4059 t_inode->i_gid = s_inode->i_gid; 4060 t_inode->i_mode = s_inode->i_mode; 4061 di->i_uid = s_di->i_uid; 4062 di->i_gid = s_di->i_gid; 4063 di->i_mode = s_di->i_mode; 4064 4065 /* 4066 * update time. 4067 * we want mtime to appear identical to the source and 4068 * update ctime. 4069 */ 4070 inode_set_ctime_current(t_inode); 4071 4072 di->i_ctime = cpu_to_le64(inode_get_ctime_sec(t_inode)); 4073 di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(t_inode)); 4074 4075 inode_set_mtime_to_ts(t_inode, inode_get_mtime(s_inode)); 4076 di->i_mtime = s_di->i_mtime; 4077 di->i_mtime_nsec = s_di->i_mtime_nsec; 4078 } 4079 4080 ocfs2_journal_dirty(handle, t_bh); 4081 4082 out_commit: 4083 ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle); 4084 return ret; 4085 } 4086 4087 static int ocfs2_create_reflink_node(struct inode *s_inode, 4088 struct buffer_head *s_bh, 4089 struct inode *t_inode, 4090 struct buffer_head *t_bh, 4091 bool preserve) 4092 { 4093 int ret; 4094 struct buffer_head *ref_root_bh = NULL; 4095 struct ocfs2_cached_dealloc_ctxt dealloc; 4096 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb); 4097 struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data; 4098 struct ocfs2_refcount_tree *ref_tree; 4099 4100 ocfs2_init_dealloc_ctxt(&dealloc); 4101 4102 ret = ocfs2_set_refcount_tree(t_inode, t_bh, 4103 le64_to_cpu(di->i_refcount_loc)); 4104 if (ret) { 4105 mlog_errno(ret); 4106 goto out; 4107 } 4108 4109 if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { 4110 ret = ocfs2_duplicate_inline_data(s_inode, s_bh, 4111 t_inode, t_bh); 4112 if (ret) 4113 mlog_errno(ret); 4114 goto out; 4115 } 4116 4117 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 4118 1, &ref_tree, &ref_root_bh); 4119 if (ret) { 4120 mlog_errno(ret); 4121 goto out; 4122 } 4123 4124 ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh, 4125 &ref_tree->rf_ci, ref_root_bh, 4126 &dealloc); 4127 if (ret) { 4128 mlog_errno(ret); 4129 goto out_unlock_refcount; 4130 } 4131 4132 out_unlock_refcount: 4133 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 4134 brelse(ref_root_bh); 4135 out: 
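	/* Free anything queued in the dealloc context during the reflink. */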
static int ocfs2_create_reflink_node(struct inode *s_inode,
				     struct buffer_head *s_bh,
				     struct inode *t_inode,
				     struct buffer_head *t_bh,
				     bool preserve)
{
	int ret;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_refcount_tree *ref_tree;

	ocfs2_init_dealloc_ctxt(&dealloc);

	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
				      le64_to_cpu(di->i_refcount_loc));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
						  t_inode, t_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
					  &ref_tree->rf_ci, ref_root_bh,
					  &dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_refcount;
	}

out_unlock_refcount:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);
out:
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

	return ret;
}

static int __ocfs2_reflink(struct dentry *old_dentry,
			   struct buffer_head *old_bh,
			   struct inode *new_inode,
			   bool preserve)
{
	int ret;
	struct inode *inode = d_inode(old_dentry);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	ret = filemap_fdatawrite(inode->i_mapping);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_attach_refcount_tree(inode, old_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	inode_lock_nested(new_inode, I_MUTEX_CHILD);
	ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
				      OI_LS_REFLINK_TARGET);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

	if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) &&
	    (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
		/*
		 * Adjust the extent record count to reserve space for the
		 * inline extended attributes.  The inline data count has
		 * already been adjusted in ocfs2_duplicate_inline_data().
		 */
		struct ocfs2_inode_info *new_oi = OCFS2_I(new_inode);

		if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
		    !(ocfs2_inode_is_fast_symlink(new_inode))) {
			struct ocfs2_dinode *new_di = (struct ocfs2_dinode *)new_bh->b_data;
			struct ocfs2_dinode *old_di = (struct ocfs2_dinode *)old_bh->b_data;
			struct ocfs2_extent_list *el = &new_di->id2.i_list;
			int inline_size = le16_to_cpu(old_di->i_xattr_inline_size);

			le16_add_cpu(&el->l_count, -(inline_size /
					sizeof(struct ocfs2_extent_rec)));
		}
	}

	ret = ocfs2_create_reflink_node(inode, old_bh,
					new_inode, new_bh, preserve);
	if (ret) {
		mlog_errno(ret);
		goto inode_unlock;
	}

	if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
		ret = ocfs2_reflink_xattrs(inode, old_bh,
					   new_inode, new_bh,
					   preserve);
		if (ret) {
			mlog_errno(ret);
			goto inode_unlock;
		}
	}

	ret = ocfs2_complete_reflink(inode, old_bh,
				     new_inode, new_bh, preserve);
	if (ret)
		mlog_errno(ret);

inode_unlock:
	ocfs2_inode_unlock(new_inode, 1);
	brelse(new_bh);
out_unlock:
	inode_unlock(new_inode);
out:
	if (!ret) {
		ret = filemap_fdatawait(inode->i_mapping);
		if (ret)
			mlog_errno(ret);
	}
	return ret;
}

static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
			 struct dentry *new_dentry, bool preserve)
{
	int error, had_lock;
	struct inode *inode = d_inode(old_dentry);
	struct buffer_head *old_bh = NULL;
	struct inode *new_orphan_inode = NULL;
	struct ocfs2_lock_holder oh;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
					     &new_orphan_inode);
	if (error) {
		mlog_errno(error);
		goto out;
	}

	error = ocfs2_rw_lock(inode, 1);
	if (error) {
		mlog_errno(error);
		goto out;
	}

	error = ocfs2_inode_lock(inode, &old_bh, 1);
	if (error) {
		mlog_errno(error);
		ocfs2_rw_unlock(inode, 1);
		goto out;
	}

	down_write(&OCFS2_I(inode)->ip_xattr_sem);
	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	error = __ocfs2_reflink(old_dentry, old_bh,
				new_orphan_inode, preserve);
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	up_write(&OCFS2_I(inode)->ip_xattr_sem);

	ocfs2_inode_unlock(inode, 1);
	ocfs2_rw_unlock(inode, 1);
	brelse(old_bh);

	if (error) {
		mlog_errno(error);
		goto out;
	}

	had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
					    &oh);
	if (had_lock < 0) {
		error = had_lock;
		mlog_errno(error);
		goto out;
	}

	/* If the security attributes aren't preserved, re-initialize them. */
	if (!preserve) {
		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
						    &new_dentry->d_name);
		if (error)
			mlog_errno(error);
	}
	if (!error) {
		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
						       new_dentry);
		if (error)
			mlog_errno(error);
	}
	ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);

out:
	if (new_orphan_inode) {
		/*
		 * We need to open_unlock the inode no matter whether we
		 * succeed or not, so that other nodes can delete it later.
		 */
		ocfs2_open_unlock(new_orphan_inode);
		if (error)
			iput(new_orphan_inode);
	}

	return error;
}

/*
 * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
 * sys_reflink().  This will go away when vfs_reflink() exists in
 * fs/namei.c.
 */

/* copied from may_create in VFS. */
static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(&nop_mnt_idmap, dir, MAY_WRITE | MAY_EXEC);
}

/**
 * ocfs2_vfs_reflink - Create a reference-counted link
 *
 * @old_dentry:	source dentry + inode
 * @dir:	directory to create the target
 * @new_dentry:	target dentry
 * @preserve:	if true, preserve all file attributes
 */
static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
			     struct dentry *new_dentry, bool preserve)
{
	struct inode *inode = d_inode(old_dentry);
	int error;

	if (!inode)
		return -ENOENT;

	error = ocfs2_may_create(dir, new_dentry);
	if (error)
		return error;

	if (dir->i_sb != inode->i_sb)
		return -EXDEV;

	/*
	 * A reflink to an append-only or immutable file cannot be created.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* Only regular files can be reflinked. */
	if (!S_ISREG(inode->i_mode))
		return -EPERM;

	/*
	 * If the caller wants to preserve ownership, they need the rights
	 * to do so.
	 */
	if (preserve) {
		if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
			return -EPERM;
		if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
			return -EPERM;
	}

	/*
	 * If the caller is modifying any aspect of the attributes, they
	 * are not creating a snapshot.  They need read permission on the
	 * file.
	 */
	if (!preserve) {
		error = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
		if (error)
			return error;
	}

	inode_lock(inode);
	error = dquot_initialize(dir);
	if (!error)
		error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
	inode_unlock(inode);
	if (!error)
		fsnotify_create(dir, new_dentry);
	return error;
}

/*
 * Most of this code is copied from sys_linkat.
 */
int ocfs2_reflink_ioctl(struct inode *inode,
			const char __user *oldname,
			const char __user *newname,
			bool preserve)
{
	struct dentry *new_dentry;
	struct path old_path, new_path;
	int error;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
	if (error) {
		mlog_errno(error);
		return error;
	}

	new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry)) {
		mlog_errno(error);
		goto out;
	}

	error = -EXDEV;
	if (old_path.mnt != new_path.mnt) {
		mlog_errno(error);
		goto out_dput;
	}

	error = ocfs2_vfs_reflink(old_path.dentry,
				  d_inode(new_path.dentry),
				  new_dentry, preserve);
out_dput:
	done_path_create(&new_path, new_dentry);
out:
	path_put(&old_path);

	return error;
}

/* Update destination inode size, if necessary. */
int ocfs2_reflink_update_dest(struct inode *dest,
			      struct buffer_head *d_bh,
			      loff_t newlen)
{
	handle_t *handle;
	int ret;

	dest->i_blocks = ocfs2_inode_sector_count(dest);

	if (newlen <= i_size_read(dest))
		return 0;

	handle = ocfs2_start_trans(OCFS2_SB(dest->i_sb),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		return ret;
	}

	/* Extend i_size if needed. */
	spin_lock(&OCFS2_I(dest)->ip_lock);
	if (newlen > i_size_read(dest))
		i_size_write(dest, newlen);
	spin_unlock(&OCFS2_I(dest)->ip_lock);
	inode_set_mtime_to_ts(dest, inode_set_ctime_current(dest));

	ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

out_commit:
	ocfs2_commit_trans(OCFS2_SB(dest->i_sb), handle);
	return ret;
}
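
/*
 * Illustrative aside, not part of the kernel build: a minimal userspace
 * sketch of driving ocfs2_reflink_ioctl() above.  The reflink_arguments
 * layout and the ioctl number are repeated here as an assumption that they
 * mirror fs/ocfs2/ocfs2_ioctl.h (check the real header); the mount point
 * and file names are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* Assumed to mirror struct reflink_arguments in fs/ocfs2/ocfs2_ioctl.h. */
struct reflink_arguments {
	uint64_t old_path;
	uint64_t new_path;
	uint64_t preserve;
};
#define OCFS2_IOC_REFLINK	_IOW('o', 4, struct reflink_arguments)

int main(void)
{
	/* Any fd on the ocfs2 mount will do; the paths name source and target. */
	int fd = open("/mnt/ocfs2", O_RDONLY);
	struct reflink_arguments args = {
		.old_path = (uint64_t)(uintptr_t)"/mnt/ocfs2/src",
		.new_path = (uint64_t)(uintptr_t)"/mnt/ocfs2/snap",
		.preserve = 1,	/* keep owner, mode and mtime */
	};

	if (fd < 0 || ioctl(fd, OCFS2_IOC_REFLINK, &args) < 0) {
		perror("OCFS2_IOC_REFLINK");
		return 1;
	}
	close(fd);
	return 0;
}
#endif
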
/* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */
static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode,
					 struct buffer_head *s_bh,
					 loff_t pos_in,
					 struct inode *t_inode,
					 struct buffer_head *t_bh,
					 loff_t pos_out,
					 loff_t len,
					 struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	struct ocfs2_extent_tree s_et;
	struct ocfs2_extent_tree t_et;
	struct ocfs2_dinode *dis;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_tree *ref_tree;
	struct ocfs2_super *osb;
	loff_t remapped_bytes = 0;
	loff_t pstart, plen;
	u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0;
	unsigned int ext_flags;
	int ret = 0;

	osb = OCFS2_SB(s_inode->i_sb);
	dis = (struct ocfs2_dinode *)s_bh->b_data;
	ocfs2_init_dinode_extent_tree(&s_et, INODE_CACHE(s_inode), s_bh);
	ocfs2_init_dinode_extent_tree(&t_et, INODE_CACHE(t_inode), t_bh);

	spos = ocfs2_bytes_to_clusters(s_inode->i_sb, pos_in);
	tpos = ocfs2_bytes_to_clusters(t_inode->i_sb, pos_out);
	slast = ocfs2_clusters_for_bytes(s_inode->i_sb, pos_in + len);

	while (spos < slast) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		/* Look up the extent. */
		ret = ocfs2_get_clusters(s_inode, spos, &p_cluster,
					 &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		num_clusters = min_t(u32, num_clusters, slast - spos);

		/* Punch out the dest range. */
		pstart = ocfs2_clusters_to_bytes(t_inode->i_sb, tpos);
		plen = ocfs2_clusters_to_bytes(t_inode->i_sb, num_clusters);
		ret = ocfs2_remove_inode_range(t_inode, t_bh, pstart, plen);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (p_cluster == 0)
			goto next_loop;

		/* Lock the refcount btree... */
		ret = ocfs2_lock_refcount_tree(osb,
					       le64_to_cpu(dis->i_refcount_loc),
					       1, &ref_tree, &ref_root_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Mark s_inode's extent as refcounted. */
		if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = ocfs2_add_refcount_flag(s_inode, &s_et,
						      &ref_tree->rf_ci,
						      ref_root_bh, spos,
						      p_cluster, num_clusters,
						      dealloc, NULL);
			if (ret) {
				mlog_errno(ret);
				goto out_unlock_refcount;
			}
		}

		/* Map in the new extent. */
		ext_flags |= OCFS2_EXT_REFCOUNTED;
		ret = ocfs2_add_refcounted_extent(t_inode, &t_et,
						  &ref_tree->rf_ci,
						  ref_root_bh,
						  tpos, p_cluster,
						  num_clusters,
						  ext_flags,
						  dealloc);
		if (ret) {
			mlog_errno(ret);
			goto out_unlock_refcount;
		}

		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
		brelse(ref_root_bh);
next_loop:
		spos += num_clusters;
		tpos += num_clusters;
		remapped_clus += num_clusters;
	}

	goto out;
out_unlock_refcount:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);
out:
	remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus);
	remapped_bytes = min_t(loff_t, len, remapped_bytes);

	return remapped_bytes > 0 ? remapped_bytes : ret;
}
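
/*
 * Illustrative aside, not part of the kernel build: the loop above derives
 * its cluster range from bytes by rounding the start positions down
 * (ocfs2_bytes_to_clusters) and the end up (ocfs2_clusters_for_bytes).
 * A small sketch of that arithmetic, assuming a hypothetical 64 KiB
 * cluster size (ocfs2 supports 4 KiB to 1 MiB):
 */
#if 0
#include <stdio.h>

#define CLUSTER_BITS	16
#define CLUSTER_SIZE	(1ULL << CLUSTER_BITS)

static unsigned long long bytes_to_clusters(unsigned long long b)
{
	return b >> CLUSTER_BITS;			/* round down */
}

static unsigned long long clusters_for_bytes(unsigned long long b)
{
	return (b + CLUSTER_SIZE - 1) >> CLUSTER_BITS;	/* round up */
}

int main(void)
{
	unsigned long long pos_in = 5000, len = 200000;

	/* spos and slast as the remap loop would compute them. */
	printf("first cluster %llu, end cluster %llu\n",
	       bytes_to_clusters(pos_in),
	       clusters_for_bytes(pos_in + len));
	return 0;
}
#endif
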
/* Set up refcount tree and remap s_inode to t_inode. */
loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
				  struct buffer_head *s_bh,
				  loff_t pos_in,
				  struct inode *t_inode,
				  struct buffer_head *t_bh,
				  loff_t pos_out,
				  loff_t len)
{
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_super *osb;
	struct ocfs2_dinode *dis;
	struct ocfs2_dinode *dit;
	loff_t ret;

	osb = OCFS2_SB(s_inode->i_sb);
	dis = (struct ocfs2_dinode *)s_bh->b_data;
	dit = (struct ocfs2_dinode *)t_bh->b_data;
	ocfs2_init_dealloc_ctxt(&dealloc);

	/*
	 * If we're reflinking the entire file and the source is inline
	 * data, just copy the contents.
	 */
	if (pos_in == pos_out && pos_in == 0 && len == i_size_read(s_inode) &&
	    i_size_read(t_inode) <= len &&
	    (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) {
		ret = ocfs2_duplicate_inline_data(s_inode, s_bh, t_inode, t_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * If both inodes belong to two different refcount groups then
	 * forget it because we don't know how (or want) to go merging
	 * refcount trees.
	 */
	ret = -EOPNOTSUPP;
	if (ocfs2_is_refcount_inode(s_inode) &&
	    ocfs2_is_refcount_inode(t_inode) &&
	    le64_to_cpu(dis->i_refcount_loc) !=
	    le64_to_cpu(dit->i_refcount_loc))
		goto out;

	/* Neither inode has a refcount tree.  Add one to s_inode. */
	if (!ocfs2_is_refcount_inode(s_inode) &&
	    !ocfs2_is_refcount_inode(t_inode)) {
		ret = ocfs2_create_refcount_tree(s_inode, s_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Ensure that both inodes end up with the same refcount tree. */
	if (!ocfs2_is_refcount_inode(s_inode)) {
		ret = ocfs2_set_refcount_tree(s_inode, s_bh,
					      le64_to_cpu(dit->i_refcount_loc));
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}
	if (!ocfs2_is_refcount_inode(t_inode)) {
		ret = ocfs2_set_refcount_tree(t_inode, t_bh,
					      le64_to_cpu(dis->i_refcount_loc));
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Turn off inline data in the dest file. */
	if (OCFS2_I(t_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_convert_inline_data_to_extents(t_inode, t_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Actually remap extents now. */
	ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh,
					 pos_out, len, &dealloc);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out:
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

	return ret;
}
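
/*
 * Illustrative aside, not part of the kernel build: a plain restatement of
 * the refcount-tree decisions made above as a small table-driven sketch.
 * The helper below is hypothetical and only mirrors the branch structure
 * of ocfs2_reflink_remap_blocks().
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

static const char *refcount_tree_plan(bool src_has_tree, bool dst_has_tree,
				      bool same_tree)
{
	if (src_has_tree && dst_has_tree && !same_tree)
		return "unsupported: won't merge two refcount trees";
	if (!src_has_tree && !dst_has_tree)
		return "create a tree on the source, then share it with the destination";
	if (!src_has_tree)
		return "source adopts the destination's tree";
	if (!dst_has_tree)
		return "destination adopts the source's tree";
	return "already share one tree";
}

int main(void)
{
	/* Walk the cases handled above. */
	for (int s = 0; s < 2; s++)
		for (int d = 0; d < 2; d++)
			printf("src=%d dst=%d: %s\n", s, d,
			       refcount_tree_plan(s, d, s && d));
	printf("src=1 dst=1 (different trees): %s\n",
	       refcount_tree_plan(true, true, false));
	return 0;
}
#endif
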
/* Lock an inode and grab a bh pointing to the inode. */
int ocfs2_reflink_inodes_lock(struct inode *s_inode,
			      struct buffer_head **bh_s,
			      struct inode *t_inode,
			      struct buffer_head **bh_t)
{
	struct inode *inode1 = s_inode;
	struct inode *inode2 = t_inode;
	struct ocfs2_inode_info *oi1;
	struct ocfs2_inode_info *oi2;
	struct buffer_head *bh1 = NULL;
	struct buffer_head *bh2 = NULL;
	bool same_inode = (s_inode == t_inode);
	bool need_swap = (inode1->i_ino > inode2->i_ino);
	int status;

	/* First grab the VFS and rw locks. */
	lock_two_nondirectories(s_inode, t_inode);
	if (need_swap)
		swap(inode1, inode2);

	status = ocfs2_rw_lock(inode1, 1);
	if (status) {
		mlog_errno(status);
		goto out_i1;
	}
	if (!same_inode) {
		status = ocfs2_rw_lock(inode2, 1);
		if (status) {
			mlog_errno(status);
			goto out_i2;
		}
	}

	/* Now go for the cluster locks */
	oi1 = OCFS2_I(inode1);
	oi2 = OCFS2_I(inode2);

	trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
				(unsigned long long)oi2->ip_blkno);

	/* We always want to lock the one with the lower lockid first. */
	if (oi1->ip_blkno > oi2->ip_blkno)
		mlog_errno(-ENOLCK);

	/* lock id1 */
	status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
					 OI_LS_REFLINK_TARGET);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto out_rw2;
	}

	/* lock id2 */
	if (!same_inode) {
		status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
						 OI_LS_REFLINK_TARGET);
		if (status < 0) {
			if (status != -ENOENT)
				mlog_errno(status);
			goto out_cl1;
		}
	} else {
		bh2 = bh1;
	}

	/*
	 * If we swapped inode order above, we have to swap the buffer heads
	 * before passing them back to the caller.
	 */
	if (need_swap)
		swap(bh1, bh2);
	*bh_s = bh1;
	*bh_t = bh2;

	trace_ocfs2_double_lock_end(
			(unsigned long long)oi1->ip_blkno,
			(unsigned long long)oi2->ip_blkno);

	return 0;

out_cl1:
	ocfs2_inode_unlock(inode1, 1);
	brelse(bh1);
out_rw2:
	ocfs2_rw_unlock(inode2, 1);
out_i2:
	ocfs2_rw_unlock(inode1, 1);
out_i1:
	unlock_two_nondirectories(s_inode, t_inode);
	return status;
}

/* Unlock both inodes and release buffers. */
void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
				 struct buffer_head *s_bh,
				 struct inode *t_inode,
				 struct buffer_head *t_bh)
{
	ocfs2_inode_unlock(s_inode, 1);
	ocfs2_rw_unlock(s_inode, 1);
	brelse(s_bh);
	if (s_inode != t_inode) {
		ocfs2_inode_unlock(t_inode, 1);
		ocfs2_rw_unlock(t_inode, 1);
		brelse(t_bh);
	}
	unlock_two_nondirectories(s_inode, t_inode);
}
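
/*
 * Illustrative aside, not part of the kernel build:
 * ocfs2_reflink_inodes_lock() avoids ABBA deadlocks by always taking the
 * lock of the inode with the lower block number first and by special-casing
 * s_inode == t_inode.  A generic userspace sketch of the same ordering
 * discipline, using hypothetical objects and pthread mutexes rather than
 * cluster locks:
 */
#if 0
#include <pthread.h>
#include <stdio.h>

struct obj {
	unsigned long long id;		/* analogous to ip_blkno */
	pthread_mutex_t lock;
};

/* Always lock the lower id first so concurrent pairs cannot deadlock. */
static void lock_pair(struct obj *a, struct obj *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);
		return;
	}
	if (a->id > b->id) {
		struct obj *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}

static void unlock_pair(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct obj x = { 100, PTHREAD_MUTEX_INITIALIZER };
	struct obj y = {  42, PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&x, &y);	/* takes y (id 42) before x (id 100) */
	printf("both locked\n");
	unlock_pair(&x, &y);
	return 0;
}
#endif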