1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * refcounttree.c 4 * 5 * Copyright (C) 2009 Oracle. All rights reserved. 6 */ 7 8 #include <linux/sort.h> 9 #include <cluster/masklog.h> 10 #include "ocfs2.h" 11 #include "inode.h" 12 #include "alloc.h" 13 #include "suballoc.h" 14 #include "journal.h" 15 #include "uptodate.h" 16 #include "super.h" 17 #include "buffer_head_io.h" 18 #include "blockcheck.h" 19 #include "refcounttree.h" 20 #include "sysfile.h" 21 #include "dlmglue.h" 22 #include "extent_map.h" 23 #include "aops.h" 24 #include "xattr.h" 25 #include "namei.h" 26 #include "ocfs2_trace.h" 27 #include "file.h" 28 #include "symlink.h" 29 30 #include <linux/bio.h> 31 #include <linux/blkdev.h> 32 #include <linux/slab.h> 33 #include <linux/writeback.h> 34 #include <linux/pagevec.h> 35 #include <linux/swap.h> 36 #include <linux/security.h> 37 #include <linux/string.h> 38 #include <linux/fsnotify.h> 39 #include <linux/quotaops.h> 40 #include <linux/namei.h> 41 #include <linux/mount.h> 42 #include <linux/posix_acl.h> 43 44 struct ocfs2_cow_context { 45 struct inode *inode; 46 u32 cow_start; 47 u32 cow_len; 48 struct ocfs2_extent_tree data_et; 49 struct ocfs2_refcount_tree *ref_tree; 50 struct buffer_head *ref_root_bh; 51 struct ocfs2_alloc_context *meta_ac; 52 struct ocfs2_alloc_context *data_ac; 53 struct ocfs2_cached_dealloc_ctxt dealloc; 54 void *cow_object; 55 struct ocfs2_post_refcount *post_refcount; 56 int extra_credits; 57 int (*get_clusters)(struct ocfs2_cow_context *context, 58 u32 v_cluster, u32 *p_cluster, 59 u32 *num_clusters, 60 unsigned int *extent_flags); 61 int (*cow_duplicate_clusters)(handle_t *handle, 62 struct inode *inode, 63 u32 cpos, u32 old_cluster, 64 u32 new_cluster, u32 new_len); 65 }; 66 67 static inline struct ocfs2_refcount_tree * 68 cache_info_to_refcount(struct ocfs2_caching_info *ci) 69 { 70 return container_of(ci, struct ocfs2_refcount_tree, rf_ci); 71 } 72 73 static int ocfs2_validate_refcount_block(struct super_block *sb, 74 struct buffer_head *bh) 75 { 76 int rc; 77 struct ocfs2_refcount_block *rb = 78 (struct ocfs2_refcount_block *)bh->b_data; 79 80 trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr); 81 82 BUG_ON(!buffer_uptodate(bh)); 83 84 /* 85 * If the ecc fails, we return the error but otherwise 86 * leave the filesystem running. We know any error is 87 * local to this block. 88 */ 89 rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check); 90 if (rc) { 91 mlog(ML_ERROR, "Checksum failed for refcount block %llu\n", 92 (unsigned long long)bh->b_blocknr); 93 return rc; 94 } 95 96 97 if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) { 98 rc = ocfs2_error(sb, 99 "Refcount block #%llu has bad signature %.*s\n", 100 (unsigned long long)bh->b_blocknr, 7, 101 rb->rf_signature); 102 goto out; 103 } 104 105 if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) { 106 rc = ocfs2_error(sb, 107 "Refcount block #%llu has an invalid rf_blkno of %llu\n", 108 (unsigned long long)bh->b_blocknr, 109 (unsigned long long)le64_to_cpu(rb->rf_blkno)); 110 goto out; 111 } 112 113 if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) { 114 rc = ocfs2_error(sb, 115 "Refcount block #%llu has an invalid rf_fs_generation of #%u\n", 116 (unsigned long long)bh->b_blocknr, 117 le32_to_cpu(rb->rf_fs_generation)); 118 goto out; 119 } 120 out: 121 return rc; 122 } 123 124 static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci, 125 u64 rb_blkno, 126 struct buffer_head **bh) 127 { 128 int rc; 129 struct buffer_head *tmp = *bh; 130 131 rc = ocfs2_read_block(ci, rb_blkno, &tmp, 132 ocfs2_validate_refcount_block); 133 134 /* If ocfs2_read_block() got us a new bh, pass it up. */ 135 if (!rc && !*bh) 136 *bh = tmp; 137 138 return rc; 139 } 140 141 static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci) 142 { 143 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); 144 145 return rf->rf_blkno; 146 } 147 148 static struct super_block * 149 ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci) 150 { 151 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); 152 153 return rf->rf_sb; 154 } 155 156 static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci) 157 __acquires(&rf->rf_lock) 158 { 159 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); 160 161 spin_lock(&rf->rf_lock); 162 } 163 164 static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci) 165 __releases(&rf->rf_lock) 166 { 167 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); 168 169 spin_unlock(&rf->rf_lock); 170 } 171 172 static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci) 173 { 174 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); 175 176 mutex_lock(&rf->rf_io_mutex); 177 } 178 179 static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci) 180 { 181 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); 182 183 mutex_unlock(&rf->rf_io_mutex); 184 } 185 186 static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = { 187 .co_owner = ocfs2_refcount_cache_owner, 188 .co_get_super = ocfs2_refcount_cache_get_super, 189 .co_cache_lock = ocfs2_refcount_cache_lock, 190 .co_cache_unlock = ocfs2_refcount_cache_unlock, 191 .co_io_lock = ocfs2_refcount_cache_io_lock, 192 .co_io_unlock = ocfs2_refcount_cache_io_unlock, 193 }; 194 195 static struct ocfs2_refcount_tree * 196 ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno) 197 { 198 struct rb_node *n = osb->osb_rf_lock_tree.rb_node; 199 struct ocfs2_refcount_tree *tree = NULL; 200 201 while (n) { 202 tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node); 203 204 if (blkno < tree->rf_blkno) 205 n = n->rb_left; 206 else if (blkno > tree->rf_blkno) 207 n = n->rb_right; 208 else 209 return tree; 210 } 211 212 return NULL; 213 } 214 215 /* osb_lock is already locked. */ 216 static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb, 217 struct ocfs2_refcount_tree *new) 218 { 219 u64 rf_blkno = new->rf_blkno; 220 struct rb_node *parent = NULL; 221 struct rb_node **p = &osb->osb_rf_lock_tree.rb_node; 222 struct ocfs2_refcount_tree *tmp; 223 224 while (*p) { 225 parent = *p; 226 227 tmp = rb_entry(parent, struct ocfs2_refcount_tree, 228 rf_node); 229 230 if (rf_blkno < tmp->rf_blkno) 231 p = &(*p)->rb_left; 232 else if (rf_blkno > tmp->rf_blkno) 233 p = &(*p)->rb_right; 234 else { 235 /* This should never happen! */ 236 mlog(ML_ERROR, "Duplicate refcount block %llu found!\n", 237 (unsigned long long)rf_blkno); 238 BUG(); 239 } 240 } 241 242 rb_link_node(&new->rf_node, parent, p); 243 rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree); 244 } 245 246 static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree) 247 { 248 ocfs2_metadata_cache_exit(&tree->rf_ci); 249 ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres); 250 ocfs2_lock_res_free(&tree->rf_lockres); 251 kfree(tree); 252 } 253 254 static inline void 255 ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb, 256 struct ocfs2_refcount_tree *tree) 257 { 258 rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree); 259 if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree) 260 osb->osb_ref_tree_lru = NULL; 261 } 262 263 static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb, 264 struct ocfs2_refcount_tree *tree) 265 { 266 spin_lock(&osb->osb_lock); 267 ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree); 268 spin_unlock(&osb->osb_lock); 269 } 270 271 static void ocfs2_kref_remove_refcount_tree(struct kref *kref) 272 { 273 struct ocfs2_refcount_tree *tree = 274 container_of(kref, struct ocfs2_refcount_tree, rf_getcnt); 275 276 ocfs2_free_refcount_tree(tree); 277 } 278 279 static inline void 280 ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree) 281 { 282 kref_get(&tree->rf_getcnt); 283 } 284 285 static inline void 286 ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree) 287 { 288 kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree); 289 } 290 291 static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new, 292 struct super_block *sb) 293 { 294 ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops); 295 mutex_init(&new->rf_io_mutex); 296 new->rf_sb = sb; 297 spin_lock_init(&new->rf_lock); 298 } 299 300 static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb, 301 struct ocfs2_refcount_tree *new, 302 u64 rf_blkno, u32 generation) 303 { 304 init_rwsem(&new->rf_sem); 305 ocfs2_refcount_lock_res_init(&new->rf_lockres, osb, 306 rf_blkno, generation); 307 } 308 309 static struct ocfs2_refcount_tree* 310 ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno) 311 { 312 struct ocfs2_refcount_tree *new; 313 314 new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS); 315 if (!new) 316 return NULL; 317 318 new->rf_blkno = rf_blkno; 319 kref_init(&new->rf_getcnt); 320 ocfs2_init_refcount_tree_ci(new, osb->sb); 321 322 return new; 323 } 324 325 static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno, 326 struct ocfs2_refcount_tree **ret_tree) 327 { 328 int ret = 0; 329 struct ocfs2_refcount_tree *tree, *new = NULL; 330 struct buffer_head *ref_root_bh = NULL; 331 struct ocfs2_refcount_block *ref_rb; 332 333 spin_lock(&osb->osb_lock); 334 if (osb->osb_ref_tree_lru && 335 osb->osb_ref_tree_lru->rf_blkno == rf_blkno) 336 tree = osb->osb_ref_tree_lru; 337 else 338 tree = ocfs2_find_refcount_tree(osb, rf_blkno); 339 if (tree) 340 goto out; 341 342 spin_unlock(&osb->osb_lock); 343 344 new = ocfs2_allocate_refcount_tree(osb, rf_blkno); 345 if (!new) { 346 ret = -ENOMEM; 347 mlog_errno(ret); 348 return ret; 349 } 350 /* 351 * We need the generation to create the refcount tree lock and since 352 * it isn't changed during the tree modification, we are safe here to 353 * read without protection. 354 * We also have to purge the cache after we create the lock since the 355 * refcount block may have the stale data. It can only be trusted when 356 * we hold the refcount lock. 357 */ 358 ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh); 359 if (ret) { 360 mlog_errno(ret); 361 ocfs2_metadata_cache_exit(&new->rf_ci); 362 kfree(new); 363 return ret; 364 } 365 366 ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 367 new->rf_generation = le32_to_cpu(ref_rb->rf_generation); 368 ocfs2_init_refcount_tree_lock(osb, new, rf_blkno, 369 new->rf_generation); 370 ocfs2_metadata_cache_purge(&new->rf_ci); 371 372 spin_lock(&osb->osb_lock); 373 tree = ocfs2_find_refcount_tree(osb, rf_blkno); 374 if (tree) 375 goto out; 376 377 ocfs2_insert_refcount_tree(osb, new); 378 379 tree = new; 380 new = NULL; 381 382 out: 383 *ret_tree = tree; 384 385 osb->osb_ref_tree_lru = tree; 386 387 spin_unlock(&osb->osb_lock); 388 389 if (new) 390 ocfs2_free_refcount_tree(new); 391 392 brelse(ref_root_bh); 393 return ret; 394 } 395 396 static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno) 397 { 398 int ret; 399 struct buffer_head *di_bh = NULL; 400 struct ocfs2_dinode *di; 401 402 ret = ocfs2_read_inode_block(inode, &di_bh); 403 if (ret) { 404 mlog_errno(ret); 405 goto out; 406 } 407 408 BUG_ON(!ocfs2_is_refcount_inode(inode)); 409 410 di = (struct ocfs2_dinode *)di_bh->b_data; 411 *ref_blkno = le64_to_cpu(di->i_refcount_loc); 412 brelse(di_bh); 413 out: 414 return ret; 415 } 416 417 static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb, 418 struct ocfs2_refcount_tree *tree, int rw) 419 { 420 int ret; 421 422 ret = ocfs2_refcount_lock(tree, rw); 423 if (ret) { 424 mlog_errno(ret); 425 goto out; 426 } 427 428 if (rw) 429 down_write(&tree->rf_sem); 430 else 431 down_read(&tree->rf_sem); 432 433 out: 434 return ret; 435 } 436 437 /* 438 * Lock the refcount tree pointed by ref_blkno and return the tree. 439 * In most case, we lock the tree and read the refcount block. 440 * So read it here if the caller really needs it. 441 * 442 * If the tree has been re-created by other node, it will free the 443 * old one and re-create it. 444 */ 445 int ocfs2_lock_refcount_tree(struct ocfs2_super *osb, 446 u64 ref_blkno, int rw, 447 struct ocfs2_refcount_tree **ret_tree, 448 struct buffer_head **ref_bh) 449 { 450 int ret, delete_tree = 0; 451 struct ocfs2_refcount_tree *tree = NULL; 452 struct buffer_head *ref_root_bh = NULL; 453 struct ocfs2_refcount_block *rb; 454 455 again: 456 ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree); 457 if (ret) { 458 mlog_errno(ret); 459 return ret; 460 } 461 462 ocfs2_refcount_tree_get(tree); 463 464 ret = __ocfs2_lock_refcount_tree(osb, tree, rw); 465 if (ret) { 466 mlog_errno(ret); 467 ocfs2_refcount_tree_put(tree); 468 goto out; 469 } 470 471 ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno, 472 &ref_root_bh); 473 if (ret) { 474 mlog_errno(ret); 475 ocfs2_unlock_refcount_tree(osb, tree, rw); 476 goto out; 477 } 478 479 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 480 /* 481 * If the refcount block has been freed and re-created, we may need 482 * to recreate the refcount tree also. 483 * 484 * Here we just remove the tree from the rb-tree, and the last 485 * kref holder will unlock and delete this refcount_tree. 486 * Then we goto "again" and ocfs2_get_refcount_tree will create 487 * the new refcount tree for us. 488 */ 489 if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) { 490 if (!tree->rf_removed) { 491 ocfs2_erase_refcount_tree_from_list(osb, tree); 492 tree->rf_removed = 1; 493 delete_tree = 1; 494 } 495 496 ocfs2_unlock_refcount_tree(osb, tree, rw); 497 /* 498 * We get an extra reference when we create the refcount 499 * tree, so another put will destroy it. 500 */ 501 if (delete_tree) 502 ocfs2_refcount_tree_put(tree); 503 brelse(ref_root_bh); 504 ref_root_bh = NULL; 505 goto again; 506 } 507 508 *ret_tree = tree; 509 if (ref_bh) { 510 *ref_bh = ref_root_bh; 511 ref_root_bh = NULL; 512 } 513 out: 514 brelse(ref_root_bh); 515 return ret; 516 } 517 518 void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb, 519 struct ocfs2_refcount_tree *tree, int rw) 520 { 521 if (rw) 522 up_write(&tree->rf_sem); 523 else 524 up_read(&tree->rf_sem); 525 526 ocfs2_refcount_unlock(tree, rw); 527 ocfs2_refcount_tree_put(tree); 528 } 529 530 void ocfs2_purge_refcount_trees(struct ocfs2_super *osb) 531 { 532 struct rb_node *node; 533 struct ocfs2_refcount_tree *tree; 534 struct rb_root *root = &osb->osb_rf_lock_tree; 535 536 while ((node = rb_last(root)) != NULL) { 537 tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node); 538 539 trace_ocfs2_purge_refcount_trees( 540 (unsigned long long) tree->rf_blkno); 541 542 rb_erase(&tree->rf_node, root); 543 ocfs2_free_refcount_tree(tree); 544 } 545 } 546 547 /* 548 * Create a refcount tree for an inode. 549 * We take for granted that the inode is already locked. 550 */ 551 static int ocfs2_create_refcount_tree(struct inode *inode, 552 struct buffer_head *di_bh) 553 { 554 int ret; 555 handle_t *handle = NULL; 556 struct ocfs2_alloc_context *meta_ac = NULL; 557 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 558 struct ocfs2_inode_info *oi = OCFS2_I(inode); 559 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 560 struct buffer_head *new_bh = NULL; 561 struct ocfs2_refcount_block *rb; 562 struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL; 563 u16 suballoc_bit_start; 564 u32 num_got; 565 u64 suballoc_loc, first_blkno; 566 567 BUG_ON(ocfs2_is_refcount_inode(inode)); 568 569 trace_ocfs2_create_refcount_tree( 570 (unsigned long long)oi->ip_blkno); 571 572 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); 573 if (ret) { 574 mlog_errno(ret); 575 goto out; 576 } 577 578 handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS); 579 if (IS_ERR(handle)) { 580 ret = PTR_ERR(handle); 581 mlog_errno(ret); 582 goto out; 583 } 584 585 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, 586 OCFS2_JOURNAL_ACCESS_WRITE); 587 if (ret) { 588 mlog_errno(ret); 589 goto out_commit; 590 } 591 592 ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc, 593 &suballoc_bit_start, &num_got, 594 &first_blkno); 595 if (ret) { 596 mlog_errno(ret); 597 goto out_commit; 598 } 599 600 new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno); 601 if (!new_tree) { 602 ret = -ENOMEM; 603 mlog_errno(ret); 604 goto out_commit; 605 } 606 607 new_bh = sb_getblk(inode->i_sb, first_blkno); 608 if (!new_bh) { 609 ret = -ENOMEM; 610 mlog_errno(ret); 611 goto out_commit; 612 } 613 ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh); 614 615 ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh, 616 OCFS2_JOURNAL_ACCESS_CREATE); 617 if (ret) { 618 mlog_errno(ret); 619 goto out_commit; 620 } 621 622 /* Initialize ocfs2_refcount_block. */ 623 rb = (struct ocfs2_refcount_block *)new_bh->b_data; 624 memset(rb, 0, inode->i_sb->s_blocksize); 625 strscpy(rb->rf_signature, OCFS2_REFCOUNT_BLOCK_SIGNATURE); 626 rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); 627 rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc); 628 rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); 629 rb->rf_fs_generation = cpu_to_le32(osb->fs_generation); 630 rb->rf_blkno = cpu_to_le64(first_blkno); 631 rb->rf_count = cpu_to_le32(1); 632 rb->rf_records.rl_count = 633 cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb)); 634 spin_lock(&osb->osb_lock); 635 rb->rf_generation = cpu_to_le32(osb->s_next_generation++); 636 spin_unlock(&osb->osb_lock); 637 638 ocfs2_journal_dirty(handle, new_bh); 639 640 spin_lock(&oi->ip_lock); 641 oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL; 642 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); 643 di->i_refcount_loc = cpu_to_le64(first_blkno); 644 spin_unlock(&oi->ip_lock); 645 646 trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno); 647 648 ocfs2_journal_dirty(handle, di_bh); 649 650 /* 651 * We have to init the tree lock here since it will use 652 * the generation number to create it. 653 */ 654 new_tree->rf_generation = le32_to_cpu(rb->rf_generation); 655 ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno, 656 new_tree->rf_generation); 657 658 spin_lock(&osb->osb_lock); 659 tree = ocfs2_find_refcount_tree(osb, first_blkno); 660 661 /* 662 * We've just created a new refcount tree in this block. If 663 * we found a refcount tree on the ocfs2_super, it must be 664 * one we just deleted. We free the old tree before 665 * inserting the new tree. 666 */ 667 BUG_ON(tree && tree->rf_generation == new_tree->rf_generation); 668 if (tree) 669 ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree); 670 ocfs2_insert_refcount_tree(osb, new_tree); 671 spin_unlock(&osb->osb_lock); 672 new_tree = NULL; 673 if (tree) 674 ocfs2_refcount_tree_put(tree); 675 676 out_commit: 677 ocfs2_commit_trans(osb, handle); 678 679 out: 680 if (new_tree) { 681 ocfs2_metadata_cache_exit(&new_tree->rf_ci); 682 kfree(new_tree); 683 } 684 685 brelse(new_bh); 686 if (meta_ac) 687 ocfs2_free_alloc_context(meta_ac); 688 689 return ret; 690 } 691 692 static int ocfs2_set_refcount_tree(struct inode *inode, 693 struct buffer_head *di_bh, 694 u64 refcount_loc) 695 { 696 int ret; 697 handle_t *handle = NULL; 698 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 699 struct ocfs2_inode_info *oi = OCFS2_I(inode); 700 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 701 struct buffer_head *ref_root_bh = NULL; 702 struct ocfs2_refcount_block *rb; 703 struct ocfs2_refcount_tree *ref_tree; 704 705 BUG_ON(ocfs2_is_refcount_inode(inode)); 706 707 ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1, 708 &ref_tree, &ref_root_bh); 709 if (ret) { 710 mlog_errno(ret); 711 return ret; 712 } 713 714 handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS); 715 if (IS_ERR(handle)) { 716 ret = PTR_ERR(handle); 717 mlog_errno(ret); 718 goto out; 719 } 720 721 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, 722 OCFS2_JOURNAL_ACCESS_WRITE); 723 if (ret) { 724 mlog_errno(ret); 725 goto out_commit; 726 } 727 728 ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh, 729 OCFS2_JOURNAL_ACCESS_WRITE); 730 if (ret) { 731 mlog_errno(ret); 732 goto out_commit; 733 } 734 735 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 736 le32_add_cpu(&rb->rf_count, 1); 737 738 ocfs2_journal_dirty(handle, ref_root_bh); 739 740 spin_lock(&oi->ip_lock); 741 oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL; 742 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); 743 di->i_refcount_loc = cpu_to_le64(refcount_loc); 744 spin_unlock(&oi->ip_lock); 745 ocfs2_journal_dirty(handle, di_bh); 746 747 out_commit: 748 ocfs2_commit_trans(osb, handle); 749 out: 750 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 751 brelse(ref_root_bh); 752 753 return ret; 754 } 755 756 int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh) 757 { 758 int ret, delete_tree = 0; 759 handle_t *handle = NULL; 760 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 761 struct ocfs2_inode_info *oi = OCFS2_I(inode); 762 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 763 struct ocfs2_refcount_block *rb; 764 struct inode *alloc_inode = NULL; 765 struct buffer_head *alloc_bh = NULL; 766 struct buffer_head *blk_bh = NULL; 767 struct ocfs2_refcount_tree *ref_tree; 768 int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS; 769 u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc); 770 u16 bit = 0; 771 772 if (!ocfs2_is_refcount_inode(inode)) 773 return 0; 774 775 BUG_ON(!ref_blkno); 776 ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh); 777 if (ret) { 778 mlog_errno(ret); 779 return ret; 780 } 781 782 rb = (struct ocfs2_refcount_block *)blk_bh->b_data; 783 784 /* 785 * If we are the last user, we need to free the block. 786 * So lock the allocator ahead. 787 */ 788 if (le32_to_cpu(rb->rf_count) == 1) { 789 blk = le64_to_cpu(rb->rf_blkno); 790 bit = le16_to_cpu(rb->rf_suballoc_bit); 791 if (rb->rf_suballoc_loc) 792 bg_blkno = le64_to_cpu(rb->rf_suballoc_loc); 793 else 794 bg_blkno = ocfs2_which_suballoc_group(blk, bit); 795 796 alloc_inode = ocfs2_get_system_file_inode(osb, 797 EXTENT_ALLOC_SYSTEM_INODE, 798 le16_to_cpu(rb->rf_suballoc_slot)); 799 if (!alloc_inode) { 800 ret = -ENOMEM; 801 mlog_errno(ret); 802 goto out; 803 } 804 inode_lock(alloc_inode); 805 806 ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1); 807 if (ret) { 808 mlog_errno(ret); 809 goto out_mutex; 810 } 811 812 credits += OCFS2_SUBALLOC_FREE; 813 } 814 815 handle = ocfs2_start_trans(osb, credits); 816 if (IS_ERR(handle)) { 817 ret = PTR_ERR(handle); 818 mlog_errno(ret); 819 goto out_unlock; 820 } 821 822 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, 823 OCFS2_JOURNAL_ACCESS_WRITE); 824 if (ret) { 825 mlog_errno(ret); 826 goto out_commit; 827 } 828 829 ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh, 830 OCFS2_JOURNAL_ACCESS_WRITE); 831 if (ret) { 832 mlog_errno(ret); 833 goto out_commit; 834 } 835 836 spin_lock(&oi->ip_lock); 837 oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL; 838 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); 839 di->i_refcount_loc = 0; 840 spin_unlock(&oi->ip_lock); 841 ocfs2_journal_dirty(handle, di_bh); 842 843 le32_add_cpu(&rb->rf_count , -1); 844 ocfs2_journal_dirty(handle, blk_bh); 845 846 if (!rb->rf_count) { 847 delete_tree = 1; 848 ocfs2_erase_refcount_tree_from_list(osb, ref_tree); 849 ret = ocfs2_free_suballoc_bits(handle, alloc_inode, 850 alloc_bh, bit, bg_blkno, 1); 851 if (ret) 852 mlog_errno(ret); 853 } 854 855 out_commit: 856 ocfs2_commit_trans(osb, handle); 857 out_unlock: 858 if (alloc_inode) { 859 ocfs2_inode_unlock(alloc_inode, 1); 860 brelse(alloc_bh); 861 } 862 out_mutex: 863 if (alloc_inode) { 864 inode_unlock(alloc_inode); 865 iput(alloc_inode); 866 } 867 out: 868 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 869 if (delete_tree) 870 ocfs2_refcount_tree_put(ref_tree); 871 brelse(blk_bh); 872 873 return ret; 874 } 875 876 static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci, 877 struct buffer_head *ref_leaf_bh, 878 u64 cpos, unsigned int len, 879 struct ocfs2_refcount_rec *ret_rec, 880 int *index) 881 { 882 int i = 0; 883 struct ocfs2_refcount_block *rb = 884 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 885 struct ocfs2_refcount_rec *rec = NULL; 886 887 for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) { 888 rec = &rb->rf_records.rl_recs[i]; 889 890 if (le64_to_cpu(rec->r_cpos) + 891 le32_to_cpu(rec->r_clusters) <= cpos) 892 continue; 893 else if (le64_to_cpu(rec->r_cpos) > cpos) 894 break; 895 896 /* ok, cpos fail in this rec. Just return. */ 897 if (ret_rec) 898 *ret_rec = *rec; 899 goto out; 900 } 901 902 if (ret_rec) { 903 /* We meet with a hole here, so fake the rec. */ 904 ret_rec->r_cpos = cpu_to_le64(cpos); 905 ret_rec->r_refcount = 0; 906 if (i < le16_to_cpu(rb->rf_records.rl_used) && 907 le64_to_cpu(rec->r_cpos) < cpos + len) 908 ret_rec->r_clusters = 909 cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos); 910 else 911 ret_rec->r_clusters = cpu_to_le32(len); 912 } 913 914 out: 915 *index = i; 916 } 917 918 /* 919 * Try to remove refcount tree. The mechanism is: 920 * 1) Check whether i_clusters == 0, if no, exit. 921 * 2) check whether we have i_xattr_loc in dinode. if yes, exit. 922 * 3) Check whether we have inline xattr stored outside, if yes, exit. 923 * 4) Remove the tree. 924 */ 925 int ocfs2_try_remove_refcount_tree(struct inode *inode, 926 struct buffer_head *di_bh) 927 { 928 int ret; 929 struct ocfs2_inode_info *oi = OCFS2_I(inode); 930 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 931 932 down_write(&oi->ip_xattr_sem); 933 down_write(&oi->ip_alloc_sem); 934 935 if (oi->ip_clusters) 936 goto out; 937 938 if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc) 939 goto out; 940 941 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL && 942 ocfs2_has_inline_xattr_value_outside(inode, di)) 943 goto out; 944 945 ret = ocfs2_remove_refcount_tree(inode, di_bh); 946 if (ret) 947 mlog_errno(ret); 948 out: 949 up_write(&oi->ip_alloc_sem); 950 up_write(&oi->ip_xattr_sem); 951 return 0; 952 } 953 954 /* 955 * Find the end range for a leaf refcount block indicated by 956 * el->l_recs[index].e_blkno. 957 */ 958 static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci, 959 struct buffer_head *ref_root_bh, 960 struct ocfs2_extent_block *eb, 961 struct ocfs2_extent_list *el, 962 int index, u32 *cpos_end) 963 { 964 int ret, i, subtree_root; 965 u32 cpos; 966 u64 blkno; 967 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 968 struct ocfs2_path *left_path = NULL, *right_path = NULL; 969 struct ocfs2_extent_tree et; 970 struct ocfs2_extent_list *tmp_el; 971 972 if (index < le16_to_cpu(el->l_next_free_rec) - 1) { 973 /* 974 * We have a extent rec after index, so just use the e_cpos 975 * of the next extent rec. 976 */ 977 *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos); 978 return 0; 979 } 980 981 if (!eb || !eb->h_next_leaf_blk) { 982 /* 983 * We are the last extent rec, so any high cpos should 984 * be stored in this leaf refcount block. 985 */ 986 *cpos_end = UINT_MAX; 987 return 0; 988 } 989 990 /* 991 * If the extent block isn't the last one, we have to find 992 * the subtree root between this extent block and the next 993 * leaf extent block and get the corresponding e_cpos from 994 * the subroot. Otherwise we may corrupt the b-tree. 995 */ 996 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 997 998 left_path = ocfs2_new_path_from_et(&et); 999 if (!left_path) { 1000 ret = -ENOMEM; 1001 mlog_errno(ret); 1002 goto out; 1003 } 1004 1005 cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos); 1006 ret = ocfs2_find_path(ci, left_path, cpos); 1007 if (ret) { 1008 mlog_errno(ret); 1009 goto out; 1010 } 1011 1012 right_path = ocfs2_new_path_from_path(left_path); 1013 if (!right_path) { 1014 ret = -ENOMEM; 1015 mlog_errno(ret); 1016 goto out; 1017 } 1018 1019 ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos); 1020 if (ret) { 1021 mlog_errno(ret); 1022 goto out; 1023 } 1024 1025 ret = ocfs2_find_path(ci, right_path, cpos); 1026 if (ret) { 1027 mlog_errno(ret); 1028 goto out; 1029 } 1030 1031 subtree_root = ocfs2_find_subtree_root(&et, left_path, 1032 right_path); 1033 1034 tmp_el = left_path->p_node[subtree_root].el; 1035 blkno = left_path->p_node[subtree_root+1].bh->b_blocknr; 1036 for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) { 1037 if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) { 1038 *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos); 1039 break; 1040 } 1041 } 1042 1043 BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec)); 1044 1045 out: 1046 ocfs2_free_path(left_path); 1047 ocfs2_free_path(right_path); 1048 return ret; 1049 } 1050 1051 /* 1052 * Given a cpos and len, try to find the refcount record which contains cpos. 1053 * 1. If cpos can be found in one refcount record, return the record. 1054 * 2. If cpos can't be found, return a fake record which start from cpos 1055 * and end at a small value between cpos+len and start of the next record. 1056 * This fake record has r_refcount = 0. 1057 */ 1058 static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci, 1059 struct buffer_head *ref_root_bh, 1060 u64 cpos, unsigned int len, 1061 struct ocfs2_refcount_rec *ret_rec, 1062 int *index, 1063 struct buffer_head **ret_bh) 1064 { 1065 int ret = 0, i, found; 1066 u32 low_cpos, cpos_end; 1067 struct ocfs2_extent_list *el; 1068 struct ocfs2_extent_rec *rec = NULL; 1069 struct ocfs2_extent_block *eb = NULL; 1070 struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL; 1071 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 1072 struct ocfs2_refcount_block *rb = 1073 (struct ocfs2_refcount_block *)ref_root_bh->b_data; 1074 1075 if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) { 1076 ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len, 1077 ret_rec, index); 1078 *ret_bh = ref_root_bh; 1079 get_bh(ref_root_bh); 1080 return 0; 1081 } 1082 1083 el = &rb->rf_list; 1084 low_cpos = cpos & OCFS2_32BIT_POS_MASK; 1085 1086 if (el->l_tree_depth) { 1087 ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh); 1088 if (ret) { 1089 mlog_errno(ret); 1090 goto out; 1091 } 1092 1093 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 1094 el = &eb->h_list; 1095 1096 if (el->l_tree_depth) { 1097 ret = ocfs2_error(sb, 1098 "refcount tree %llu has non zero tree depth in leaf btree tree block %llu\n", 1099 (unsigned long long)ocfs2_metadata_cache_owner(ci), 1100 (unsigned long long)eb_bh->b_blocknr); 1101 goto out; 1102 } 1103 } 1104 1105 found = 0; 1106 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) { 1107 rec = &el->l_recs[i]; 1108 1109 if (le32_to_cpu(rec->e_cpos) <= low_cpos) { 1110 found = 1; 1111 break; 1112 } 1113 } 1114 1115 if (found) { 1116 ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh, 1117 eb, el, i, &cpos_end); 1118 if (ret) { 1119 mlog_errno(ret); 1120 goto out; 1121 } 1122 1123 if (cpos_end < low_cpos + len) 1124 len = cpos_end - low_cpos; 1125 } 1126 1127 ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno), 1128 &ref_leaf_bh); 1129 if (ret) { 1130 mlog_errno(ret); 1131 goto out; 1132 } 1133 1134 ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len, 1135 ret_rec, index); 1136 *ret_bh = ref_leaf_bh; 1137 out: 1138 brelse(eb_bh); 1139 return ret; 1140 } 1141 1142 enum ocfs2_ref_rec_contig { 1143 REF_CONTIG_NONE = 0, 1144 REF_CONTIG_LEFT, 1145 REF_CONTIG_RIGHT, 1146 REF_CONTIG_LEFTRIGHT, 1147 }; 1148 1149 static enum ocfs2_ref_rec_contig 1150 ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb, 1151 int index) 1152 { 1153 if ((rb->rf_records.rl_recs[index].r_refcount == 1154 rb->rf_records.rl_recs[index + 1].r_refcount) && 1155 (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) + 1156 le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) == 1157 le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos))) 1158 return REF_CONTIG_RIGHT; 1159 1160 return REF_CONTIG_NONE; 1161 } 1162 1163 static enum ocfs2_ref_rec_contig 1164 ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb, 1165 int index) 1166 { 1167 enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE; 1168 1169 if (index < le16_to_cpu(rb->rf_records.rl_used) - 1) 1170 ret = ocfs2_refcount_rec_adjacent(rb, index); 1171 1172 if (index > 0) { 1173 enum ocfs2_ref_rec_contig tmp; 1174 1175 tmp = ocfs2_refcount_rec_adjacent(rb, index - 1); 1176 1177 if (tmp == REF_CONTIG_RIGHT) { 1178 if (ret == REF_CONTIG_RIGHT) 1179 ret = REF_CONTIG_LEFTRIGHT; 1180 else 1181 ret = REF_CONTIG_LEFT; 1182 } 1183 } 1184 1185 return ret; 1186 } 1187 1188 static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb, 1189 int index) 1190 { 1191 BUG_ON(rb->rf_records.rl_recs[index].r_refcount != 1192 rb->rf_records.rl_recs[index+1].r_refcount); 1193 1194 le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters, 1195 le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters)); 1196 1197 if (index < le16_to_cpu(rb->rf_records.rl_used) - 2) 1198 memmove(&rb->rf_records.rl_recs[index + 1], 1199 &rb->rf_records.rl_recs[index + 2], 1200 sizeof(struct ocfs2_refcount_rec) * 1201 (le16_to_cpu(rb->rf_records.rl_used) - index - 2)); 1202 1203 memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1], 1204 0, sizeof(struct ocfs2_refcount_rec)); 1205 le16_add_cpu(&rb->rf_records.rl_used, -1); 1206 } 1207 1208 /* 1209 * Merge the refcount rec if we are contiguous with the adjacent recs. 1210 */ 1211 static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb, 1212 int index) 1213 { 1214 enum ocfs2_ref_rec_contig contig = 1215 ocfs2_refcount_rec_contig(rb, index); 1216 1217 if (contig == REF_CONTIG_NONE) 1218 return; 1219 1220 if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) { 1221 BUG_ON(index == 0); 1222 index--; 1223 } 1224 1225 ocfs2_rotate_refcount_rec_left(rb, index); 1226 1227 if (contig == REF_CONTIG_LEFTRIGHT) 1228 ocfs2_rotate_refcount_rec_left(rb, index); 1229 } 1230 1231 /* 1232 * Change the refcount indexed by "index" in ref_bh. 1233 * If refcount reaches 0, remove it. 1234 */ 1235 static int ocfs2_change_refcount_rec(handle_t *handle, 1236 struct ocfs2_caching_info *ci, 1237 struct buffer_head *ref_leaf_bh, 1238 int index, int merge, int change) 1239 { 1240 int ret; 1241 struct ocfs2_refcount_block *rb = 1242 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 1243 struct ocfs2_refcount_list *rl = &rb->rf_records; 1244 struct ocfs2_refcount_rec *rec = &rl->rl_recs[index]; 1245 1246 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, 1247 OCFS2_JOURNAL_ACCESS_WRITE); 1248 if (ret) { 1249 mlog_errno(ret); 1250 goto out; 1251 } 1252 1253 trace_ocfs2_change_refcount_rec( 1254 (unsigned long long)ocfs2_metadata_cache_owner(ci), 1255 index, le32_to_cpu(rec->r_refcount), change); 1256 le32_add_cpu(&rec->r_refcount, change); 1257 1258 if (!rec->r_refcount) { 1259 if (index != le16_to_cpu(rl->rl_used) - 1) { 1260 memmove(rec, rec + 1, 1261 (le16_to_cpu(rl->rl_used) - index - 1) * 1262 sizeof(struct ocfs2_refcount_rec)); 1263 memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1], 1264 0, sizeof(struct ocfs2_refcount_rec)); 1265 } 1266 1267 le16_add_cpu(&rl->rl_used, -1); 1268 } else if (merge) 1269 ocfs2_refcount_rec_merge(rb, index); 1270 1271 ocfs2_journal_dirty(handle, ref_leaf_bh); 1272 out: 1273 return ret; 1274 } 1275 1276 static int ocfs2_expand_inline_ref_root(handle_t *handle, 1277 struct ocfs2_caching_info *ci, 1278 struct buffer_head *ref_root_bh, 1279 struct buffer_head **ref_leaf_bh, 1280 struct ocfs2_alloc_context *meta_ac) 1281 { 1282 int ret; 1283 u16 suballoc_bit_start; 1284 u32 num_got; 1285 u64 suballoc_loc, blkno; 1286 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 1287 struct buffer_head *new_bh = NULL; 1288 struct ocfs2_refcount_block *new_rb; 1289 struct ocfs2_refcount_block *root_rb = 1290 (struct ocfs2_refcount_block *)ref_root_bh->b_data; 1291 1292 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, 1293 OCFS2_JOURNAL_ACCESS_WRITE); 1294 if (ret) { 1295 mlog_errno(ret); 1296 goto out; 1297 } 1298 1299 ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc, 1300 &suballoc_bit_start, &num_got, 1301 &blkno); 1302 if (ret) { 1303 mlog_errno(ret); 1304 goto out; 1305 } 1306 1307 new_bh = sb_getblk(sb, blkno); 1308 if (new_bh == NULL) { 1309 ret = -ENOMEM; 1310 mlog_errno(ret); 1311 goto out; 1312 } 1313 ocfs2_set_new_buffer_uptodate(ci, new_bh); 1314 1315 ret = ocfs2_journal_access_rb(handle, ci, new_bh, 1316 OCFS2_JOURNAL_ACCESS_CREATE); 1317 if (ret) { 1318 mlog_errno(ret); 1319 goto out; 1320 } 1321 1322 /* 1323 * Initialize ocfs2_refcount_block. 1324 * It should contain the same information as the old root. 1325 * so just memcpy it and change the corresponding field. 1326 */ 1327 memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize); 1328 1329 new_rb = (struct ocfs2_refcount_block *)new_bh->b_data; 1330 new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); 1331 new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc); 1332 new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); 1333 new_rb->rf_blkno = cpu_to_le64(blkno); 1334 new_rb->rf_cpos = cpu_to_le32(0); 1335 new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr); 1336 new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL); 1337 ocfs2_journal_dirty(handle, new_bh); 1338 1339 /* Now change the root. */ 1340 memset(&root_rb->rf_list, 0, sb->s_blocksize - 1341 offsetof(struct ocfs2_refcount_block, rf_list)); 1342 root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb)); 1343 root_rb->rf_clusters = cpu_to_le32(1); 1344 root_rb->rf_list.l_next_free_rec = cpu_to_le16(1); 1345 root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno); 1346 root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1); 1347 root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL); 1348 1349 ocfs2_journal_dirty(handle, ref_root_bh); 1350 1351 trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno, 1352 le16_to_cpu(new_rb->rf_records.rl_used)); 1353 1354 *ref_leaf_bh = new_bh; 1355 new_bh = NULL; 1356 out: 1357 brelse(new_bh); 1358 return ret; 1359 } 1360 1361 static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev, 1362 struct ocfs2_refcount_rec *next) 1363 { 1364 if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <= 1365 ocfs2_get_ref_rec_low_cpos(next)) 1366 return 1; 1367 1368 return 0; 1369 } 1370 1371 static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b) 1372 { 1373 const struct ocfs2_refcount_rec *l = a, *r = b; 1374 u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l); 1375 u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r); 1376 1377 if (l_cpos > r_cpos) 1378 return 1; 1379 if (l_cpos < r_cpos) 1380 return -1; 1381 return 0; 1382 } 1383 1384 static int cmp_refcount_rec_by_cpos(const void *a, const void *b) 1385 { 1386 const struct ocfs2_refcount_rec *l = a, *r = b; 1387 u64 l_cpos = le64_to_cpu(l->r_cpos); 1388 u64 r_cpos = le64_to_cpu(r->r_cpos); 1389 1390 if (l_cpos > r_cpos) 1391 return 1; 1392 if (l_cpos < r_cpos) 1393 return -1; 1394 return 0; 1395 } 1396 1397 /* 1398 * The refcount cpos are ordered by their 64bit cpos, 1399 * But we will use the low 32 bit to be the e_cpos in the b-tree. 1400 * So we need to make sure that this pos isn't intersected with others. 1401 * 1402 * Note: The refcount block is already sorted by their low 32 bit cpos, 1403 * So just try the middle pos first, and we will exit when we find 1404 * the good position. 1405 */ 1406 static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl, 1407 u32 *split_pos, int *split_index) 1408 { 1409 int num_used = le16_to_cpu(rl->rl_used); 1410 int delta, middle = num_used / 2; 1411 1412 for (delta = 0; delta < middle; delta++) { 1413 /* Let's check delta earlier than middle */ 1414 if (ocfs2_refcount_rec_no_intersect( 1415 &rl->rl_recs[middle - delta - 1], 1416 &rl->rl_recs[middle - delta])) { 1417 *split_index = middle - delta; 1418 break; 1419 } 1420 1421 /* For even counts, don't walk off the end */ 1422 if ((middle + delta + 1) == num_used) 1423 continue; 1424 1425 /* Now try delta past middle */ 1426 if (ocfs2_refcount_rec_no_intersect( 1427 &rl->rl_recs[middle + delta], 1428 &rl->rl_recs[middle + delta + 1])) { 1429 *split_index = middle + delta + 1; 1430 break; 1431 } 1432 } 1433 1434 if (delta >= middle) 1435 return -ENOSPC; 1436 1437 *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]); 1438 return 0; 1439 } 1440 1441 static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh, 1442 struct buffer_head *new_bh, 1443 u32 *split_cpos) 1444 { 1445 int split_index = 0, num_moved, ret; 1446 u32 cpos = 0; 1447 struct ocfs2_refcount_block *rb = 1448 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 1449 struct ocfs2_refcount_list *rl = &rb->rf_records; 1450 struct ocfs2_refcount_block *new_rb = 1451 (struct ocfs2_refcount_block *)new_bh->b_data; 1452 struct ocfs2_refcount_list *new_rl = &new_rb->rf_records; 1453 1454 trace_ocfs2_divide_leaf_refcount_block( 1455 (unsigned long long)ref_leaf_bh->b_blocknr, 1456 le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used)); 1457 1458 /* 1459 * XXX: Improvement later. 1460 * If we know all the high 32 bit cpos is the same, no need to sort. 1461 * 1462 * In order to make the whole process safe, we do: 1463 * 1. sort the entries by their low 32 bit cpos first so that we can 1464 * find the split cpos easily. 1465 * 2. call ocfs2_insert_extent to insert the new refcount block. 1466 * 3. move the refcount rec to the new block. 1467 * 4. sort the entries by their 64 bit cpos. 1468 * 5. dirty the new_rb and rb. 1469 */ 1470 sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), 1471 sizeof(struct ocfs2_refcount_rec), 1472 cmp_refcount_rec_by_low_cpos, NULL); 1473 1474 ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index); 1475 if (ret) { 1476 mlog_errno(ret); 1477 return ret; 1478 } 1479 1480 new_rb->rf_cpos = cpu_to_le32(cpos); 1481 1482 /* move refcount records starting from split_index to the new block. */ 1483 num_moved = le16_to_cpu(rl->rl_used) - split_index; 1484 memcpy(new_rl->rl_recs, &rl->rl_recs[split_index], 1485 num_moved * sizeof(struct ocfs2_refcount_rec)); 1486 1487 /*ok, remove the entries we just moved over to the other block. */ 1488 memset(&rl->rl_recs[split_index], 0, 1489 num_moved * sizeof(struct ocfs2_refcount_rec)); 1490 1491 /* change old and new rl_used accordingly. */ 1492 le16_add_cpu(&rl->rl_used, -num_moved); 1493 new_rl->rl_used = cpu_to_le16(num_moved); 1494 1495 sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), 1496 sizeof(struct ocfs2_refcount_rec), 1497 cmp_refcount_rec_by_cpos, NULL); 1498 1499 sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used), 1500 sizeof(struct ocfs2_refcount_rec), 1501 cmp_refcount_rec_by_cpos, NULL); 1502 1503 *split_cpos = cpos; 1504 return 0; 1505 } 1506 1507 static int ocfs2_new_leaf_refcount_block(handle_t *handle, 1508 struct ocfs2_caching_info *ci, 1509 struct buffer_head *ref_root_bh, 1510 struct buffer_head *ref_leaf_bh, 1511 struct ocfs2_alloc_context *meta_ac) 1512 { 1513 int ret; 1514 u16 suballoc_bit_start; 1515 u32 num_got, new_cpos; 1516 u64 suballoc_loc, blkno; 1517 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 1518 struct ocfs2_refcount_block *root_rb = 1519 (struct ocfs2_refcount_block *)ref_root_bh->b_data; 1520 struct buffer_head *new_bh = NULL; 1521 struct ocfs2_refcount_block *new_rb; 1522 struct ocfs2_extent_tree ref_et; 1523 1524 BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)); 1525 1526 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, 1527 OCFS2_JOURNAL_ACCESS_WRITE); 1528 if (ret) { 1529 mlog_errno(ret); 1530 goto out; 1531 } 1532 1533 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, 1534 OCFS2_JOURNAL_ACCESS_WRITE); 1535 if (ret) { 1536 mlog_errno(ret); 1537 goto out; 1538 } 1539 1540 ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc, 1541 &suballoc_bit_start, &num_got, 1542 &blkno); 1543 if (ret) { 1544 mlog_errno(ret); 1545 goto out; 1546 } 1547 1548 new_bh = sb_getblk(sb, blkno); 1549 if (new_bh == NULL) { 1550 ret = -ENOMEM; 1551 mlog_errno(ret); 1552 goto out; 1553 } 1554 ocfs2_set_new_buffer_uptodate(ci, new_bh); 1555 1556 ret = ocfs2_journal_access_rb(handle, ci, new_bh, 1557 OCFS2_JOURNAL_ACCESS_CREATE); 1558 if (ret) { 1559 mlog_errno(ret); 1560 goto out; 1561 } 1562 1563 /* Initialize ocfs2_refcount_block. */ 1564 new_rb = (struct ocfs2_refcount_block *)new_bh->b_data; 1565 memset(new_rb, 0, sb->s_blocksize); 1566 strscpy(new_rb->rf_signature, OCFS2_REFCOUNT_BLOCK_SIGNATURE); 1567 new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); 1568 new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc); 1569 new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); 1570 new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation); 1571 new_rb->rf_blkno = cpu_to_le64(blkno); 1572 new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr); 1573 new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL); 1574 new_rb->rf_records.rl_count = 1575 cpu_to_le16(ocfs2_refcount_recs_per_rb(sb)); 1576 new_rb->rf_generation = root_rb->rf_generation; 1577 1578 ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos); 1579 if (ret) { 1580 mlog_errno(ret); 1581 goto out; 1582 } 1583 1584 ocfs2_journal_dirty(handle, ref_leaf_bh); 1585 ocfs2_journal_dirty(handle, new_bh); 1586 1587 ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh); 1588 1589 trace_ocfs2_new_leaf_refcount_block( 1590 (unsigned long long)new_bh->b_blocknr, new_cpos); 1591 1592 /* Insert the new leaf block with the specific offset cpos. */ 1593 ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr, 1594 1, 0, meta_ac); 1595 if (ret) 1596 mlog_errno(ret); 1597 1598 out: 1599 brelse(new_bh); 1600 return ret; 1601 } 1602 1603 static int ocfs2_expand_refcount_tree(handle_t *handle, 1604 struct ocfs2_caching_info *ci, 1605 struct buffer_head *ref_root_bh, 1606 struct buffer_head *ref_leaf_bh, 1607 struct ocfs2_alloc_context *meta_ac) 1608 { 1609 int ret; 1610 struct buffer_head *expand_bh = NULL; 1611 1612 if (ref_root_bh == ref_leaf_bh) { 1613 /* 1614 * the old root bh hasn't been expanded to a b-tree, 1615 * so expand it first. 1616 */ 1617 ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh, 1618 &expand_bh, meta_ac); 1619 if (ret) { 1620 mlog_errno(ret); 1621 goto out; 1622 } 1623 } else { 1624 expand_bh = ref_leaf_bh; 1625 get_bh(expand_bh); 1626 } 1627 1628 1629 /* Now add a new refcount block into the tree.*/ 1630 ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh, 1631 expand_bh, meta_ac); 1632 if (ret) 1633 mlog_errno(ret); 1634 out: 1635 brelse(expand_bh); 1636 return ret; 1637 } 1638 1639 /* 1640 * Adjust the extent rec in b-tree representing ref_leaf_bh. 1641 * 1642 * Only called when we have inserted a new refcount rec at index 0 1643 * which means ocfs2_extent_rec.e_cpos may need some change. 1644 */ 1645 static int ocfs2_adjust_refcount_rec(handle_t *handle, 1646 struct ocfs2_caching_info *ci, 1647 struct buffer_head *ref_root_bh, 1648 struct buffer_head *ref_leaf_bh, 1649 struct ocfs2_refcount_rec *rec) 1650 { 1651 int ret = 0, i; 1652 u32 new_cpos, old_cpos; 1653 struct ocfs2_path *path = NULL; 1654 struct ocfs2_extent_tree et; 1655 struct ocfs2_refcount_block *rb = 1656 (struct ocfs2_refcount_block *)ref_root_bh->b_data; 1657 struct ocfs2_extent_list *el; 1658 1659 if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) 1660 goto out; 1661 1662 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 1663 old_cpos = le32_to_cpu(rb->rf_cpos); 1664 new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK; 1665 if (old_cpos <= new_cpos) 1666 goto out; 1667 1668 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 1669 1670 path = ocfs2_new_path_from_et(&et); 1671 if (!path) { 1672 ret = -ENOMEM; 1673 mlog_errno(ret); 1674 goto out; 1675 } 1676 1677 ret = ocfs2_find_path(ci, path, old_cpos); 1678 if (ret) { 1679 mlog_errno(ret); 1680 goto out; 1681 } 1682 1683 /* 1684 * 2 more credits, one for the leaf refcount block, one for 1685 * the extent block contains the extent rec. 1686 */ 1687 ret = ocfs2_extend_trans(handle, 2); 1688 if (ret < 0) { 1689 mlog_errno(ret); 1690 goto out; 1691 } 1692 1693 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, 1694 OCFS2_JOURNAL_ACCESS_WRITE); 1695 if (ret < 0) { 1696 mlog_errno(ret); 1697 goto out; 1698 } 1699 1700 ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path), 1701 OCFS2_JOURNAL_ACCESS_WRITE); 1702 if (ret < 0) { 1703 mlog_errno(ret); 1704 goto out; 1705 } 1706 1707 /* change the leaf extent block first. */ 1708 el = path_leaf_el(path); 1709 1710 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) 1711 if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos) 1712 break; 1713 1714 BUG_ON(i == le16_to_cpu(el->l_next_free_rec)); 1715 1716 el->l_recs[i].e_cpos = cpu_to_le32(new_cpos); 1717 1718 /* change the r_cpos in the leaf block. */ 1719 rb->rf_cpos = cpu_to_le32(new_cpos); 1720 1721 ocfs2_journal_dirty(handle, path_leaf_bh(path)); 1722 ocfs2_journal_dirty(handle, ref_leaf_bh); 1723 1724 out: 1725 ocfs2_free_path(path); 1726 return ret; 1727 } 1728 1729 static int ocfs2_insert_refcount_rec(handle_t *handle, 1730 struct ocfs2_caching_info *ci, 1731 struct buffer_head *ref_root_bh, 1732 struct buffer_head *ref_leaf_bh, 1733 struct ocfs2_refcount_rec *rec, 1734 int index, int merge, 1735 struct ocfs2_alloc_context *meta_ac) 1736 { 1737 int ret; 1738 struct ocfs2_refcount_block *rb = 1739 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 1740 struct ocfs2_refcount_list *rf_list = &rb->rf_records; 1741 struct buffer_head *new_bh = NULL; 1742 1743 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); 1744 1745 if (rf_list->rl_used == rf_list->rl_count) { 1746 u64 cpos = le64_to_cpu(rec->r_cpos); 1747 u32 len = le32_to_cpu(rec->r_clusters); 1748 1749 ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh, 1750 ref_leaf_bh, meta_ac); 1751 if (ret) { 1752 mlog_errno(ret); 1753 goto out; 1754 } 1755 1756 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 1757 cpos, len, NULL, &index, 1758 &new_bh); 1759 if (ret) { 1760 mlog_errno(ret); 1761 goto out; 1762 } 1763 1764 ref_leaf_bh = new_bh; 1765 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 1766 rf_list = &rb->rf_records; 1767 } 1768 1769 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, 1770 OCFS2_JOURNAL_ACCESS_WRITE); 1771 if (ret) { 1772 mlog_errno(ret); 1773 goto out; 1774 } 1775 1776 if (index < le16_to_cpu(rf_list->rl_used)) 1777 memmove(&rf_list->rl_recs[index + 1], 1778 &rf_list->rl_recs[index], 1779 (le16_to_cpu(rf_list->rl_used) - index) * 1780 sizeof(struct ocfs2_refcount_rec)); 1781 1782 trace_ocfs2_insert_refcount_rec( 1783 (unsigned long long)ref_leaf_bh->b_blocknr, index, 1784 (unsigned long long)le64_to_cpu(rec->r_cpos), 1785 le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount)); 1786 1787 rf_list->rl_recs[index] = *rec; 1788 1789 le16_add_cpu(&rf_list->rl_used, 1); 1790 1791 if (merge) 1792 ocfs2_refcount_rec_merge(rb, index); 1793 1794 ocfs2_journal_dirty(handle, ref_leaf_bh); 1795 1796 if (index == 0) { 1797 ret = ocfs2_adjust_refcount_rec(handle, ci, 1798 ref_root_bh, 1799 ref_leaf_bh, rec); 1800 if (ret) 1801 mlog_errno(ret); 1802 } 1803 out: 1804 brelse(new_bh); 1805 return ret; 1806 } 1807 1808 /* 1809 * Split the refcount_rec indexed by "index" in ref_leaf_bh. 1810 * This is much simple than our b-tree code. 1811 * split_rec is the new refcount rec we want to insert. 1812 * If split_rec->r_refcount > 0, we are changing the refcount(in case we 1813 * increase refcount or decrease a refcount to non-zero). 1814 * If split_rec->r_refcount == 0, we are punching a hole in current refcount 1815 * rec( in case we decrease a refcount to zero). 1816 */ 1817 static int ocfs2_split_refcount_rec(handle_t *handle, 1818 struct ocfs2_caching_info *ci, 1819 struct buffer_head *ref_root_bh, 1820 struct buffer_head *ref_leaf_bh, 1821 struct ocfs2_refcount_rec *split_rec, 1822 int index, int merge, 1823 struct ocfs2_alloc_context *meta_ac, 1824 struct ocfs2_cached_dealloc_ctxt *dealloc) 1825 { 1826 int ret, recs_need; 1827 u32 len; 1828 struct ocfs2_refcount_block *rb = 1829 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 1830 struct ocfs2_refcount_list *rf_list = &rb->rf_records; 1831 struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index]; 1832 struct ocfs2_refcount_rec *tail_rec = NULL; 1833 struct buffer_head *new_bh = NULL; 1834 1835 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); 1836 1837 trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos), 1838 le32_to_cpu(orig_rec->r_clusters), 1839 le32_to_cpu(orig_rec->r_refcount), 1840 le64_to_cpu(split_rec->r_cpos), 1841 le32_to_cpu(split_rec->r_clusters), 1842 le32_to_cpu(split_rec->r_refcount)); 1843 1844 /* 1845 * If we just need to split the header or tail clusters, 1846 * no more recs are needed, just split is OK. 1847 * Otherwise we at least need one new recs. 1848 */ 1849 if (!split_rec->r_refcount && 1850 (split_rec->r_cpos == orig_rec->r_cpos || 1851 le64_to_cpu(split_rec->r_cpos) + 1852 le32_to_cpu(split_rec->r_clusters) == 1853 le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters))) 1854 recs_need = 0; 1855 else 1856 recs_need = 1; 1857 1858 /* 1859 * We need one more rec if we split in the middle and the new rec have 1860 * some refcount in it. 1861 */ 1862 if (split_rec->r_refcount && 1863 (split_rec->r_cpos != orig_rec->r_cpos && 1864 le64_to_cpu(split_rec->r_cpos) + 1865 le32_to_cpu(split_rec->r_clusters) != 1866 le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters))) 1867 recs_need++; 1868 1869 /* If the leaf block don't have enough record, expand it. */ 1870 if (le16_to_cpu(rf_list->rl_used) + recs_need > 1871 le16_to_cpu(rf_list->rl_count)) { 1872 struct ocfs2_refcount_rec tmp_rec; 1873 u64 cpos = le64_to_cpu(orig_rec->r_cpos); 1874 len = le32_to_cpu(orig_rec->r_clusters); 1875 ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh, 1876 ref_leaf_bh, meta_ac); 1877 if (ret) { 1878 mlog_errno(ret); 1879 goto out; 1880 } 1881 1882 /* 1883 * We have to re-get it since now cpos may be moved to 1884 * another leaf block. 1885 */ 1886 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 1887 cpos, len, &tmp_rec, &index, 1888 &new_bh); 1889 if (ret) { 1890 mlog_errno(ret); 1891 goto out; 1892 } 1893 1894 ref_leaf_bh = new_bh; 1895 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 1896 rf_list = &rb->rf_records; 1897 orig_rec = &rf_list->rl_recs[index]; 1898 } 1899 1900 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, 1901 OCFS2_JOURNAL_ACCESS_WRITE); 1902 if (ret) { 1903 mlog_errno(ret); 1904 goto out; 1905 } 1906 1907 /* 1908 * We have calculated out how many new records we need and store 1909 * in recs_need, so spare enough space first by moving the records 1910 * after "index" to the end. 1911 */ 1912 if (index != le16_to_cpu(rf_list->rl_used) - 1) 1913 memmove(&rf_list->rl_recs[index + 1 + recs_need], 1914 &rf_list->rl_recs[index + 1], 1915 (le16_to_cpu(rf_list->rl_used) - index - 1) * 1916 sizeof(struct ocfs2_refcount_rec)); 1917 1918 len = (le64_to_cpu(orig_rec->r_cpos) + 1919 le32_to_cpu(orig_rec->r_clusters)) - 1920 (le64_to_cpu(split_rec->r_cpos) + 1921 le32_to_cpu(split_rec->r_clusters)); 1922 1923 /* 1924 * If we have "len", the we will split in the tail and move it 1925 * to the end of the space we have just spared. 1926 */ 1927 if (len) { 1928 tail_rec = &rf_list->rl_recs[index + recs_need]; 1929 1930 memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec)); 1931 le64_add_cpu(&tail_rec->r_cpos, 1932 le32_to_cpu(tail_rec->r_clusters) - len); 1933 tail_rec->r_clusters = cpu_to_le32(len); 1934 } 1935 1936 /* 1937 * If the split pos isn't the same as the original one, we need to 1938 * split in the head. 1939 * 1940 * Note: We have the chance that split_rec.r_refcount = 0, 1941 * recs_need = 0 and len > 0, which means we just cut the head from 1942 * the orig_rec and in that case we have done some modification in 1943 * orig_rec above, so the check for r_cpos is faked. 1944 */ 1945 if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) { 1946 len = le64_to_cpu(split_rec->r_cpos) - 1947 le64_to_cpu(orig_rec->r_cpos); 1948 orig_rec->r_clusters = cpu_to_le32(len); 1949 index++; 1950 } 1951 1952 le16_add_cpu(&rf_list->rl_used, recs_need); 1953 1954 if (split_rec->r_refcount) { 1955 rf_list->rl_recs[index] = *split_rec; 1956 trace_ocfs2_split_refcount_rec_insert( 1957 (unsigned long long)ref_leaf_bh->b_blocknr, index, 1958 (unsigned long long)le64_to_cpu(split_rec->r_cpos), 1959 le32_to_cpu(split_rec->r_clusters), 1960 le32_to_cpu(split_rec->r_refcount)); 1961 1962 if (merge) 1963 ocfs2_refcount_rec_merge(rb, index); 1964 } 1965 1966 ocfs2_journal_dirty(handle, ref_leaf_bh); 1967 1968 out: 1969 brelse(new_bh); 1970 return ret; 1971 } 1972 1973 static int __ocfs2_increase_refcount(handle_t *handle, 1974 struct ocfs2_caching_info *ci, 1975 struct buffer_head *ref_root_bh, 1976 u64 cpos, u32 len, int merge, 1977 struct ocfs2_alloc_context *meta_ac, 1978 struct ocfs2_cached_dealloc_ctxt *dealloc) 1979 { 1980 int ret = 0, index; 1981 struct buffer_head *ref_leaf_bh = NULL; 1982 struct ocfs2_refcount_rec rec; 1983 unsigned int set_len = 0; 1984 1985 trace_ocfs2_increase_refcount_begin( 1986 (unsigned long long)ocfs2_metadata_cache_owner(ci), 1987 (unsigned long long)cpos, len); 1988 1989 while (len) { 1990 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 1991 cpos, len, &rec, &index, 1992 &ref_leaf_bh); 1993 if (ret) { 1994 mlog_errno(ret); 1995 goto out; 1996 } 1997 1998 set_len = le32_to_cpu(rec.r_clusters); 1999 2000 /* 2001 * Here we may meet with 3 situations: 2002 * 2003 * 1. If we find an already existing record, and the length 2004 * is the same, cool, we just need to increase the r_refcount 2005 * and it is OK. 2006 * 2. If we find a hole, just insert it with r_refcount = 1. 2007 * 3. If we are in the middle of one extent record, split 2008 * it. 2009 */ 2010 if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos && 2011 set_len <= len) { 2012 trace_ocfs2_increase_refcount_change( 2013 (unsigned long long)cpos, set_len, 2014 le32_to_cpu(rec.r_refcount)); 2015 ret = ocfs2_change_refcount_rec(handle, ci, 2016 ref_leaf_bh, index, 2017 merge, 1); 2018 if (ret) { 2019 mlog_errno(ret); 2020 goto out; 2021 } 2022 } else if (!rec.r_refcount) { 2023 rec.r_refcount = cpu_to_le32(1); 2024 2025 trace_ocfs2_increase_refcount_insert( 2026 (unsigned long long)le64_to_cpu(rec.r_cpos), 2027 set_len); 2028 ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh, 2029 ref_leaf_bh, 2030 &rec, index, 2031 merge, meta_ac); 2032 if (ret) { 2033 mlog_errno(ret); 2034 goto out; 2035 } 2036 } else { 2037 set_len = min((u64)(cpos + len), 2038 le64_to_cpu(rec.r_cpos) + set_len) - cpos; 2039 rec.r_cpos = cpu_to_le64(cpos); 2040 rec.r_clusters = cpu_to_le32(set_len); 2041 le32_add_cpu(&rec.r_refcount, 1); 2042 2043 trace_ocfs2_increase_refcount_split( 2044 (unsigned long long)le64_to_cpu(rec.r_cpos), 2045 set_len, le32_to_cpu(rec.r_refcount)); 2046 ret = ocfs2_split_refcount_rec(handle, ci, 2047 ref_root_bh, ref_leaf_bh, 2048 &rec, index, merge, 2049 meta_ac, dealloc); 2050 if (ret) { 2051 mlog_errno(ret); 2052 goto out; 2053 } 2054 } 2055 2056 cpos += set_len; 2057 len -= set_len; 2058 brelse(ref_leaf_bh); 2059 ref_leaf_bh = NULL; 2060 } 2061 2062 out: 2063 brelse(ref_leaf_bh); 2064 return ret; 2065 } 2066 2067 static int ocfs2_remove_refcount_extent(handle_t *handle, 2068 struct ocfs2_caching_info *ci, 2069 struct buffer_head *ref_root_bh, 2070 struct buffer_head *ref_leaf_bh, 2071 struct ocfs2_alloc_context *meta_ac, 2072 struct ocfs2_cached_dealloc_ctxt *dealloc) 2073 { 2074 int ret; 2075 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2076 struct ocfs2_refcount_block *rb = 2077 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 2078 struct ocfs2_extent_tree et; 2079 2080 BUG_ON(rb->rf_records.rl_used); 2081 2082 trace_ocfs2_remove_refcount_extent( 2083 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2084 (unsigned long long)ref_leaf_bh->b_blocknr, 2085 le32_to_cpu(rb->rf_cpos)); 2086 2087 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 2088 ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos), 2089 1, meta_ac, dealloc); 2090 if (ret) { 2091 mlog_errno(ret); 2092 goto out; 2093 } 2094 2095 ocfs2_remove_from_cache(ci, ref_leaf_bh); 2096 2097 /* 2098 * add the freed block to the dealloc so that it will be freed 2099 * when we run dealloc. 2100 */ 2101 ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE, 2102 le16_to_cpu(rb->rf_suballoc_slot), 2103 le64_to_cpu(rb->rf_suballoc_loc), 2104 le64_to_cpu(rb->rf_blkno), 2105 le16_to_cpu(rb->rf_suballoc_bit)); 2106 if (ret) { 2107 mlog_errno(ret); 2108 goto out; 2109 } 2110 2111 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, 2112 OCFS2_JOURNAL_ACCESS_WRITE); 2113 if (ret) { 2114 mlog_errno(ret); 2115 goto out; 2116 } 2117 2118 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 2119 2120 le32_add_cpu(&rb->rf_clusters, -1); 2121 2122 /* 2123 * check whether we need to restore the root refcount block if 2124 * there is no leaf extent block at atll. 2125 */ 2126 if (!rb->rf_list.l_next_free_rec) { 2127 BUG_ON(rb->rf_clusters); 2128 2129 trace_ocfs2_restore_refcount_block( 2130 (unsigned long long)ref_root_bh->b_blocknr); 2131 2132 rb->rf_flags = 0; 2133 rb->rf_parent = 0; 2134 rb->rf_cpos = 0; 2135 memset(&rb->rf_records, 0, sb->s_blocksize - 2136 offsetof(struct ocfs2_refcount_block, rf_records)); 2137 rb->rf_records.rl_count = 2138 cpu_to_le16(ocfs2_refcount_recs_per_rb(sb)); 2139 } 2140 2141 ocfs2_journal_dirty(handle, ref_root_bh); 2142 2143 out: 2144 return ret; 2145 } 2146 2147 int ocfs2_increase_refcount(handle_t *handle, 2148 struct ocfs2_caching_info *ci, 2149 struct buffer_head *ref_root_bh, 2150 u64 cpos, u32 len, 2151 struct ocfs2_alloc_context *meta_ac, 2152 struct ocfs2_cached_dealloc_ctxt *dealloc) 2153 { 2154 return __ocfs2_increase_refcount(handle, ci, ref_root_bh, 2155 cpos, len, 1, 2156 meta_ac, dealloc); 2157 } 2158 2159 static int ocfs2_decrease_refcount_rec(handle_t *handle, 2160 struct ocfs2_caching_info *ci, 2161 struct buffer_head *ref_root_bh, 2162 struct buffer_head *ref_leaf_bh, 2163 int index, u64 cpos, unsigned int len, 2164 struct ocfs2_alloc_context *meta_ac, 2165 struct ocfs2_cached_dealloc_ctxt *dealloc) 2166 { 2167 int ret; 2168 struct ocfs2_refcount_block *rb = 2169 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 2170 struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index]; 2171 2172 BUG_ON(cpos < le64_to_cpu(rec->r_cpos)); 2173 BUG_ON(cpos + len > 2174 le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters)); 2175 2176 trace_ocfs2_decrease_refcount_rec( 2177 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2178 (unsigned long long)cpos, len); 2179 2180 if (cpos == le64_to_cpu(rec->r_cpos) && 2181 len == le32_to_cpu(rec->r_clusters)) 2182 ret = ocfs2_change_refcount_rec(handle, ci, 2183 ref_leaf_bh, index, 1, -1); 2184 else { 2185 struct ocfs2_refcount_rec split = *rec; 2186 split.r_cpos = cpu_to_le64(cpos); 2187 split.r_clusters = cpu_to_le32(len); 2188 2189 le32_add_cpu(&split.r_refcount, -1); 2190 2191 ret = ocfs2_split_refcount_rec(handle, ci, 2192 ref_root_bh, ref_leaf_bh, 2193 &split, index, 1, 2194 meta_ac, dealloc); 2195 } 2196 2197 if (ret) { 2198 mlog_errno(ret); 2199 goto out; 2200 } 2201 2202 /* Remove the leaf refcount block if it contains no refcount record. */ 2203 if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) { 2204 ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh, 2205 ref_leaf_bh, meta_ac, 2206 dealloc); 2207 if (ret) 2208 mlog_errno(ret); 2209 } 2210 2211 out: 2212 return ret; 2213 } 2214 2215 static int __ocfs2_decrease_refcount(handle_t *handle, 2216 struct ocfs2_caching_info *ci, 2217 struct buffer_head *ref_root_bh, 2218 u64 cpos, u32 len, 2219 struct ocfs2_alloc_context *meta_ac, 2220 struct ocfs2_cached_dealloc_ctxt *dealloc, 2221 int delete) 2222 { 2223 int ret = 0, index = 0; 2224 struct ocfs2_refcount_rec rec; 2225 unsigned int r_count = 0, r_len; 2226 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2227 struct buffer_head *ref_leaf_bh = NULL; 2228 2229 trace_ocfs2_decrease_refcount( 2230 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2231 (unsigned long long)cpos, len, delete); 2232 2233 while (len) { 2234 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2235 cpos, len, &rec, &index, 2236 &ref_leaf_bh); 2237 if (ret) { 2238 mlog_errno(ret); 2239 goto out; 2240 } 2241 2242 r_count = le32_to_cpu(rec.r_refcount); 2243 BUG_ON(r_count == 0); 2244 if (!delete) 2245 BUG_ON(r_count > 1); 2246 2247 r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) + 2248 le32_to_cpu(rec.r_clusters)) - cpos; 2249 2250 ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh, 2251 ref_leaf_bh, index, 2252 cpos, r_len, 2253 meta_ac, dealloc); 2254 if (ret) { 2255 mlog_errno(ret); 2256 goto out; 2257 } 2258 2259 if (le32_to_cpu(rec.r_refcount) == 1 && delete) { 2260 ret = ocfs2_cache_cluster_dealloc(dealloc, 2261 ocfs2_clusters_to_blocks(sb, cpos), 2262 r_len); 2263 if (ret) { 2264 mlog_errno(ret); 2265 goto out; 2266 } 2267 } 2268 2269 cpos += r_len; 2270 len -= r_len; 2271 brelse(ref_leaf_bh); 2272 ref_leaf_bh = NULL; 2273 } 2274 2275 out: 2276 brelse(ref_leaf_bh); 2277 return ret; 2278 } 2279 2280 /* Caller must hold refcount tree lock. */ 2281 int ocfs2_decrease_refcount(struct inode *inode, 2282 handle_t *handle, u32 cpos, u32 len, 2283 struct ocfs2_alloc_context *meta_ac, 2284 struct ocfs2_cached_dealloc_ctxt *dealloc, 2285 int delete) 2286 { 2287 int ret; 2288 u64 ref_blkno; 2289 struct buffer_head *ref_root_bh = NULL; 2290 struct ocfs2_refcount_tree *tree; 2291 2292 BUG_ON(!ocfs2_is_refcount_inode(inode)); 2293 2294 ret = ocfs2_get_refcount_block(inode, &ref_blkno); 2295 if (ret) { 2296 mlog_errno(ret); 2297 goto out; 2298 } 2299 2300 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree); 2301 if (ret) { 2302 mlog_errno(ret); 2303 goto out; 2304 } 2305 2306 ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno, 2307 &ref_root_bh); 2308 if (ret) { 2309 mlog_errno(ret); 2310 goto out; 2311 } 2312 2313 ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh, 2314 cpos, len, meta_ac, dealloc, delete); 2315 if (ret) 2316 mlog_errno(ret); 2317 out: 2318 brelse(ref_root_bh); 2319 return ret; 2320 } 2321 2322 /* 2323 * Mark the already-existing extent at cpos as refcounted for len clusters. 2324 * This adds the refcount extent flag. 2325 * 2326 * If the existing extent is larger than the request, initiate a 2327 * split. An attempt will be made at merging with adjacent extents. 2328 * 2329 * The caller is responsible for passing down meta_ac if we'll need it. 2330 */ 2331 static int ocfs2_mark_extent_refcounted(struct inode *inode, 2332 struct ocfs2_extent_tree *et, 2333 handle_t *handle, u32 cpos, 2334 u32 len, u32 phys, 2335 struct ocfs2_alloc_context *meta_ac, 2336 struct ocfs2_cached_dealloc_ctxt *dealloc) 2337 { 2338 int ret; 2339 2340 trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno, 2341 cpos, len, phys); 2342 2343 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { 2344 ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n", 2345 inode->i_ino); 2346 goto out; 2347 } 2348 2349 ret = ocfs2_change_extent_flag(handle, et, cpos, 2350 len, phys, meta_ac, dealloc, 2351 OCFS2_EXT_REFCOUNTED, 0); 2352 if (ret) 2353 mlog_errno(ret); 2354 2355 out: 2356 return ret; 2357 } 2358 2359 /* 2360 * Given some contiguous physical clusters, calculate what we need 2361 * for modifying their refcount. 2362 */ 2363 static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, 2364 struct ocfs2_caching_info *ci, 2365 struct buffer_head *ref_root_bh, 2366 u64 start_cpos, 2367 u32 clusters, 2368 int *meta_add, 2369 int *credits) 2370 { 2371 int ret = 0, index, ref_blocks = 0, recs_add = 0; 2372 u64 cpos = start_cpos; 2373 struct ocfs2_refcount_block *rb; 2374 struct ocfs2_refcount_rec rec; 2375 struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL; 2376 u32 len; 2377 2378 while (clusters) { 2379 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2380 cpos, clusters, &rec, 2381 &index, &ref_leaf_bh); 2382 if (ret) { 2383 mlog_errno(ret); 2384 goto out; 2385 } 2386 2387 if (ref_leaf_bh != prev_bh) { 2388 /* 2389 * Now we encounter a new leaf block, so calculate 2390 * whether we need to extend the old leaf. 2391 */ 2392 if (prev_bh) { 2393 rb = (struct ocfs2_refcount_block *) 2394 prev_bh->b_data; 2395 2396 if (le16_to_cpu(rb->rf_records.rl_used) + 2397 recs_add > 2398 le16_to_cpu(rb->rf_records.rl_count)) 2399 ref_blocks++; 2400 } 2401 2402 recs_add = 0; 2403 *credits += 1; 2404 brelse(prev_bh); 2405 prev_bh = ref_leaf_bh; 2406 get_bh(prev_bh); 2407 } 2408 2409 trace_ocfs2_calc_refcount_meta_credits_iterate( 2410 recs_add, (unsigned long long)cpos, clusters, 2411 (unsigned long long)le64_to_cpu(rec.r_cpos), 2412 le32_to_cpu(rec.r_clusters), 2413 le32_to_cpu(rec.r_refcount), index); 2414 2415 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + 2416 le32_to_cpu(rec.r_clusters)) - cpos; 2417 /* 2418 * We record all the records which will be inserted to the 2419 * same refcount block, so that we can tell exactly whether 2420 * we need a new refcount block or not. 2421 * 2422 * If we will insert a new one, this is easy and only happens 2423 * during adding refcounted flag to the extent, so we don't 2424 * have a chance of splitting. We just need one record. 2425 * 2426 * If the refcount rec already exists, that would be a little 2427 * complicated. we may have to: 2428 * 1) split at the beginning if the start pos isn't aligned. 2429 * we need 1 more record in this case. 2430 * 2) split int the end if the end pos isn't aligned. 2431 * we need 1 more record in this case. 2432 * 3) split in the middle because of file system fragmentation. 2433 * we need 2 more records in this case(we can't detect this 2434 * beforehand, so always think of the worst case). 2435 */ 2436 if (rec.r_refcount) { 2437 recs_add += 2; 2438 /* Check whether we need a split at the beginning. */ 2439 if (cpos == start_cpos && 2440 cpos != le64_to_cpu(rec.r_cpos)) 2441 recs_add++; 2442 2443 /* Check whether we need a split in the end. */ 2444 if (cpos + clusters < le64_to_cpu(rec.r_cpos) + 2445 le32_to_cpu(rec.r_clusters)) 2446 recs_add++; 2447 } else 2448 recs_add++; 2449 2450 brelse(ref_leaf_bh); 2451 ref_leaf_bh = NULL; 2452 clusters -= len; 2453 cpos += len; 2454 } 2455 2456 if (prev_bh) { 2457 rb = (struct ocfs2_refcount_block *)prev_bh->b_data; 2458 2459 if (le16_to_cpu(rb->rf_records.rl_used) + recs_add > 2460 le16_to_cpu(rb->rf_records.rl_count)) 2461 ref_blocks++; 2462 2463 *credits += 1; 2464 } 2465 2466 if (!ref_blocks) 2467 goto out; 2468 2469 *meta_add += ref_blocks; 2470 *credits += ref_blocks; 2471 2472 /* 2473 * So we may need ref_blocks to insert into the tree. 2474 * That also means we need to change the b-tree and add that number 2475 * of records since we never merge them. 2476 * We need one more block for expansion since the new created leaf 2477 * block is also full and needs split. 2478 */ 2479 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 2480 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) { 2481 struct ocfs2_extent_tree et; 2482 2483 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 2484 *meta_add += ocfs2_extend_meta_needed(et.et_root_el); 2485 *credits += ocfs2_calc_extend_credits(sb, 2486 et.et_root_el); 2487 } else { 2488 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; 2489 *meta_add += 1; 2490 } 2491 2492 out: 2493 2494 trace_ocfs2_calc_refcount_meta_credits( 2495 (unsigned long long)start_cpos, clusters, 2496 *meta_add, *credits); 2497 brelse(ref_leaf_bh); 2498 brelse(prev_bh); 2499 return ret; 2500 } 2501 2502 /* 2503 * For refcount tree, we will decrease some contiguous clusters 2504 * refcount count, so just go through it to see how many blocks 2505 * we gonna touch and whether we need to create new blocks. 2506 * 2507 * Normally the refcount blocks store these refcount should be 2508 * contiguous also, so that we can get the number easily. 2509 * We will at most add split 2 refcount records and 2 more 2510 * refcount blocks, so just check it in a rough way. 2511 * 2512 * Caller must hold refcount tree lock. 2513 */ 2514 int ocfs2_prepare_refcount_change_for_del(struct inode *inode, 2515 u64 refcount_loc, 2516 u64 phys_blkno, 2517 u32 clusters, 2518 int *credits, 2519 int *ref_blocks) 2520 { 2521 int ret; 2522 struct buffer_head *ref_root_bh = NULL; 2523 struct ocfs2_refcount_tree *tree; 2524 u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno); 2525 2526 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { 2527 ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n", 2528 inode->i_ino); 2529 goto out; 2530 } 2531 2532 BUG_ON(!ocfs2_is_refcount_inode(inode)); 2533 2534 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), 2535 refcount_loc, &tree); 2536 if (ret) { 2537 mlog_errno(ret); 2538 goto out; 2539 } 2540 2541 ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc, 2542 &ref_root_bh); 2543 if (ret) { 2544 mlog_errno(ret); 2545 goto out; 2546 } 2547 2548 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, 2549 &tree->rf_ci, 2550 ref_root_bh, 2551 start_cpos, clusters, 2552 ref_blocks, credits); 2553 if (ret) { 2554 mlog_errno(ret); 2555 goto out; 2556 } 2557 2558 trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits); 2559 2560 out: 2561 brelse(ref_root_bh); 2562 return ret; 2563 } 2564 2565 #define MAX_CONTIG_BYTES 1048576 2566 2567 static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb) 2568 { 2569 return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES); 2570 } 2571 2572 static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb) 2573 { 2574 return ~(ocfs2_cow_contig_clusters(sb) - 1); 2575 } 2576 2577 /* 2578 * Given an extent that starts at 'start' and an I/O that starts at 'cpos', 2579 * find an offset (start + (n * contig_clusters)) that is closest to cpos 2580 * while still being less than or equal to it. 2581 * 2582 * The goal is to break the extent at a multiple of contig_clusters. 2583 */ 2584 static inline unsigned int ocfs2_cow_align_start(struct super_block *sb, 2585 unsigned int start, 2586 unsigned int cpos) 2587 { 2588 BUG_ON(start > cpos); 2589 2590 return start + ((cpos - start) & ocfs2_cow_contig_mask(sb)); 2591 } 2592 2593 /* 2594 * Given a cluster count of len, pad it out so that it is a multiple 2595 * of contig_clusters. 2596 */ 2597 static inline unsigned int ocfs2_cow_align_length(struct super_block *sb, 2598 unsigned int len) 2599 { 2600 unsigned int padded = 2601 (len + (ocfs2_cow_contig_clusters(sb) - 1)) & 2602 ocfs2_cow_contig_mask(sb); 2603 2604 /* Did we wrap? */ 2605 if (padded < len) 2606 padded = UINT_MAX; 2607 2608 return padded; 2609 } 2610 2611 /* 2612 * Calculate out the start and number of virtual clusters we need to CoW. 2613 * 2614 * cpos is virtual start cluster position we want to do CoW in a 2615 * file and write_len is the cluster length. 2616 * max_cpos is the place where we want to stop CoW intentionally. 2617 * 2618 * Normal we will start CoW from the beginning of extent record containing cpos. 2619 * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we 2620 * get good I/O from the resulting extent tree. 2621 */ 2622 static int ocfs2_refcount_cal_cow_clusters(struct inode *inode, 2623 struct ocfs2_extent_list *el, 2624 u32 cpos, 2625 u32 write_len, 2626 u32 max_cpos, 2627 u32 *cow_start, 2628 u32 *cow_len) 2629 { 2630 int ret = 0; 2631 int tree_height = le16_to_cpu(el->l_tree_depth), i; 2632 struct buffer_head *eb_bh = NULL; 2633 struct ocfs2_extent_block *eb = NULL; 2634 struct ocfs2_extent_rec *rec; 2635 unsigned int want_clusters, rec_end = 0; 2636 int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb); 2637 int leaf_clusters; 2638 2639 BUG_ON(cpos + write_len > max_cpos); 2640 2641 if (tree_height > 0) { 2642 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh); 2643 if (ret) { 2644 mlog_errno(ret); 2645 goto out; 2646 } 2647 2648 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 2649 el = &eb->h_list; 2650 2651 if (el->l_tree_depth) { 2652 ret = ocfs2_error(inode->i_sb, 2653 "Inode %lu has non zero tree depth in leaf block %llu\n", 2654 inode->i_ino, 2655 (unsigned long long)eb_bh->b_blocknr); 2656 goto out; 2657 } 2658 } 2659 2660 *cow_len = 0; 2661 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { 2662 rec = &el->l_recs[i]; 2663 2664 if (ocfs2_is_empty_extent(rec)) { 2665 mlog_bug_on_msg(i != 0, "Inode %lu has empty record in " 2666 "index %d\n", inode->i_ino, i); 2667 continue; 2668 } 2669 2670 if (le32_to_cpu(rec->e_cpos) + 2671 le16_to_cpu(rec->e_leaf_clusters) <= cpos) 2672 continue; 2673 2674 if (*cow_len == 0) { 2675 /* 2676 * We should find a refcounted record in the 2677 * first pass. 2678 */ 2679 BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED)); 2680 *cow_start = le32_to_cpu(rec->e_cpos); 2681 } 2682 2683 /* 2684 * If we encounter a hole, a non-refcounted record or 2685 * pass the max_cpos, stop the search. 2686 */ 2687 if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) || 2688 (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) || 2689 (max_cpos <= le32_to_cpu(rec->e_cpos))) 2690 break; 2691 2692 leaf_clusters = le16_to_cpu(rec->e_leaf_clusters); 2693 rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters; 2694 if (rec_end > max_cpos) { 2695 rec_end = max_cpos; 2696 leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos); 2697 } 2698 2699 /* 2700 * How many clusters do we actually need from 2701 * this extent? First we see how many we actually 2702 * need to complete the write. If that's smaller 2703 * than contig_clusters, we try for contig_clusters. 2704 */ 2705 if (!*cow_len) 2706 want_clusters = write_len; 2707 else 2708 want_clusters = (cpos + write_len) - 2709 (*cow_start + *cow_len); 2710 if (want_clusters < contig_clusters) 2711 want_clusters = contig_clusters; 2712 2713 /* 2714 * If the write does not cover the whole extent, we 2715 * need to calculate how we're going to split the extent. 2716 * We try to do it on contig_clusters boundaries. 2717 * 2718 * Any extent smaller than contig_clusters will be 2719 * CoWed in its entirety. 2720 */ 2721 if (leaf_clusters <= contig_clusters) 2722 *cow_len += leaf_clusters; 2723 else if (*cow_len || (*cow_start == cpos)) { 2724 /* 2725 * This extent needs to be CoW'd from its 2726 * beginning, so all we have to do is compute 2727 * how many clusters to grab. We align 2728 * want_clusters to the edge of contig_clusters 2729 * to get better I/O. 2730 */ 2731 want_clusters = ocfs2_cow_align_length(inode->i_sb, 2732 want_clusters); 2733 2734 if (leaf_clusters < want_clusters) 2735 *cow_len += leaf_clusters; 2736 else 2737 *cow_len += want_clusters; 2738 } else if ((*cow_start + contig_clusters) >= 2739 (cpos + write_len)) { 2740 /* 2741 * Breaking off contig_clusters at the front 2742 * of the extent will cover our write. That's 2743 * easy. 2744 */ 2745 *cow_len = contig_clusters; 2746 } else if ((rec_end - cpos) <= contig_clusters) { 2747 /* 2748 * Breaking off contig_clusters at the tail of 2749 * this extent will cover cpos. 2750 */ 2751 *cow_start = rec_end - contig_clusters; 2752 *cow_len = contig_clusters; 2753 } else if ((rec_end - cpos) <= want_clusters) { 2754 /* 2755 * While we can't fit the entire write in this 2756 * extent, we know that the write goes from cpos 2757 * to the end of the extent. Break that off. 2758 * We try to break it at some multiple of 2759 * contig_clusters from the front of the extent. 2760 * Failing that (ie, cpos is within 2761 * contig_clusters of the front), we'll CoW the 2762 * entire extent. 2763 */ 2764 *cow_start = ocfs2_cow_align_start(inode->i_sb, 2765 *cow_start, cpos); 2766 *cow_len = rec_end - *cow_start; 2767 } else { 2768 /* 2769 * Ok, the entire write lives in the middle of 2770 * this extent. Let's try to slice the extent up 2771 * nicely. Optimally, our CoW region starts at 2772 * m*contig_clusters from the beginning of the 2773 * extent and goes for n*contig_clusters, 2774 * covering the entire write. 2775 */ 2776 *cow_start = ocfs2_cow_align_start(inode->i_sb, 2777 *cow_start, cpos); 2778 2779 want_clusters = (cpos + write_len) - *cow_start; 2780 want_clusters = ocfs2_cow_align_length(inode->i_sb, 2781 want_clusters); 2782 if (*cow_start + want_clusters <= rec_end) 2783 *cow_len = want_clusters; 2784 else 2785 *cow_len = rec_end - *cow_start; 2786 } 2787 2788 /* Have we covered our entire write yet? */ 2789 if ((*cow_start + *cow_len) >= (cpos + write_len)) 2790 break; 2791 2792 /* 2793 * If we reach the end of the extent block and don't get enough 2794 * clusters, continue with the next extent block if possible. 2795 */ 2796 if (i + 1 == le16_to_cpu(el->l_next_free_rec) && 2797 eb && eb->h_next_leaf_blk) { 2798 brelse(eb_bh); 2799 eb_bh = NULL; 2800 2801 ret = ocfs2_read_extent_block(INODE_CACHE(inode), 2802 le64_to_cpu(eb->h_next_leaf_blk), 2803 &eb_bh); 2804 if (ret) { 2805 mlog_errno(ret); 2806 goto out; 2807 } 2808 2809 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 2810 el = &eb->h_list; 2811 i = -1; 2812 } 2813 } 2814 2815 out: 2816 brelse(eb_bh); 2817 return ret; 2818 } 2819 2820 /* 2821 * Prepare meta_ac, data_ac and calculate credits when we want to add some 2822 * num_clusters in data_tree "et" and change the refcount for the old 2823 * clusters(starting form p_cluster) in the refcount tree. 2824 * 2825 * Note: 2826 * 1. since we may split the old tree, so we at most will need num_clusters + 2 2827 * more new leaf records. 2828 * 2. In some case, we may not need to reserve new clusters(e.g, reflink), so 2829 * just give data_ac = NULL. 2830 */ 2831 static int ocfs2_lock_refcount_allocators(struct super_block *sb, 2832 u32 p_cluster, u32 num_clusters, 2833 struct ocfs2_extent_tree *et, 2834 struct ocfs2_caching_info *ref_ci, 2835 struct buffer_head *ref_root_bh, 2836 struct ocfs2_alloc_context **meta_ac, 2837 struct ocfs2_alloc_context **data_ac, 2838 int *credits) 2839 { 2840 int ret = 0, meta_add = 0; 2841 int num_free_extents = ocfs2_num_free_extents(et); 2842 2843 if (num_free_extents < 0) { 2844 ret = num_free_extents; 2845 mlog_errno(ret); 2846 goto out; 2847 } 2848 2849 if (num_free_extents < num_clusters + 2) 2850 meta_add = 2851 ocfs2_extend_meta_needed(et->et_root_el); 2852 2853 *credits += ocfs2_calc_extend_credits(sb, et->et_root_el); 2854 2855 ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh, 2856 p_cluster, num_clusters, 2857 &meta_add, credits); 2858 if (ret) { 2859 mlog_errno(ret); 2860 goto out; 2861 } 2862 2863 trace_ocfs2_lock_refcount_allocators(meta_add, *credits); 2864 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add, 2865 meta_ac); 2866 if (ret) { 2867 mlog_errno(ret); 2868 goto out; 2869 } 2870 2871 if (data_ac) { 2872 ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters, 2873 data_ac); 2874 if (ret) 2875 mlog_errno(ret); 2876 } 2877 2878 out: 2879 if (ret) { 2880 if (*meta_ac) { 2881 ocfs2_free_alloc_context(*meta_ac); 2882 *meta_ac = NULL; 2883 } 2884 } 2885 2886 return ret; 2887 } 2888 2889 static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh) 2890 { 2891 BUG_ON(buffer_dirty(bh)); 2892 2893 clear_buffer_mapped(bh); 2894 2895 return 0; 2896 } 2897 2898 int ocfs2_duplicate_clusters_by_page(handle_t *handle, 2899 struct inode *inode, 2900 u32 cpos, u32 old_cluster, 2901 u32 new_cluster, u32 new_len) 2902 { 2903 int ret = 0, partial; 2904 struct super_block *sb = inode->i_sb; 2905 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 2906 pgoff_t page_index; 2907 unsigned int from, to; 2908 loff_t offset, end, map_end; 2909 struct address_space *mapping = inode->i_mapping; 2910 2911 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 2912 new_cluster, new_len); 2913 2914 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; 2915 end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits); 2916 /* 2917 * We only duplicate pages until we reach the page contains i_size - 1. 2918 * So trim 'end' to i_size. 2919 */ 2920 if (end > i_size_read(inode)) 2921 end = i_size_read(inode); 2922 2923 while (offset < end) { 2924 struct folio *folio; 2925 page_index = offset >> PAGE_SHIFT; 2926 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; 2927 if (map_end > end) 2928 map_end = end; 2929 2930 /* from, to is the offset within the page. */ 2931 from = offset & (PAGE_SIZE - 1); 2932 to = PAGE_SIZE; 2933 if (map_end & (PAGE_SIZE - 1)) 2934 to = map_end & (PAGE_SIZE - 1); 2935 2936 retry: 2937 folio = __filemap_get_folio(mapping, page_index, 2938 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); 2939 if (IS_ERR(folio)) { 2940 ret = PTR_ERR(folio); 2941 mlog_errno(ret); 2942 break; 2943 } 2944 2945 /* 2946 * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty 2947 * page, so write it back. 2948 */ 2949 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) { 2950 if (folio_test_dirty(folio)) { 2951 folio_unlock(folio); 2952 folio_put(folio); 2953 2954 ret = filemap_write_and_wait_range(mapping, 2955 offset, map_end - 1); 2956 goto retry; 2957 } 2958 } 2959 2960 if (!folio_test_uptodate(folio)) { 2961 ret = block_read_full_folio(folio, ocfs2_get_block); 2962 if (ret) { 2963 mlog_errno(ret); 2964 goto unlock; 2965 } 2966 folio_lock(folio); 2967 } 2968 2969 if (folio_buffers(folio)) { 2970 ret = walk_page_buffers(handle, folio_buffers(folio), 2971 from, to, &partial, 2972 ocfs2_clear_cow_buffer); 2973 if (ret) { 2974 mlog_errno(ret); 2975 goto unlock; 2976 } 2977 } 2978 2979 ocfs2_map_and_dirty_folio(inode, handle, from, to, 2980 folio, 0, &new_block); 2981 folio_mark_accessed(folio); 2982 unlock: 2983 folio_unlock(folio); 2984 folio_put(folio); 2985 offset = map_end; 2986 if (ret) 2987 break; 2988 } 2989 2990 return ret; 2991 } 2992 2993 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, 2994 struct inode *inode, 2995 u32 cpos, u32 old_cluster, 2996 u32 new_cluster, u32 new_len) 2997 { 2998 int ret = 0; 2999 struct super_block *sb = inode->i_sb; 3000 struct ocfs2_caching_info *ci = INODE_CACHE(inode); 3001 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); 3002 u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster); 3003 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 3004 struct ocfs2_super *osb = OCFS2_SB(sb); 3005 struct buffer_head *old_bh = NULL; 3006 struct buffer_head *new_bh = NULL; 3007 3008 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 3009 new_cluster, new_len); 3010 3011 for (i = 0; i < blocks; i++, old_block++, new_block++) { 3012 new_bh = sb_getblk(osb->sb, new_block); 3013 if (new_bh == NULL) { 3014 ret = -ENOMEM; 3015 mlog_errno(ret); 3016 break; 3017 } 3018 3019 ocfs2_set_new_buffer_uptodate(ci, new_bh); 3020 3021 ret = ocfs2_read_block(ci, old_block, &old_bh, NULL); 3022 if (ret) { 3023 mlog_errno(ret); 3024 break; 3025 } 3026 3027 ret = ocfs2_journal_access(handle, ci, new_bh, 3028 OCFS2_JOURNAL_ACCESS_CREATE); 3029 if (ret) { 3030 mlog_errno(ret); 3031 break; 3032 } 3033 3034 memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize); 3035 ocfs2_journal_dirty(handle, new_bh); 3036 3037 brelse(new_bh); 3038 brelse(old_bh); 3039 new_bh = NULL; 3040 old_bh = NULL; 3041 } 3042 3043 brelse(new_bh); 3044 brelse(old_bh); 3045 return ret; 3046 } 3047 3048 static int ocfs2_clear_ext_refcount(handle_t *handle, 3049 struct ocfs2_extent_tree *et, 3050 u32 cpos, u32 p_cluster, u32 len, 3051 unsigned int ext_flags, 3052 struct ocfs2_alloc_context *meta_ac, 3053 struct ocfs2_cached_dealloc_ctxt *dealloc) 3054 { 3055 int ret, index; 3056 struct ocfs2_extent_rec replace_rec; 3057 struct ocfs2_path *path = NULL; 3058 struct ocfs2_extent_list *el; 3059 struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); 3060 u64 ino = ocfs2_metadata_cache_owner(et->et_ci); 3061 3062 trace_ocfs2_clear_ext_refcount((unsigned long long)ino, 3063 cpos, len, p_cluster, ext_flags); 3064 3065 memset(&replace_rec, 0, sizeof(replace_rec)); 3066 replace_rec.e_cpos = cpu_to_le32(cpos); 3067 replace_rec.e_leaf_clusters = cpu_to_le16(len); 3068 replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb, 3069 p_cluster)); 3070 replace_rec.e_flags = ext_flags; 3071 replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED; 3072 3073 path = ocfs2_new_path_from_et(et); 3074 if (!path) { 3075 ret = -ENOMEM; 3076 mlog_errno(ret); 3077 goto out; 3078 } 3079 3080 ret = ocfs2_find_path(et->et_ci, path, cpos); 3081 if (ret) { 3082 mlog_errno(ret); 3083 goto out; 3084 } 3085 3086 el = path_leaf_el(path); 3087 3088 index = ocfs2_search_extent_list(el, cpos); 3089 if (index == -1) { 3090 ret = ocfs2_error(sb, 3091 "Inode %llu has an extent at cpos %u which can no longer be found\n", 3092 (unsigned long long)ino, cpos); 3093 goto out; 3094 } 3095 3096 ret = ocfs2_split_extent(handle, et, path, index, 3097 &replace_rec, meta_ac, dealloc); 3098 if (ret) 3099 mlog_errno(ret); 3100 3101 out: 3102 ocfs2_free_path(path); 3103 return ret; 3104 } 3105 3106 static int ocfs2_replace_clusters(handle_t *handle, 3107 struct ocfs2_cow_context *context, 3108 u32 cpos, u32 old, 3109 u32 new, u32 len, 3110 unsigned int ext_flags) 3111 { 3112 int ret; 3113 struct ocfs2_caching_info *ci = context->data_et.et_ci; 3114 u64 ino = ocfs2_metadata_cache_owner(ci); 3115 3116 trace_ocfs2_replace_clusters((unsigned long long)ino, 3117 cpos, old, new, len, ext_flags); 3118 3119 /*If the old clusters is unwritten, no need to duplicate. */ 3120 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { 3121 ret = context->cow_duplicate_clusters(handle, context->inode, 3122 cpos, old, new, len); 3123 if (ret) { 3124 mlog_errno(ret); 3125 goto out; 3126 } 3127 } 3128 3129 ret = ocfs2_clear_ext_refcount(handle, &context->data_et, 3130 cpos, new, len, ext_flags, 3131 context->meta_ac, &context->dealloc); 3132 if (ret) 3133 mlog_errno(ret); 3134 out: 3135 return ret; 3136 } 3137 3138 int ocfs2_cow_sync_writeback(struct super_block *sb, 3139 struct inode *inode, 3140 u32 cpos, u32 num_clusters) 3141 { 3142 int ret; 3143 loff_t start, end; 3144 3145 if (ocfs2_should_order_data(inode)) 3146 return 0; 3147 3148 start = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; 3149 end = start + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits) - 1; 3150 3151 ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 3152 if (ret < 0) 3153 mlog_errno(ret); 3154 3155 return ret; 3156 } 3157 3158 static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context, 3159 u32 v_cluster, u32 *p_cluster, 3160 u32 *num_clusters, 3161 unsigned int *extent_flags) 3162 { 3163 return ocfs2_get_clusters(context->inode, v_cluster, p_cluster, 3164 num_clusters, extent_flags); 3165 } 3166 3167 static int ocfs2_make_clusters_writable(struct super_block *sb, 3168 struct ocfs2_cow_context *context, 3169 u32 cpos, u32 p_cluster, 3170 u32 num_clusters, unsigned int e_flags) 3171 { 3172 int ret, delete, index, credits = 0; 3173 u32 new_bit, new_len, orig_num_clusters; 3174 unsigned int set_len; 3175 struct ocfs2_super *osb = OCFS2_SB(sb); 3176 handle_t *handle; 3177 struct buffer_head *ref_leaf_bh = NULL; 3178 struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; 3179 struct ocfs2_refcount_rec rec; 3180 3181 trace_ocfs2_make_clusters_writable(cpos, p_cluster, 3182 num_clusters, e_flags); 3183 3184 ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters, 3185 &context->data_et, 3186 ref_ci, 3187 context->ref_root_bh, 3188 &context->meta_ac, 3189 &context->data_ac, &credits); 3190 if (ret) { 3191 mlog_errno(ret); 3192 return ret; 3193 } 3194 3195 if (context->post_refcount) 3196 credits += context->post_refcount->credits; 3197 3198 credits += context->extra_credits; 3199 handle = ocfs2_start_trans(osb, credits); 3200 if (IS_ERR(handle)) { 3201 ret = PTR_ERR(handle); 3202 mlog_errno(ret); 3203 goto out; 3204 } 3205 3206 orig_num_clusters = num_clusters; 3207 3208 while (num_clusters) { 3209 ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, 3210 p_cluster, num_clusters, 3211 &rec, &index, &ref_leaf_bh); 3212 if (ret) { 3213 mlog_errno(ret); 3214 goto out_commit; 3215 } 3216 3217 BUG_ON(!rec.r_refcount); 3218 set_len = min((u64)p_cluster + num_clusters, 3219 le64_to_cpu(rec.r_cpos) + 3220 le32_to_cpu(rec.r_clusters)) - p_cluster; 3221 3222 /* 3223 * There are many different situation here. 3224 * 1. If refcount == 1, remove the flag and don't COW. 3225 * 2. If refcount > 1, allocate clusters. 3226 * Here we may not allocate r_len once at a time, so continue 3227 * until we reach num_clusters. 3228 */ 3229 if (le32_to_cpu(rec.r_refcount) == 1) { 3230 delete = 0; 3231 ret = ocfs2_clear_ext_refcount(handle, 3232 &context->data_et, 3233 cpos, p_cluster, 3234 set_len, e_flags, 3235 context->meta_ac, 3236 &context->dealloc); 3237 if (ret) { 3238 mlog_errno(ret); 3239 goto out_commit; 3240 } 3241 } else { 3242 delete = 1; 3243 3244 ret = __ocfs2_claim_clusters(handle, 3245 context->data_ac, 3246 1, set_len, 3247 &new_bit, &new_len); 3248 if (ret) { 3249 mlog_errno(ret); 3250 goto out_commit; 3251 } 3252 3253 ret = ocfs2_replace_clusters(handle, context, 3254 cpos, p_cluster, new_bit, 3255 new_len, e_flags); 3256 if (ret) { 3257 mlog_errno(ret); 3258 goto out_commit; 3259 } 3260 set_len = new_len; 3261 } 3262 3263 ret = __ocfs2_decrease_refcount(handle, ref_ci, 3264 context->ref_root_bh, 3265 p_cluster, set_len, 3266 context->meta_ac, 3267 &context->dealloc, delete); 3268 if (ret) { 3269 mlog_errno(ret); 3270 goto out_commit; 3271 } 3272 3273 cpos += set_len; 3274 p_cluster += set_len; 3275 num_clusters -= set_len; 3276 brelse(ref_leaf_bh); 3277 ref_leaf_bh = NULL; 3278 } 3279 3280 /* handle any post_cow action. */ 3281 if (context->post_refcount && context->post_refcount->func) { 3282 ret = context->post_refcount->func(context->inode, handle, 3283 context->post_refcount->para); 3284 if (ret) { 3285 mlog_errno(ret); 3286 goto out_commit; 3287 } 3288 } 3289 3290 /* 3291 * Here we should write the new page out first if we are 3292 * in write-back mode. 3293 */ 3294 if (context->get_clusters == ocfs2_di_get_clusters) { 3295 ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos, 3296 orig_num_clusters); 3297 if (ret) 3298 mlog_errno(ret); 3299 } 3300 3301 out_commit: 3302 ocfs2_commit_trans(osb, handle); 3303 3304 out: 3305 if (context->data_ac) { 3306 ocfs2_free_alloc_context(context->data_ac); 3307 context->data_ac = NULL; 3308 } 3309 if (context->meta_ac) { 3310 ocfs2_free_alloc_context(context->meta_ac); 3311 context->meta_ac = NULL; 3312 } 3313 brelse(ref_leaf_bh); 3314 3315 return ret; 3316 } 3317 3318 static int ocfs2_replace_cow(struct ocfs2_cow_context *context) 3319 { 3320 int ret = 0; 3321 struct inode *inode = context->inode; 3322 u32 cow_start = context->cow_start, cow_len = context->cow_len; 3323 u32 p_cluster, num_clusters; 3324 unsigned int ext_flags; 3325 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3326 3327 if (!ocfs2_refcount_tree(osb)) { 3328 return ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n", 3329 inode->i_ino); 3330 } 3331 3332 ocfs2_init_dealloc_ctxt(&context->dealloc); 3333 3334 while (cow_len) { 3335 ret = context->get_clusters(context, cow_start, &p_cluster, 3336 &num_clusters, &ext_flags); 3337 if (ret) { 3338 mlog_errno(ret); 3339 break; 3340 } 3341 3342 BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED)); 3343 3344 if (cow_len < num_clusters) 3345 num_clusters = cow_len; 3346 3347 ret = ocfs2_make_clusters_writable(inode->i_sb, context, 3348 cow_start, p_cluster, 3349 num_clusters, ext_flags); 3350 if (ret) { 3351 mlog_errno(ret); 3352 break; 3353 } 3354 3355 cow_len -= num_clusters; 3356 cow_start += num_clusters; 3357 } 3358 3359 if (ocfs2_dealloc_has_cluster(&context->dealloc)) { 3360 ocfs2_schedule_truncate_log_flush(osb, 1); 3361 ocfs2_run_deallocs(osb, &context->dealloc); 3362 } 3363 3364 return ret; 3365 } 3366 3367 /* 3368 * Starting at cpos, try to CoW write_len clusters. Don't CoW 3369 * past max_cpos. This will stop when it runs into a hole or an 3370 * unrefcounted extent. 3371 */ 3372 static int ocfs2_refcount_cow_hunk(struct inode *inode, 3373 struct buffer_head *di_bh, 3374 u32 cpos, u32 write_len, u32 max_cpos) 3375 { 3376 int ret; 3377 u32 cow_start = 0, cow_len = 0; 3378 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3379 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3380 struct buffer_head *ref_root_bh = NULL; 3381 struct ocfs2_refcount_tree *ref_tree; 3382 struct ocfs2_cow_context *context = NULL; 3383 3384 BUG_ON(!ocfs2_is_refcount_inode(inode)); 3385 3386 ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list, 3387 cpos, write_len, max_cpos, 3388 &cow_start, &cow_len); 3389 if (ret) { 3390 mlog_errno(ret); 3391 goto out; 3392 } 3393 3394 trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno, 3395 cpos, write_len, max_cpos, 3396 cow_start, cow_len); 3397 3398 BUG_ON(cow_len == 0); 3399 3400 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3401 if (!context) { 3402 ret = -ENOMEM; 3403 mlog_errno(ret); 3404 goto out; 3405 } 3406 3407 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 3408 1, &ref_tree, &ref_root_bh); 3409 if (ret) { 3410 mlog_errno(ret); 3411 goto out; 3412 } 3413 3414 context->inode = inode; 3415 context->cow_start = cow_start; 3416 context->cow_len = cow_len; 3417 context->ref_tree = ref_tree; 3418 context->ref_root_bh = ref_root_bh; 3419 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; 3420 context->get_clusters = ocfs2_di_get_clusters; 3421 3422 ocfs2_init_dinode_extent_tree(&context->data_et, 3423 INODE_CACHE(inode), di_bh); 3424 3425 ret = ocfs2_replace_cow(context); 3426 if (ret) 3427 mlog_errno(ret); 3428 3429 /* 3430 * truncate the extent map here since no matter whether we meet with 3431 * any error during the action, we shouldn't trust cached extent map 3432 * any more. 3433 */ 3434 ocfs2_extent_map_trunc(inode, cow_start); 3435 3436 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 3437 brelse(ref_root_bh); 3438 out: 3439 kfree(context); 3440 return ret; 3441 } 3442 3443 /* 3444 * CoW any and all clusters between cpos and cpos+write_len. 3445 * Don't CoW past max_cpos. If this returns successfully, all 3446 * clusters between cpos and cpos+write_len are safe to modify. 3447 */ 3448 int ocfs2_refcount_cow(struct inode *inode, 3449 struct buffer_head *di_bh, 3450 u32 cpos, u32 write_len, u32 max_cpos) 3451 { 3452 int ret = 0; 3453 u32 p_cluster, num_clusters; 3454 unsigned int ext_flags; 3455 3456 while (write_len) { 3457 ret = ocfs2_get_clusters(inode, cpos, &p_cluster, 3458 &num_clusters, &ext_flags); 3459 if (ret) { 3460 mlog_errno(ret); 3461 break; 3462 } 3463 3464 if (write_len < num_clusters) 3465 num_clusters = write_len; 3466 3467 if (ext_flags & OCFS2_EXT_REFCOUNTED) { 3468 ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, 3469 num_clusters, max_cpos); 3470 if (ret) { 3471 mlog_errno(ret); 3472 break; 3473 } 3474 } 3475 3476 write_len -= num_clusters; 3477 cpos += num_clusters; 3478 } 3479 3480 return ret; 3481 } 3482 3483 static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context, 3484 u32 v_cluster, u32 *p_cluster, 3485 u32 *num_clusters, 3486 unsigned int *extent_flags) 3487 { 3488 struct inode *inode = context->inode; 3489 struct ocfs2_xattr_value_root *xv = context->cow_object; 3490 3491 return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster, 3492 num_clusters, &xv->xr_list, 3493 extent_flags); 3494 } 3495 3496 /* 3497 * Given a xattr value root, calculate the most meta/credits we need for 3498 * refcount tree change if we truncate it to 0. 3499 */ 3500 int ocfs2_refcounted_xattr_delete_need(struct inode *inode, 3501 struct ocfs2_caching_info *ref_ci, 3502 struct buffer_head *ref_root_bh, 3503 struct ocfs2_xattr_value_root *xv, 3504 int *meta_add, int *credits) 3505 { 3506 int ret = 0, index, ref_blocks = 0; 3507 u32 p_cluster, num_clusters; 3508 u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters); 3509 struct ocfs2_refcount_block *rb; 3510 struct ocfs2_refcount_rec rec; 3511 struct buffer_head *ref_leaf_bh = NULL; 3512 3513 while (cpos < clusters) { 3514 ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, 3515 &num_clusters, &xv->xr_list, 3516 NULL); 3517 if (ret) { 3518 mlog_errno(ret); 3519 goto out; 3520 } 3521 3522 cpos += num_clusters; 3523 3524 while (num_clusters) { 3525 ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh, 3526 p_cluster, num_clusters, 3527 &rec, &index, 3528 &ref_leaf_bh); 3529 if (ret) { 3530 mlog_errno(ret); 3531 goto out; 3532 } 3533 3534 BUG_ON(!rec.r_refcount); 3535 3536 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 3537 3538 /* 3539 * We really don't know whether the other clusters is in 3540 * this refcount block or not, so just take the worst 3541 * case that all the clusters are in this block and each 3542 * one will split a refcount rec, so totally we need 3543 * clusters * 2 new refcount rec. 3544 */ 3545 if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 > 3546 le16_to_cpu(rb->rf_records.rl_count)) 3547 ref_blocks++; 3548 3549 *credits += 1; 3550 brelse(ref_leaf_bh); 3551 ref_leaf_bh = NULL; 3552 3553 if (num_clusters <= le32_to_cpu(rec.r_clusters)) 3554 break; 3555 else 3556 num_clusters -= le32_to_cpu(rec.r_clusters); 3557 p_cluster += num_clusters; 3558 } 3559 } 3560 3561 *meta_add += ref_blocks; 3562 if (!ref_blocks) 3563 goto out; 3564 3565 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 3566 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) 3567 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; 3568 else { 3569 struct ocfs2_extent_tree et; 3570 3571 ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh); 3572 *credits += ocfs2_calc_extend_credits(inode->i_sb, 3573 et.et_root_el); 3574 } 3575 3576 out: 3577 brelse(ref_leaf_bh); 3578 return ret; 3579 } 3580 3581 /* 3582 * Do CoW for xattr. 3583 */ 3584 int ocfs2_refcount_cow_xattr(struct inode *inode, 3585 struct ocfs2_dinode *di, 3586 struct ocfs2_xattr_value_buf *vb, 3587 struct ocfs2_refcount_tree *ref_tree, 3588 struct buffer_head *ref_root_bh, 3589 u32 cpos, u32 write_len, 3590 struct ocfs2_post_refcount *post) 3591 { 3592 int ret; 3593 struct ocfs2_xattr_value_root *xv = vb->vb_xv; 3594 struct ocfs2_cow_context *context = NULL; 3595 u32 cow_start, cow_len; 3596 3597 BUG_ON(!ocfs2_is_refcount_inode(inode)); 3598 3599 ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list, 3600 cpos, write_len, UINT_MAX, 3601 &cow_start, &cow_len); 3602 if (ret) { 3603 mlog_errno(ret); 3604 goto out; 3605 } 3606 3607 BUG_ON(cow_len == 0); 3608 3609 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3610 if (!context) { 3611 ret = -ENOMEM; 3612 mlog_errno(ret); 3613 goto out; 3614 } 3615 3616 context->inode = inode; 3617 context->cow_start = cow_start; 3618 context->cow_len = cow_len; 3619 context->ref_tree = ref_tree; 3620 context->ref_root_bh = ref_root_bh; 3621 context->cow_object = xv; 3622 3623 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd; 3624 /* We need the extra credits for duplicate_clusters by jbd. */ 3625 context->extra_credits = 3626 ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len; 3627 context->get_clusters = ocfs2_xattr_value_get_clusters; 3628 context->post_refcount = post; 3629 3630 ocfs2_init_xattr_value_extent_tree(&context->data_et, 3631 INODE_CACHE(inode), vb); 3632 3633 ret = ocfs2_replace_cow(context); 3634 if (ret) 3635 mlog_errno(ret); 3636 3637 out: 3638 kfree(context); 3639 return ret; 3640 } 3641 3642 /* 3643 * Insert a new extent into refcount tree and mark a extent rec 3644 * as refcounted in the dinode tree. 3645 */ 3646 int ocfs2_add_refcount_flag(struct inode *inode, 3647 struct ocfs2_extent_tree *data_et, 3648 struct ocfs2_caching_info *ref_ci, 3649 struct buffer_head *ref_root_bh, 3650 u32 cpos, u32 p_cluster, u32 num_clusters, 3651 struct ocfs2_cached_dealloc_ctxt *dealloc, 3652 struct ocfs2_post_refcount *post) 3653 { 3654 int ret; 3655 handle_t *handle; 3656 int credits = 1, ref_blocks = 0; 3657 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3658 struct ocfs2_alloc_context *meta_ac = NULL; 3659 3660 /* We need to be able to handle at least an extent tree split. */ 3661 ref_blocks = ocfs2_extend_meta_needed(data_et->et_root_el); 3662 3663 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, 3664 ref_ci, ref_root_bh, 3665 p_cluster, num_clusters, 3666 &ref_blocks, &credits); 3667 if (ret) { 3668 mlog_errno(ret); 3669 goto out; 3670 } 3671 3672 trace_ocfs2_add_refcount_flag(ref_blocks, credits); 3673 3674 if (ref_blocks) { 3675 ret = ocfs2_reserve_new_metadata_blocks(osb, 3676 ref_blocks, &meta_ac); 3677 if (ret) { 3678 mlog_errno(ret); 3679 goto out; 3680 } 3681 } 3682 3683 if (post) 3684 credits += post->credits; 3685 3686 handle = ocfs2_start_trans(osb, credits); 3687 if (IS_ERR(handle)) { 3688 ret = PTR_ERR(handle); 3689 mlog_errno(ret); 3690 goto out; 3691 } 3692 3693 ret = ocfs2_mark_extent_refcounted(inode, data_et, handle, 3694 cpos, num_clusters, p_cluster, 3695 meta_ac, dealloc); 3696 if (ret) { 3697 mlog_errno(ret); 3698 goto out_commit; 3699 } 3700 3701 ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, 3702 p_cluster, num_clusters, 0, 3703 meta_ac, dealloc); 3704 if (ret) { 3705 mlog_errno(ret); 3706 goto out_commit; 3707 } 3708 3709 if (post && post->func) { 3710 ret = post->func(inode, handle, post->para); 3711 if (ret) 3712 mlog_errno(ret); 3713 } 3714 3715 out_commit: 3716 ocfs2_commit_trans(osb, handle); 3717 out: 3718 if (meta_ac) 3719 ocfs2_free_alloc_context(meta_ac); 3720 return ret; 3721 } 3722 3723 static int ocfs2_change_ctime(struct inode *inode, 3724 struct buffer_head *di_bh) 3725 { 3726 int ret; 3727 handle_t *handle; 3728 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3729 3730 handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb), 3731 OCFS2_INODE_UPDATE_CREDITS); 3732 if (IS_ERR(handle)) { 3733 ret = PTR_ERR(handle); 3734 mlog_errno(ret); 3735 goto out; 3736 } 3737 3738 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, 3739 OCFS2_JOURNAL_ACCESS_WRITE); 3740 if (ret) { 3741 mlog_errno(ret); 3742 goto out_commit; 3743 } 3744 3745 inode_set_ctime_current(inode); 3746 di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode)); 3747 di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode)); 3748 3749 ocfs2_journal_dirty(handle, di_bh); 3750 3751 out_commit: 3752 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); 3753 out: 3754 return ret; 3755 } 3756 3757 static int ocfs2_attach_refcount_tree(struct inode *inode, 3758 struct buffer_head *di_bh) 3759 { 3760 int ret, data_changed = 0; 3761 struct buffer_head *ref_root_bh = NULL; 3762 struct ocfs2_inode_info *oi = OCFS2_I(inode); 3763 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3764 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3765 struct ocfs2_refcount_tree *ref_tree; 3766 unsigned int ext_flags; 3767 loff_t size; 3768 u32 cpos, num_clusters, clusters, p_cluster; 3769 struct ocfs2_cached_dealloc_ctxt dealloc; 3770 struct ocfs2_extent_tree di_et; 3771 3772 ocfs2_init_dealloc_ctxt(&dealloc); 3773 3774 if (!ocfs2_is_refcount_inode(inode)) { 3775 ret = ocfs2_create_refcount_tree(inode, di_bh); 3776 if (ret) { 3777 mlog_errno(ret); 3778 goto out; 3779 } 3780 } 3781 3782 BUG_ON(!di->i_refcount_loc); 3783 ret = ocfs2_lock_refcount_tree(osb, 3784 le64_to_cpu(di->i_refcount_loc), 1, 3785 &ref_tree, &ref_root_bh); 3786 if (ret) { 3787 mlog_errno(ret); 3788 goto out; 3789 } 3790 3791 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) 3792 goto attach_xattr; 3793 3794 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh); 3795 3796 size = i_size_read(inode); 3797 clusters = ocfs2_clusters_for_bytes(inode->i_sb, size); 3798 3799 cpos = 0; 3800 while (cpos < clusters) { 3801 ret = ocfs2_get_clusters(inode, cpos, &p_cluster, 3802 &num_clusters, &ext_flags); 3803 if (ret) { 3804 mlog_errno(ret); 3805 goto unlock; 3806 } 3807 if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) { 3808 ret = ocfs2_add_refcount_flag(inode, &di_et, 3809 &ref_tree->rf_ci, 3810 ref_root_bh, cpos, 3811 p_cluster, num_clusters, 3812 &dealloc, NULL); 3813 if (ret) { 3814 mlog_errno(ret); 3815 goto unlock; 3816 } 3817 3818 data_changed = 1; 3819 } 3820 cpos += num_clusters; 3821 } 3822 3823 attach_xattr: 3824 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) { 3825 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh, 3826 &ref_tree->rf_ci, 3827 ref_root_bh, 3828 &dealloc); 3829 if (ret) { 3830 mlog_errno(ret); 3831 goto unlock; 3832 } 3833 } 3834 3835 if (data_changed) { 3836 ret = ocfs2_change_ctime(inode, di_bh); 3837 if (ret) 3838 mlog_errno(ret); 3839 } 3840 3841 unlock: 3842 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 3843 brelse(ref_root_bh); 3844 3845 if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) { 3846 ocfs2_schedule_truncate_log_flush(osb, 1); 3847 ocfs2_run_deallocs(osb, &dealloc); 3848 } 3849 out: 3850 /* 3851 * Empty the extent map so that we may get the right extent 3852 * record from the disk. 3853 */ 3854 ocfs2_extent_map_trunc(inode, 0); 3855 3856 return ret; 3857 } 3858 3859 static int ocfs2_add_refcounted_extent(struct inode *inode, 3860 struct ocfs2_extent_tree *et, 3861 struct ocfs2_caching_info *ref_ci, 3862 struct buffer_head *ref_root_bh, 3863 u32 cpos, u32 p_cluster, u32 num_clusters, 3864 unsigned int ext_flags, 3865 struct ocfs2_cached_dealloc_ctxt *dealloc) 3866 { 3867 int ret; 3868 handle_t *handle; 3869 int credits = 0; 3870 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3871 struct ocfs2_alloc_context *meta_ac = NULL; 3872 3873 ret = ocfs2_lock_refcount_allocators(inode->i_sb, 3874 p_cluster, num_clusters, 3875 et, ref_ci, 3876 ref_root_bh, &meta_ac, 3877 NULL, &credits); 3878 if (ret) { 3879 mlog_errno(ret); 3880 goto out; 3881 } 3882 3883 handle = ocfs2_start_trans(osb, credits); 3884 if (IS_ERR(handle)) { 3885 ret = PTR_ERR(handle); 3886 mlog_errno(ret); 3887 goto out; 3888 } 3889 3890 ret = ocfs2_insert_extent(handle, et, cpos, 3891 ocfs2_clusters_to_blocks(inode->i_sb, p_cluster), 3892 num_clusters, ext_flags, meta_ac); 3893 if (ret) { 3894 mlog_errno(ret); 3895 goto out_commit; 3896 } 3897 3898 ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, 3899 p_cluster, num_clusters, 3900 meta_ac, dealloc); 3901 if (ret) { 3902 mlog_errno(ret); 3903 goto out_commit; 3904 } 3905 3906 ret = dquot_alloc_space_nodirty(inode, 3907 ocfs2_clusters_to_bytes(osb->sb, num_clusters)); 3908 if (ret) 3909 mlog_errno(ret); 3910 3911 out_commit: 3912 ocfs2_commit_trans(osb, handle); 3913 out: 3914 if (meta_ac) 3915 ocfs2_free_alloc_context(meta_ac); 3916 return ret; 3917 } 3918 3919 static int ocfs2_duplicate_inline_data(struct inode *s_inode, 3920 struct buffer_head *s_bh, 3921 struct inode *t_inode, 3922 struct buffer_head *t_bh) 3923 { 3924 int ret; 3925 handle_t *handle; 3926 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb); 3927 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data; 3928 struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data; 3929 3930 BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); 3931 3932 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); 3933 if (IS_ERR(handle)) { 3934 ret = PTR_ERR(handle); 3935 mlog_errno(ret); 3936 goto out; 3937 } 3938 3939 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh, 3940 OCFS2_JOURNAL_ACCESS_WRITE); 3941 if (ret) { 3942 mlog_errno(ret); 3943 goto out_commit; 3944 } 3945 3946 t_di->id2.i_data.id_count = s_di->id2.i_data.id_count; 3947 memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data, 3948 le16_to_cpu(s_di->id2.i_data.id_count)); 3949 spin_lock(&OCFS2_I(t_inode)->ip_lock); 3950 OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL; 3951 t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features); 3952 spin_unlock(&OCFS2_I(t_inode)->ip_lock); 3953 3954 ocfs2_journal_dirty(handle, t_bh); 3955 3956 out_commit: 3957 ocfs2_commit_trans(osb, handle); 3958 out: 3959 return ret; 3960 } 3961 3962 static int ocfs2_duplicate_extent_list(struct inode *s_inode, 3963 struct inode *t_inode, 3964 struct buffer_head *t_bh, 3965 struct ocfs2_caching_info *ref_ci, 3966 struct buffer_head *ref_root_bh, 3967 struct ocfs2_cached_dealloc_ctxt *dealloc) 3968 { 3969 int ret = 0; 3970 u32 p_cluster, num_clusters, clusters, cpos; 3971 loff_t size; 3972 unsigned int ext_flags; 3973 struct ocfs2_extent_tree et; 3974 3975 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh); 3976 3977 size = i_size_read(s_inode); 3978 clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size); 3979 3980 cpos = 0; 3981 while (cpos < clusters) { 3982 ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster, 3983 &num_clusters, &ext_flags); 3984 if (ret) { 3985 mlog_errno(ret); 3986 goto out; 3987 } 3988 if (p_cluster) { 3989 ret = ocfs2_add_refcounted_extent(t_inode, &et, 3990 ref_ci, ref_root_bh, 3991 cpos, p_cluster, 3992 num_clusters, 3993 ext_flags, 3994 dealloc); 3995 if (ret) { 3996 mlog_errno(ret); 3997 goto out; 3998 } 3999 } 4000 4001 cpos += num_clusters; 4002 } 4003 4004 out: 4005 return ret; 4006 } 4007 4008 /* 4009 * change the new file's attributes to the src. 4010 * 4011 * reflink creates a snapshot of a file, that means the attributes 4012 * must be identical except for three exceptions - nlink, ino, and ctime. 4013 */ 4014 static int ocfs2_complete_reflink(struct inode *s_inode, 4015 struct buffer_head *s_bh, 4016 struct inode *t_inode, 4017 struct buffer_head *t_bh, 4018 bool preserve) 4019 { 4020 int ret; 4021 handle_t *handle; 4022 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data; 4023 struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data; 4024 loff_t size = i_size_read(s_inode); 4025 4026 handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb), 4027 OCFS2_INODE_UPDATE_CREDITS); 4028 if (IS_ERR(handle)) { 4029 ret = PTR_ERR(handle); 4030 mlog_errno(ret); 4031 return ret; 4032 } 4033 4034 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh, 4035 OCFS2_JOURNAL_ACCESS_WRITE); 4036 if (ret) { 4037 mlog_errno(ret); 4038 goto out_commit; 4039 } 4040 4041 spin_lock(&OCFS2_I(t_inode)->ip_lock); 4042 OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters; 4043 OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr; 4044 OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features; 4045 spin_unlock(&OCFS2_I(t_inode)->ip_lock); 4046 i_size_write(t_inode, size); 4047 t_inode->i_blocks = s_inode->i_blocks; 4048 4049 di->i_xattr_inline_size = s_di->i_xattr_inline_size; 4050 di->i_clusters = s_di->i_clusters; 4051 di->i_size = s_di->i_size; 4052 di->i_dyn_features = s_di->i_dyn_features; 4053 di->i_attr = s_di->i_attr; 4054 4055 if (preserve) { 4056 t_inode->i_uid = s_inode->i_uid; 4057 t_inode->i_gid = s_inode->i_gid; 4058 t_inode->i_mode = s_inode->i_mode; 4059 di->i_uid = s_di->i_uid; 4060 di->i_gid = s_di->i_gid; 4061 di->i_mode = s_di->i_mode; 4062 4063 /* 4064 * update time. 4065 * we want mtime to appear identical to the source and 4066 * update ctime. 4067 */ 4068 inode_set_ctime_current(t_inode); 4069 4070 di->i_ctime = cpu_to_le64(inode_get_ctime_sec(t_inode)); 4071 di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(t_inode)); 4072 4073 inode_set_mtime_to_ts(t_inode, inode_get_mtime(s_inode)); 4074 di->i_mtime = s_di->i_mtime; 4075 di->i_mtime_nsec = s_di->i_mtime_nsec; 4076 } 4077 4078 ocfs2_journal_dirty(handle, t_bh); 4079 4080 out_commit: 4081 ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle); 4082 return ret; 4083 } 4084 4085 static int ocfs2_create_reflink_node(struct inode *s_inode, 4086 struct buffer_head *s_bh, 4087 struct inode *t_inode, 4088 struct buffer_head *t_bh, 4089 bool preserve) 4090 { 4091 int ret; 4092 struct buffer_head *ref_root_bh = NULL; 4093 struct ocfs2_cached_dealloc_ctxt dealloc; 4094 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb); 4095 struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data; 4096 struct ocfs2_refcount_tree *ref_tree; 4097 4098 ocfs2_init_dealloc_ctxt(&dealloc); 4099 4100 ret = ocfs2_set_refcount_tree(t_inode, t_bh, 4101 le64_to_cpu(di->i_refcount_loc)); 4102 if (ret) { 4103 mlog_errno(ret); 4104 goto out; 4105 } 4106 4107 if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { 4108 ret = ocfs2_duplicate_inline_data(s_inode, s_bh, 4109 t_inode, t_bh); 4110 if (ret) 4111 mlog_errno(ret); 4112 goto out; 4113 } 4114 4115 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 4116 1, &ref_tree, &ref_root_bh); 4117 if (ret) { 4118 mlog_errno(ret); 4119 goto out; 4120 } 4121 4122 ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh, 4123 &ref_tree->rf_ci, ref_root_bh, 4124 &dealloc); 4125 if (ret) { 4126 mlog_errno(ret); 4127 goto out_unlock_refcount; 4128 } 4129 4130 out_unlock_refcount: 4131 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 4132 brelse(ref_root_bh); 4133 out: 4134 if (ocfs2_dealloc_has_cluster(&dealloc)) { 4135 ocfs2_schedule_truncate_log_flush(osb, 1); 4136 ocfs2_run_deallocs(osb, &dealloc); 4137 } 4138 4139 return ret; 4140 } 4141 4142 static int __ocfs2_reflink(struct dentry *old_dentry, 4143 struct buffer_head *old_bh, 4144 struct inode *new_inode, 4145 bool preserve) 4146 { 4147 int ret; 4148 struct inode *inode = d_inode(old_dentry); 4149 struct buffer_head *new_bh = NULL; 4150 struct ocfs2_inode_info *oi = OCFS2_I(inode); 4151 4152 if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) { 4153 ret = -EINVAL; 4154 mlog_errno(ret); 4155 goto out; 4156 } 4157 4158 ret = filemap_fdatawrite(inode->i_mapping); 4159 if (ret) { 4160 mlog_errno(ret); 4161 goto out; 4162 } 4163 4164 ret = ocfs2_attach_refcount_tree(inode, old_bh); 4165 if (ret) { 4166 mlog_errno(ret); 4167 goto out; 4168 } 4169 4170 inode_lock_nested(new_inode, I_MUTEX_CHILD); 4171 ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1, 4172 OI_LS_REFLINK_TARGET); 4173 if (ret) { 4174 mlog_errno(ret); 4175 goto out_unlock; 4176 } 4177 4178 if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && 4179 (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { 4180 /* 4181 * Adjust extent record count to reserve space for extended attribute. 4182 * Inline data count had been adjusted in ocfs2_duplicate_inline_data(). 4183 */ 4184 struct ocfs2_inode_info *new_oi = OCFS2_I(new_inode); 4185 4186 if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) && 4187 !(ocfs2_inode_is_fast_symlink(new_inode))) { 4188 struct ocfs2_dinode *new_di = (struct ocfs2_dinode *)new_bh->b_data; 4189 struct ocfs2_dinode *old_di = (struct ocfs2_dinode *)old_bh->b_data; 4190 struct ocfs2_extent_list *el = &new_di->id2.i_list; 4191 int inline_size = le16_to_cpu(old_di->i_xattr_inline_size); 4192 4193 le16_add_cpu(&el->l_count, -(inline_size / 4194 sizeof(struct ocfs2_extent_rec))); 4195 } 4196 } 4197 4198 ret = ocfs2_create_reflink_node(inode, old_bh, 4199 new_inode, new_bh, preserve); 4200 if (ret) { 4201 mlog_errno(ret); 4202 goto inode_unlock; 4203 } 4204 4205 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) { 4206 ret = ocfs2_reflink_xattrs(inode, old_bh, 4207 new_inode, new_bh, 4208 preserve); 4209 if (ret) { 4210 mlog_errno(ret); 4211 goto inode_unlock; 4212 } 4213 } 4214 4215 ret = ocfs2_complete_reflink(inode, old_bh, 4216 new_inode, new_bh, preserve); 4217 if (ret) 4218 mlog_errno(ret); 4219 4220 inode_unlock: 4221 ocfs2_inode_unlock(new_inode, 1); 4222 brelse(new_bh); 4223 out_unlock: 4224 inode_unlock(new_inode); 4225 out: 4226 if (!ret) { 4227 ret = filemap_fdatawait(inode->i_mapping); 4228 if (ret) 4229 mlog_errno(ret); 4230 } 4231 return ret; 4232 } 4233 4234 static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, 4235 struct dentry *new_dentry, bool preserve) 4236 { 4237 int error, had_lock; 4238 struct inode *inode = d_inode(old_dentry); 4239 struct buffer_head *old_bh = NULL; 4240 struct inode *new_orphan_inode = NULL; 4241 struct ocfs2_lock_holder oh; 4242 4243 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) 4244 return -EOPNOTSUPP; 4245 4246 4247 error = ocfs2_create_inode_in_orphan(dir, inode->i_mode, 4248 &new_orphan_inode); 4249 if (error) { 4250 mlog_errno(error); 4251 goto out; 4252 } 4253 4254 error = ocfs2_rw_lock(inode, 1); 4255 if (error) { 4256 mlog_errno(error); 4257 goto out; 4258 } 4259 4260 error = ocfs2_inode_lock(inode, &old_bh, 1); 4261 if (error) { 4262 mlog_errno(error); 4263 ocfs2_rw_unlock(inode, 1); 4264 goto out; 4265 } 4266 4267 down_write(&OCFS2_I(inode)->ip_xattr_sem); 4268 down_write(&OCFS2_I(inode)->ip_alloc_sem); 4269 error = __ocfs2_reflink(old_dentry, old_bh, 4270 new_orphan_inode, preserve); 4271 up_write(&OCFS2_I(inode)->ip_alloc_sem); 4272 up_write(&OCFS2_I(inode)->ip_xattr_sem); 4273 4274 ocfs2_inode_unlock(inode, 1); 4275 ocfs2_rw_unlock(inode, 1); 4276 brelse(old_bh); 4277 4278 if (error) { 4279 mlog_errno(error); 4280 goto out; 4281 } 4282 4283 had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1, 4284 &oh); 4285 if (had_lock < 0) { 4286 error = had_lock; 4287 mlog_errno(error); 4288 goto out; 4289 } 4290 4291 /* If the security isn't preserved, we need to re-initialize them. */ 4292 if (!preserve) { 4293 error = ocfs2_init_security_and_acl(dir, new_orphan_inode, 4294 &new_dentry->d_name); 4295 if (error) 4296 mlog_errno(error); 4297 } 4298 if (!error) { 4299 error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, 4300 new_dentry); 4301 if (error) 4302 mlog_errno(error); 4303 } 4304 ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock); 4305 4306 out: 4307 if (new_orphan_inode) { 4308 /* 4309 * We need to open_unlock the inode no matter whether we 4310 * succeed or not, so that other nodes can delete it later. 4311 */ 4312 ocfs2_open_unlock(new_orphan_inode); 4313 if (error) 4314 iput(new_orphan_inode); 4315 } 4316 4317 return error; 4318 } 4319 4320 /* 4321 * Below here are the bits used by OCFS2_IOC_REFLINK() to fake 4322 * sys_reflink(). This will go away when vfs_reflink() exists in 4323 * fs/namei.c. 4324 */ 4325 4326 /* copied from may_create in VFS. */ 4327 static inline int ocfs2_may_create(struct inode *dir, struct dentry *child) 4328 { 4329 if (d_really_is_positive(child)) 4330 return -EEXIST; 4331 if (IS_DEADDIR(dir)) 4332 return -ENOENT; 4333 return inode_permission(&nop_mnt_idmap, dir, MAY_WRITE | MAY_EXEC); 4334 } 4335 4336 /** 4337 * ocfs2_vfs_reflink - Create a reference-counted link 4338 * 4339 * @old_dentry: source dentry + inode 4340 * @dir: directory to create the target 4341 * @new_dentry: target dentry 4342 * @preserve: if true, preserve all file attributes 4343 */ 4344 static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir, 4345 struct dentry *new_dentry, bool preserve) 4346 { 4347 struct inode *inode = d_inode(old_dentry); 4348 int error; 4349 4350 if (!inode) 4351 return -ENOENT; 4352 4353 error = ocfs2_may_create(dir, new_dentry); 4354 if (error) 4355 return error; 4356 4357 if (dir->i_sb != inode->i_sb) 4358 return -EXDEV; 4359 4360 /* 4361 * A reflink to an append-only or immutable file cannot be created. 4362 */ 4363 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 4364 return -EPERM; 4365 4366 /* Only regular files can be reflinked. */ 4367 if (!S_ISREG(inode->i_mode)) 4368 return -EPERM; 4369 4370 /* 4371 * If the caller wants to preserve ownership, they require the 4372 * rights to do so. 4373 */ 4374 if (preserve) { 4375 if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN)) 4376 return -EPERM; 4377 if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN)) 4378 return -EPERM; 4379 } 4380 4381 /* 4382 * If the caller is modifying any aspect of the attributes, they 4383 * are not creating a snapshot. They need read permission on the 4384 * file. 4385 */ 4386 if (!preserve) { 4387 error = inode_permission(&nop_mnt_idmap, inode, MAY_READ); 4388 if (error) 4389 return error; 4390 } 4391 4392 inode_lock(inode); 4393 error = dquot_initialize(dir); 4394 if (!error) 4395 error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve); 4396 inode_unlock(inode); 4397 if (!error) 4398 fsnotify_create(dir, new_dentry); 4399 return error; 4400 } 4401 /* 4402 * Most codes are copied from sys_linkat. 4403 */ 4404 int ocfs2_reflink_ioctl(struct inode *inode, 4405 const char __user *oldname, 4406 const char __user *newname, 4407 bool preserve) 4408 { 4409 struct dentry *new_dentry; 4410 struct path old_path, new_path; 4411 int error; 4412 4413 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) 4414 return -EOPNOTSUPP; 4415 4416 error = user_path_at(AT_FDCWD, oldname, 0, &old_path); 4417 if (error) { 4418 mlog_errno(error); 4419 return error; 4420 } 4421 4422 new_dentry = start_creating_user_path(AT_FDCWD, newname, &new_path, 0); 4423 error = PTR_ERR(new_dentry); 4424 if (IS_ERR(new_dentry)) { 4425 mlog_errno(error); 4426 goto out; 4427 } 4428 4429 error = -EXDEV; 4430 if (old_path.mnt != new_path.mnt) { 4431 mlog_errno(error); 4432 goto out_dput; 4433 } 4434 4435 error = ocfs2_vfs_reflink(old_path.dentry, 4436 d_inode(new_path.dentry), 4437 new_dentry, preserve); 4438 out_dput: 4439 end_creating_path(&new_path, new_dentry); 4440 out: 4441 path_put(&old_path); 4442 4443 return error; 4444 } 4445 4446 /* Update destination inode size, if necessary. */ 4447 int ocfs2_reflink_update_dest(struct inode *dest, 4448 struct buffer_head *d_bh, 4449 loff_t newlen) 4450 { 4451 handle_t *handle; 4452 int ret; 4453 4454 dest->i_blocks = ocfs2_inode_sector_count(dest); 4455 4456 if (newlen <= i_size_read(dest)) 4457 return 0; 4458 4459 handle = ocfs2_start_trans(OCFS2_SB(dest->i_sb), 4460 OCFS2_INODE_UPDATE_CREDITS); 4461 if (IS_ERR(handle)) { 4462 ret = PTR_ERR(handle); 4463 mlog_errno(ret); 4464 return ret; 4465 } 4466 4467 /* Extend i_size if needed. */ 4468 spin_lock(&OCFS2_I(dest)->ip_lock); 4469 if (newlen > i_size_read(dest)) 4470 i_size_write(dest, newlen); 4471 spin_unlock(&OCFS2_I(dest)->ip_lock); 4472 inode_set_mtime_to_ts(dest, inode_set_ctime_current(dest)); 4473 4474 ret = ocfs2_mark_inode_dirty(handle, dest, d_bh); 4475 if (ret) { 4476 mlog_errno(ret); 4477 goto out_commit; 4478 } 4479 4480 out_commit: 4481 ocfs2_commit_trans(OCFS2_SB(dest->i_sb), handle); 4482 return ret; 4483 } 4484 4485 /* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */ 4486 static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode, 4487 struct buffer_head *s_bh, 4488 loff_t pos_in, 4489 struct inode *t_inode, 4490 struct buffer_head *t_bh, 4491 loff_t pos_out, 4492 loff_t len, 4493 struct ocfs2_cached_dealloc_ctxt *dealloc) 4494 { 4495 struct ocfs2_extent_tree s_et; 4496 struct ocfs2_extent_tree t_et; 4497 struct ocfs2_dinode *dis; 4498 struct buffer_head *ref_root_bh = NULL; 4499 struct ocfs2_refcount_tree *ref_tree; 4500 struct ocfs2_super *osb; 4501 loff_t remapped_bytes = 0; 4502 loff_t pstart, plen; 4503 u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0; 4504 unsigned int ext_flags; 4505 int ret = 0; 4506 4507 osb = OCFS2_SB(s_inode->i_sb); 4508 dis = (struct ocfs2_dinode *)s_bh->b_data; 4509 ocfs2_init_dinode_extent_tree(&s_et, INODE_CACHE(s_inode), s_bh); 4510 ocfs2_init_dinode_extent_tree(&t_et, INODE_CACHE(t_inode), t_bh); 4511 4512 spos = ocfs2_bytes_to_clusters(s_inode->i_sb, pos_in); 4513 tpos = ocfs2_bytes_to_clusters(t_inode->i_sb, pos_out); 4514 slast = ocfs2_clusters_for_bytes(s_inode->i_sb, pos_in + len); 4515 4516 while (spos < slast) { 4517 if (fatal_signal_pending(current)) { 4518 ret = -EINTR; 4519 goto out; 4520 } 4521 4522 /* Look up the extent. */ 4523 ret = ocfs2_get_clusters(s_inode, spos, &p_cluster, 4524 &num_clusters, &ext_flags); 4525 if (ret) { 4526 mlog_errno(ret); 4527 goto out; 4528 } 4529 4530 num_clusters = min_t(u32, num_clusters, slast - spos); 4531 4532 /* Punch out the dest range. */ 4533 pstart = ocfs2_clusters_to_bytes(t_inode->i_sb, tpos); 4534 plen = ocfs2_clusters_to_bytes(t_inode->i_sb, num_clusters); 4535 ret = ocfs2_remove_inode_range(t_inode, t_bh, pstart, plen); 4536 if (ret) { 4537 mlog_errno(ret); 4538 goto out; 4539 } 4540 4541 if (p_cluster == 0) 4542 goto next_loop; 4543 4544 /* Lock the refcount btree... */ 4545 ret = ocfs2_lock_refcount_tree(osb, 4546 le64_to_cpu(dis->i_refcount_loc), 4547 1, &ref_tree, &ref_root_bh); 4548 if (ret) { 4549 mlog_errno(ret); 4550 goto out; 4551 } 4552 4553 /* Mark s_inode's extent as refcounted. */ 4554 if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) { 4555 ret = ocfs2_add_refcount_flag(s_inode, &s_et, 4556 &ref_tree->rf_ci, 4557 ref_root_bh, spos, 4558 p_cluster, num_clusters, 4559 dealloc, NULL); 4560 if (ret) { 4561 mlog_errno(ret); 4562 goto out_unlock_refcount; 4563 } 4564 } 4565 4566 /* Map in the new extent. */ 4567 ext_flags |= OCFS2_EXT_REFCOUNTED; 4568 ret = ocfs2_add_refcounted_extent(t_inode, &t_et, 4569 &ref_tree->rf_ci, 4570 ref_root_bh, 4571 tpos, p_cluster, 4572 num_clusters, 4573 ext_flags, 4574 dealloc); 4575 if (ret) { 4576 mlog_errno(ret); 4577 goto out_unlock_refcount; 4578 } 4579 4580 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 4581 brelse(ref_root_bh); 4582 next_loop: 4583 spos += num_clusters; 4584 tpos += num_clusters; 4585 remapped_clus += num_clusters; 4586 } 4587 4588 goto out; 4589 out_unlock_refcount: 4590 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 4591 brelse(ref_root_bh); 4592 out: 4593 remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus); 4594 remapped_bytes = min_t(loff_t, len, remapped_bytes); 4595 4596 return remapped_bytes > 0 ? remapped_bytes : ret; 4597 } 4598 4599 /* Set up refcount tree and remap s_inode to t_inode. */ 4600 loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode, 4601 struct buffer_head *s_bh, 4602 loff_t pos_in, 4603 struct inode *t_inode, 4604 struct buffer_head *t_bh, 4605 loff_t pos_out, 4606 loff_t len) 4607 { 4608 struct ocfs2_cached_dealloc_ctxt dealloc; 4609 struct ocfs2_super *osb; 4610 struct ocfs2_dinode *dis; 4611 struct ocfs2_dinode *dit; 4612 loff_t ret; 4613 4614 osb = OCFS2_SB(s_inode->i_sb); 4615 dis = (struct ocfs2_dinode *)s_bh->b_data; 4616 dit = (struct ocfs2_dinode *)t_bh->b_data; 4617 ocfs2_init_dealloc_ctxt(&dealloc); 4618 4619 /* 4620 * If we're reflinking the entire file and the source is inline 4621 * data, just copy the contents. 4622 */ 4623 if (pos_in == pos_out && pos_in == 0 && len == i_size_read(s_inode) && 4624 i_size_read(t_inode) <= len && 4625 (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) { 4626 ret = ocfs2_duplicate_inline_data(s_inode, s_bh, t_inode, t_bh); 4627 if (ret) 4628 mlog_errno(ret); 4629 goto out; 4630 } 4631 4632 /* 4633 * If both inodes belong to two different refcount groups then 4634 * forget it because we don't know how (or want) to go merging 4635 * refcount trees. 4636 */ 4637 ret = -EOPNOTSUPP; 4638 if (ocfs2_is_refcount_inode(s_inode) && 4639 ocfs2_is_refcount_inode(t_inode) && 4640 le64_to_cpu(dis->i_refcount_loc) != 4641 le64_to_cpu(dit->i_refcount_loc)) 4642 goto out; 4643 4644 /* Neither inode has a refcount tree. Add one to s_inode. */ 4645 if (!ocfs2_is_refcount_inode(s_inode) && 4646 !ocfs2_is_refcount_inode(t_inode)) { 4647 ret = ocfs2_create_refcount_tree(s_inode, s_bh); 4648 if (ret) { 4649 mlog_errno(ret); 4650 goto out; 4651 } 4652 } 4653 4654 /* Ensure that both inodes end up with the same refcount tree. */ 4655 if (!ocfs2_is_refcount_inode(s_inode)) { 4656 ret = ocfs2_set_refcount_tree(s_inode, s_bh, 4657 le64_to_cpu(dit->i_refcount_loc)); 4658 if (ret) { 4659 mlog_errno(ret); 4660 goto out; 4661 } 4662 } 4663 if (!ocfs2_is_refcount_inode(t_inode)) { 4664 ret = ocfs2_set_refcount_tree(t_inode, t_bh, 4665 le64_to_cpu(dis->i_refcount_loc)); 4666 if (ret) { 4667 mlog_errno(ret); 4668 goto out; 4669 } 4670 } 4671 4672 /* Turn off inline data in the dest file. */ 4673 if (OCFS2_I(t_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { 4674 ret = ocfs2_convert_inline_data_to_extents(t_inode, t_bh); 4675 if (ret) { 4676 mlog_errno(ret); 4677 goto out; 4678 } 4679 } 4680 4681 /* Actually remap extents now. */ 4682 ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh, 4683 pos_out, len, &dealloc); 4684 if (ret < 0) { 4685 mlog_errno(ret); 4686 goto out; 4687 } 4688 4689 out: 4690 if (ocfs2_dealloc_has_cluster(&dealloc)) { 4691 ocfs2_schedule_truncate_log_flush(osb, 1); 4692 ocfs2_run_deallocs(osb, &dealloc); 4693 } 4694 4695 return ret; 4696 } 4697 4698 /* Lock an inode and grab a bh pointing to the inode. */ 4699 int ocfs2_reflink_inodes_lock(struct inode *s_inode, 4700 struct buffer_head **bh_s, 4701 struct inode *t_inode, 4702 struct buffer_head **bh_t) 4703 { 4704 struct inode *inode1 = s_inode; 4705 struct inode *inode2 = t_inode; 4706 struct ocfs2_inode_info *oi1; 4707 struct ocfs2_inode_info *oi2; 4708 struct buffer_head *bh1 = NULL; 4709 struct buffer_head *bh2 = NULL; 4710 bool same_inode = (s_inode == t_inode); 4711 bool need_swap = (inode1->i_ino > inode2->i_ino); 4712 int status; 4713 4714 /* First grab the VFS and rw locks. */ 4715 lock_two_nondirectories(s_inode, t_inode); 4716 if (need_swap) 4717 swap(inode1, inode2); 4718 4719 status = ocfs2_rw_lock(inode1, 1); 4720 if (status) { 4721 mlog_errno(status); 4722 goto out_i1; 4723 } 4724 if (!same_inode) { 4725 status = ocfs2_rw_lock(inode2, 1); 4726 if (status) { 4727 mlog_errno(status); 4728 goto out_i2; 4729 } 4730 } 4731 4732 /* Now go for the cluster locks */ 4733 oi1 = OCFS2_I(inode1); 4734 oi2 = OCFS2_I(inode2); 4735 4736 trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno, 4737 (unsigned long long)oi2->ip_blkno); 4738 4739 /* We always want to lock the one with the lower lockid first. */ 4740 if (oi1->ip_blkno > oi2->ip_blkno) 4741 mlog_errno(-ENOLCK); 4742 4743 /* lock id1 */ 4744 status = ocfs2_inode_lock_nested(inode1, &bh1, 1, 4745 OI_LS_REFLINK_TARGET); 4746 if (status < 0) { 4747 if (status != -ENOENT) 4748 mlog_errno(status); 4749 goto out_rw2; 4750 } 4751 4752 /* lock id2 */ 4753 if (!same_inode) { 4754 status = ocfs2_inode_lock_nested(inode2, &bh2, 1, 4755 OI_LS_REFLINK_TARGET); 4756 if (status < 0) { 4757 if (status != -ENOENT) 4758 mlog_errno(status); 4759 goto out_cl1; 4760 } 4761 } else { 4762 bh2 = bh1; 4763 } 4764 4765 /* 4766 * If we swapped inode order above, we have to swap the buffer heads 4767 * before passing them back to the caller. 4768 */ 4769 if (need_swap) 4770 swap(bh1, bh2); 4771 *bh_s = bh1; 4772 *bh_t = bh2; 4773 4774 trace_ocfs2_double_lock_end( 4775 (unsigned long long)oi1->ip_blkno, 4776 (unsigned long long)oi2->ip_blkno); 4777 4778 return 0; 4779 4780 out_cl1: 4781 ocfs2_inode_unlock(inode1, 1); 4782 brelse(bh1); 4783 out_rw2: 4784 ocfs2_rw_unlock(inode2, 1); 4785 out_i2: 4786 ocfs2_rw_unlock(inode1, 1); 4787 out_i1: 4788 unlock_two_nondirectories(s_inode, t_inode); 4789 return status; 4790 } 4791 4792 /* Unlock both inodes and release buffers. */ 4793 void ocfs2_reflink_inodes_unlock(struct inode *s_inode, 4794 struct buffer_head *s_bh, 4795 struct inode *t_inode, 4796 struct buffer_head *t_bh) 4797 { 4798 ocfs2_inode_unlock(s_inode, 1); 4799 ocfs2_rw_unlock(s_inode, 1); 4800 brelse(s_bh); 4801 if (s_inode != t_inode) { 4802 ocfs2_inode_unlock(t_inode, 1); 4803 ocfs2_rw_unlock(t_inode, 1); 4804 brelse(t_bh); 4805 } 4806 unlock_two_nondirectories(s_inode, t_inode); 4807 } 4808