/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * refcounttree.c
 *
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/sort.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "suballoc.h"
#include "journal.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "blockcheck.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "aops.h"
#include "xattr.h"
#include "namei.h"
#include "ocfs2_trace.h"

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>

struct ocfs2_cow_context {
	struct inode *inode;
	u32 cow_start;
	u32 cow_len;
	struct ocfs2_extent_tree data_et;
	struct ocfs2_refcount_tree *ref_tree;
	struct buffer_head *ref_root_bh;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	void *cow_object;
	struct ocfs2_post_refcount *post_refcount;
	int extra_credits;
	int (*get_clusters)(struct ocfs2_cow_context *context,
			    u32 v_cluster, u32 *p_cluster,
			    u32 *num_clusters,
			    unsigned int *extent_flags);
	int (*cow_duplicate_clusters)(handle_t *handle,
				      struct inode *inode,
				      u32 cpos, u32 old_cluster,
				      u32 new_cluster, u32 new_len);
};

static inline struct ocfs2_refcount_tree *
cache_info_to_refcount(struct ocfs2_caching_info *ci)
{
	return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
}

static int ocfs2_validate_refcount_block(struct super_block *sb,
					 struct buffer_head *bh)
{
	int rc;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)bh->b_data;

	trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
		     (unsigned long long)bh->b_blocknr);
		return rc;
	}

	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
		rc = ocfs2_error(sb,
				 "Refcount block #%llu has bad signature %.*s\n",
				 (unsigned long long)bh->b_blocknr, 7,
				 rb->rf_signature);
		goto out;
	}

	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
		rc = ocfs2_error(sb,
				 "Refcount block #%llu has an invalid rf_blkno of %llu\n",
				 (unsigned long long)bh->b_blocknr,
				 (unsigned long long)le64_to_cpu(rb->rf_blkno));
		goto out;
	}

	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		rc = ocfs2_error(sb,
				 "Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
				 (unsigned long long)bh->b_blocknr,
				 le32_to_cpu(rb->rf_fs_generation));
		goto out;
	}
out:
	return rc;
}

static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
				     u64 rb_blkno,
				     struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(ci, rb_blkno, &tmp,
			      ocfs2_validate_refcount_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}

static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_blkno;
}

static struct super_block *
ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_sb;
}

static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_lock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_unlock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_lock(&rf->rf_io_mutex);
}

static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_unlock(&rf->rf_io_mutex);
}

static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
	.co_owner		= ocfs2_refcount_cache_owner,
	.co_get_super		= ocfs2_refcount_cache_get_super,
	.co_cache_lock		= ocfs2_refcount_cache_lock,
	.co_cache_unlock	= ocfs2_refcount_cache_unlock,
	.co_io_lock		= ocfs2_refcount_cache_io_lock,
	.co_io_unlock		= ocfs2_refcount_cache_io_unlock,
};

static struct ocfs2_refcount_tree *
ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
{
	struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
	struct ocfs2_refcount_tree *tree = NULL;

	while (n) {
		tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);

		if (blkno < tree->rf_blkno)
			n = n->rb_left;
		else if (blkno > tree->rf_blkno)
			n = n->rb_right;
		else
			return tree;
	}

	return NULL;
}

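/*
 * Illustrative pairing of the two rb-tree helpers (a sketch added for
 * clarity; it mirrors what ocfs2_get_refcount_tree() does below, and
 * assumes the caller holds osb->osb_lock):
 *
 *	spin_lock(&osb->osb_lock);
 *	tree = ocfs2_find_refcount_tree(osb, rf_blkno);
 *	if (!tree)
 *		ocfs2_insert_refcount_tree(osb, new);
 *	spin_unlock(&osb->osb_lock);
 */
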
/* osb_lock is already locked. */
static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
				       struct ocfs2_refcount_tree *new)
{
	u64 rf_blkno = new->rf_blkno;
	struct rb_node *parent = NULL;
	struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
	struct ocfs2_refcount_tree *tmp;

	while (*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_refcount_tree,
			       rf_node);

		if (rf_blkno < tmp->rf_blkno)
			p = &(*p)->rb_left;
		else if (rf_blkno > tmp->rf_blkno)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
			     (unsigned long long)rf_blkno);
			BUG();
		}
	}

	rb_link_node(&new->rf_node, parent, p);
	rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
}

static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
{
	ocfs2_metadata_cache_exit(&tree->rf_ci);
	ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
	ocfs2_lock_res_free(&tree->rf_lockres);
	kfree(tree);
}

static inline void
ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
	if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
		osb->osb_ref_tree_lru = NULL;
}

static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	spin_lock(&osb->osb_lock);
	ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
	spin_unlock(&osb->osb_lock);
}

static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
{
	struct ocfs2_refcount_tree *tree =
		container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);

	ocfs2_free_refcount_tree(tree);
}

static inline void
ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
{
	kref_get(&tree->rf_getcnt);
}

static inline void
ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
{
	kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
}

static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
					       struct super_block *sb)
{
	ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
	mutex_init(&new->rf_io_mutex);
	new->rf_sb = sb;
	spin_lock_init(&new->rf_lock);
}

static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *new,
					u64 rf_blkno, u32 generation)
{
	init_rwsem(&new->rf_sem);
	ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
				     rf_blkno, generation);
}

static struct ocfs2_refcount_tree*
ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
{
	struct ocfs2_refcount_tree *new;

	new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
	if (!new)
		return NULL;

	new->rf_blkno = rf_blkno;
	kref_init(&new->rf_getcnt);
	ocfs2_init_refcount_tree_ci(new, osb->sb);

	return new;
}

static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
				   struct ocfs2_refcount_tree **ret_tree)
{
	int ret = 0;
	struct ocfs2_refcount_tree *tree, *new = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *ref_rb;

	spin_lock(&osb->osb_lock);
	if (osb->osb_ref_tree_lru &&
	    osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
		tree = osb->osb_ref_tree_lru;
	else
		tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	spin_unlock(&osb->osb_lock);

	new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
	if (!new) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}
	/*
	 * We need the generation to create the refcount tree lock and since
	 * it isn't changed during the tree modification, we are safe here to
	 * read without protection.
	 * We also have to purge the cache after we create the lock since the
	 * refcount block may have stale data.  It can only be trusted when
	 * we hold the refcount lock.
	 */
	ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_metadata_cache_exit(&new->rf_ci);
		kfree(new);
		return ret;
	}

	ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
	ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
				      new->rf_generation);
	ocfs2_metadata_cache_purge(&new->rf_ci);

	spin_lock(&osb->osb_lock);
	tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	ocfs2_insert_refcount_tree(osb, new);

	tree = new;
	new = NULL;

out:
	*ret_tree = tree;

	osb->osb_ref_tree_lru = tree;

	spin_unlock(&osb->osb_lock);

	if (new)
		ocfs2_free_refcount_tree(new);

	brelse(ref_root_bh);
	return ret;
}

static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	di = (struct ocfs2_dinode *)di_bh->b_data;
	*ref_blkno = le64_to_cpu(di->i_refcount_loc);
	brelse(di_bh);
out:
	return ret;
}

static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
				      struct ocfs2_refcount_tree *tree, int rw)
{
	int ret;

	ret = ocfs2_refcount_lock(tree, rw);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (rw)
		down_write(&tree->rf_sem);
	else
		down_read(&tree->rf_sem);

out:
	return ret;
}

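/*
 * Sketch of the expected caller pattern (added note): every successful
 * ocfs2_lock_refcount_tree() must be balanced by an
 * ocfs2_unlock_refcount_tree() with the same rw flag, e.g.
 *
 *	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &tree, &ref_bh);
 *	if (!ret) {
 *		... modify the tree under the cluster lock ...
 *		ocfs2_unlock_refcount_tree(osb, tree, 1);
 *		brelse(ref_bh);
 *	}
 */
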
/*
 * Lock the refcount tree pointed to by ref_blkno and return the tree.
 * In most cases, we lock the tree and read the refcount block.
 * So read it here if the caller really needs it.
 *
 * If the tree has been re-created by another node, it will free the
 * old one and re-create it.
 */
int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
			     u64 ref_blkno, int rw,
			     struct ocfs2_refcount_tree **ret_tree,
			     struct buffer_head **ref_bh)
{
	int ret, delete_tree = 0;
	struct ocfs2_refcount_tree *tree = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;

again:
	ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	ocfs2_refcount_tree_get(tree);

	ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
	if (ret) {
		mlog_errno(ret);
		ocfs2_refcount_tree_put(tree);
		goto out;
	}

	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
					&ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_unlock_refcount_tree(osb, tree, rw);
		goto out;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	/*
	 * If the refcount block has been freed and re-created, we may need
	 * to recreate the refcount tree also.
	 *
	 * Here we just remove the tree from the rb-tree, and the last
	 * kref holder will unlock and delete this refcount_tree.
	 * Then we goto "again" and ocfs2_get_refcount_tree will create
	 * the new refcount tree for us.
	 */
	if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
		if (!tree->rf_removed) {
			ocfs2_erase_refcount_tree_from_list(osb, tree);
			tree->rf_removed = 1;
			delete_tree = 1;
		}

		ocfs2_unlock_refcount_tree(osb, tree, rw);
		/*
		 * We get an extra reference when we create the refcount
		 * tree, so another put will destroy it.
		 */
		if (delete_tree)
			ocfs2_refcount_tree_put(tree);
		brelse(ref_root_bh);
		ref_root_bh = NULL;
		goto again;
	}

	*ret_tree = tree;
	if (ref_bh) {
		*ref_bh = ref_root_bh;
		ref_root_bh = NULL;
	}
out:
	brelse(ref_root_bh);
	return ret;
}

void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
				struct ocfs2_refcount_tree *tree, int rw)
{
	if (rw)
		up_write(&tree->rf_sem);
	else
		up_read(&tree->rf_sem);

	ocfs2_refcount_unlock(tree, rw);
	ocfs2_refcount_tree_put(tree);
}

void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
{
	struct rb_node *node;
	struct ocfs2_refcount_tree *tree;
	struct rb_root *root = &osb->osb_rf_lock_tree;

	while ((node = rb_last(root)) != NULL) {
		tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);

		trace_ocfs2_purge_refcount_trees(
				(unsigned long long) tree->rf_blkno);

		rb_erase(&tree->rf_node, root);
		ocfs2_free_refcount_tree(tree);
	}
}

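/*
 * Overview of the create path below (added summary): reserve one metadata
 * block, start a transaction, claim the block from the suballocator,
 * format it as an inline refcount root with rf_count = 1, then point the
 * dinode's i_refcount_loc at it and hook the new tree into the osb rb-tree.
 */
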
/*
 * Create a refcount tree for an inode.
 * We take for granted that the inode is already locked.
 */
static int ocfs2_create_refcount_tree(struct inode *inode,
				      struct buffer_head *di_bh)
{
	int ret;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 suballoc_loc, first_blkno;

	BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);

	trace_ocfs2_create_refcount_tree(
		(unsigned long long)OCFS2_I(inode)->ip_blkno);

	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &first_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
	if (!new_tree) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit;
	}

	new_bh = sb_getblk(inode->i_sb, first_blkno);
	if (!new_bh) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit;
	}
	ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Initialize ocfs2_refcount_block. */
	rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	memset(rb, 0, inode->i_sb->s_blocksize);
	strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
	rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
	rb->rf_blkno = cpu_to_le64(first_blkno);
	rb->rf_count = cpu_to_le32(1);
	rb->rf_records.rl_count =
			cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
	spin_lock(&osb->osb_lock);
	rb->rf_generation = osb->s_next_generation++;
	spin_unlock(&osb->osb_lock);

	ocfs2_journal_dirty(handle, new_bh);

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = cpu_to_le64(first_blkno);
	spin_unlock(&oi->ip_lock);

	trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);

	ocfs2_journal_dirty(handle, di_bh);

	/*
	 * We have to init the tree lock here since it will use
	 * the generation number to create it.
	 */
	new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
	ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
				      new_tree->rf_generation);

	spin_lock(&osb->osb_lock);
	tree = ocfs2_find_refcount_tree(osb, first_blkno);

	/*
	 * We've just created a new refcount tree in this block.  If
	 * we found a refcount tree on the ocfs2_super, it must be
	 * one we just deleted.
	 * We free the old tree before
	 * inserting the new tree.
	 */
	BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
	if (tree)
		ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
	ocfs2_insert_refcount_tree(osb, new_tree);
	spin_unlock(&osb->osb_lock);
	new_tree = NULL;
	if (tree)
		ocfs2_refcount_tree_put(tree);

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	if (new_tree) {
		ocfs2_metadata_cache_exit(&new_tree->rf_ci);
		kfree(new_tree);
	}

	brelse(new_bh);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

	return ret;
}

static int ocfs2_set_refcount_tree(struct inode *inode,
				   struct buffer_head *di_bh,
				   u64 refcount_loc)
{
	int ret;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_tree *ref_tree;

	BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);

	ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
				       &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	le32_add_cpu(&rb->rf_count, 1);

	ocfs2_journal_dirty(handle, ref_root_bh);

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = cpu_to_le64(refcount_loc);
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);

	return ret;
}

int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
{
	int ret, delete_tree = 0;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_refcount_block *rb;
	struct inode *alloc_inode = NULL;
	struct buffer_head *alloc_bh = NULL;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_refcount_tree *ref_tree;
	int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
	u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
	u16 bit = 0;

	if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
		return 0;

	BUG_ON(!ref_blkno);
	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	rb = (struct ocfs2_refcount_block *)blk_bh->b_data;

	/*
	 * If we are the last user, we need to free the block.
	 * So lock the allocator ahead.
	 */
	if (le32_to_cpu(rb->rf_count) == 1) {
		blk = le64_to_cpu(rb->rf_blkno);
		bit = le16_to_cpu(rb->rf_suballoc_bit);
		if (rb->rf_suballoc_loc)
			bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
		else
			bg_blkno = ocfs2_which_suballoc_group(blk, bit);

		alloc_inode = ocfs2_get_system_file_inode(osb,
					EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(rb->rf_suballoc_slot));
		if (!alloc_inode) {
			ret = -ENOMEM;
			mlog_errno(ret);
			goto out;
		}
		inode_lock(alloc_inode);

		ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
		if (ret) {
			mlog_errno(ret);
			goto out_mutex;
		}

		credits += OCFS2_SUBALLOC_FREE;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = 0;
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

	le32_add_cpu(&rb->rf_count, -1);
	ocfs2_journal_dirty(handle, blk_bh);

	if (!rb->rf_count) {
		delete_tree = 1;
		ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
		ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
					       alloc_bh, bit, bg_blkno, 1);
		if (ret)
			mlog_errno(ret);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	if (alloc_inode) {
		ocfs2_inode_unlock(alloc_inode, 1);
		brelse(alloc_bh);
	}
out_mutex:
	if (alloc_inode) {
		inode_unlock(alloc_inode);
		iput(alloc_inode);
	}
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	if (delete_tree)
		ocfs2_refcount_tree_put(ref_tree);
	brelse(blk_bh);

	return ret;
}

static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
					  struct buffer_head *ref_leaf_bh,
					  u64 cpos, unsigned int len,
					  struct ocfs2_refcount_rec *ret_rec,
					  int *index)
{
	int i = 0;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_rec *rec = NULL;

	for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
		rec = &rb->rf_records.rl_recs[i];

		if (le64_to_cpu(rec->r_cpos) +
		    le32_to_cpu(rec->r_clusters) <= cpos)
			continue;
		else if (le64_to_cpu(rec->r_cpos) > cpos)
			break;

		/* OK, cpos falls in this rec.  Just return. */
		if (ret_rec)
			*ret_rec = *rec;
		goto out;
	}

	if (ret_rec) {
		/* We hit a hole here, so fake the rec. */
		ret_rec->r_cpos = cpu_to_le64(cpos);
		ret_rec->r_refcount = 0;
		if (i < le16_to_cpu(rb->rf_records.rl_used) &&
		    le64_to_cpu(rec->r_cpos) < cpos + len)
			ret_rec->r_clusters =
				cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
		else
			ret_rec->r_clusters = cpu_to_le32(len);
	}

out:
	*index = i;
}

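/*
 * Worked example for the hole case above (added sketch): with records
 * {r_cpos = 0, r_clusters = 4} and {r_cpos = 10, r_clusters = 2}, a lookup
 * for cpos = 5, len = 8 finds no record containing cpos = 5, so a record
 * {r_cpos = 5, r_clusters = 5, r_refcount = 0} is faked; it stops at the
 * next real record's r_cpos = 10.
 */
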
/*
 * Try to remove the refcount tree.  The mechanism is:
 * 1) Check whether i_clusters == 0; if not, exit.
 * 2) Check whether we have i_xattr_loc in the dinode; if so, exit.
 * 3) Check whether we have inline xattr values stored outside; if so, exit.
 * 4) Remove the tree.
 */
int ocfs2_try_remove_refcount_tree(struct inode *inode,
				   struct buffer_head *di_bh)
{
	int ret;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	down_write(&oi->ip_xattr_sem);
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_clusters)
		goto out;

	if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
		goto out;

	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
	    ocfs2_has_inline_xattr_value_outside(inode, di))
		goto out;

	ret = ocfs2_remove_refcount_tree(inode, di_bh);
	if (ret)
		mlog_errno(ret);
out:
	up_write(&oi->ip_alloc_sem);
	up_write(&oi->ip_xattr_sem);
	return 0;
}

/*
 * Find the end range for a leaf refcount block indicated by
 * el->l_recs[index].e_blkno.
 */
static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
				       struct buffer_head *ref_root_bh,
				       struct ocfs2_extent_block *eb,
				       struct ocfs2_extent_list *el,
				       int index, u32 *cpos_end)
{
	int ret, i, subtree_root;
	u32 cpos;
	u64 blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_path *left_path = NULL, *right_path = NULL;
	struct ocfs2_extent_tree et;
	struct ocfs2_extent_list *tmp_el;

	if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
		/*
		 * We have an extent rec after index, so just use the e_cpos
		 * of the next extent rec.
		 */
		*cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
		return 0;
	}

	if (!eb || (eb && !eb->h_next_leaf_blk)) {
		/*
		 * We are the last extent rec, so any high cpos should
		 * be stored in this leaf refcount block.
		 */
		*cpos_end = UINT_MAX;
		return 0;
	}

	/*
	 * If the extent block isn't the last one, we have to find
	 * the subtree root between this extent block and the next
	 * leaf extent block and get the corresponding e_cpos from
	 * the subroot.  Otherwise we may corrupt the b-tree.
	 */
	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

	left_path = ocfs2_new_path_from_et(&et);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
	ret = ocfs2_find_path(ci, left_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	right_path = ocfs2_new_path_from_path(left_path);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(ci, right_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	subtree_root = ocfs2_find_subtree_root(&et, left_path,
					       right_path);

	tmp_el = left_path->p_node[subtree_root].el;
	blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
	for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
		if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
			*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
			break;
		}
	}

	BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));

out:
	ocfs2_free_path(left_path);
	ocfs2_free_path(right_path);
	return ret;
}

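/*
 * Sketch of how the result is used (added note): ocfs2_get_refcount_rec()
 * below clamps its search length with the returned cpos_end, so a faked
 * hole record never crosses into the next leaf refcount block:
 *
 *	if (cpos_end < low_cpos + len)
 *		len = cpos_end - low_cpos;
 */
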
/*
 * Given a cpos and len, try to find the refcount record which contains cpos.
 * 1. If cpos can be found in one refcount record, return the record.
 * 2. If cpos can't be found, return a fake record which starts from cpos
 *    and ends at either cpos+len or the start of the next record,
 *    whichever is smaller.  This fake record has r_refcount = 0.
 */
static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
				  struct buffer_head *ref_root_bh,
				  u64 cpos, unsigned int len,
				  struct ocfs2_refcount_rec *ret_rec,
				  int *index,
				  struct buffer_head **ret_bh)
{
	int ret = 0, i, found;
	u32 low_cpos, uninitialized_var(cpos_end);
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_extent_block *eb = NULL;
	struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
		ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
					      ret_rec, index);
		*ret_bh = ref_root_bh;
		get_bh(ref_root_bh);
		return 0;
	}

	el = &rb->rf_list;
	low_cpos = cpos & OCFS2_32BIT_POS_MASK;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ret = ocfs2_error(sb,
					  "refcount tree %llu has non-zero tree depth in leaf btree block %llu\n",
					  (unsigned long long)ocfs2_metadata_cache_owner(ci),
					  (unsigned long long)eb_bh->b_blocknr);
			goto out;
		}
	}

	found = 0;
	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
			found = 1;
			break;
		}
	}

	if (found) {
		ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
						  eb, el, i, &cpos_end);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (cpos_end < low_cpos + len)
			len = cpos_end - low_cpos;
	}

	ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
					&ref_leaf_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
				      ret_rec, index);
	*ret_bh = ref_leaf_bh;
out:
	brelse(eb_bh);
	return ret;
}

enum ocfs2_ref_rec_contig {
	REF_CONTIG_NONE = 0,
	REF_CONTIG_LEFT,
	REF_CONTIG_RIGHT,
	REF_CONTIG_LEFTRIGHT,
};

static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
				    int index)
{
	if ((rb->rf_records.rl_recs[index].r_refcount ==
	    rb->rf_records.rl_recs[index + 1].r_refcount) &&
	    (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
	    le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
	    le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
		return REF_CONTIG_RIGHT;

	return REF_CONTIG_NONE;
}

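/*
 * Example (added sketch): records {r_cpos = 4, r_clusters = 2,
 * r_refcount = 3} and {r_cpos = 6, r_clusters = 5, r_refcount = 3} touch
 * and share a refcount, so ocfs2_refcount_rec_adjacent() on the first one
 * returns REF_CONTIG_RIGHT; differing refcounts or a gap would give
 * REF_CONTIG_NONE.
 */
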
static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
				  int index)
{
	enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
		ret = ocfs2_refcount_rec_adjacent(rb, index);

	if (index > 0) {
		enum ocfs2_ref_rec_contig tmp;

		tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);

		if (tmp == REF_CONTIG_RIGHT) {
			if (ret == REF_CONTIG_RIGHT)
				ret = REF_CONTIG_LEFTRIGHT;
			else
				ret = REF_CONTIG_LEFT;
		}
	}

	return ret;
}

static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
					   int index)
{
	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
	       rb->rf_records.rl_recs[index+1].r_refcount);

	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
		memmove(&rb->rf_records.rl_recs[index + 1],
			&rb->rf_records.rl_recs[index + 2],
			sizeof(struct ocfs2_refcount_rec) *
			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));

	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
	       0, sizeof(struct ocfs2_refcount_rec));
	le16_add_cpu(&rb->rf_records.rl_used, -1);
}

/*
 * Merge the refcount rec if we are contiguous with the adjacent recs.
 */
static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
				     int index)
{
	enum ocfs2_ref_rec_contig contig =
				ocfs2_refcount_rec_contig(rb, index);

	if (contig == REF_CONTIG_NONE)
		return;

	if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
		BUG_ON(index == 0);
		index--;
	}

	ocfs2_rotate_refcount_rec_left(rb, index);

	if (contig == REF_CONTIG_LEFTRIGHT)
		ocfs2_rotate_refcount_rec_left(rb, index);
}

/*
 * Change the refcount indexed by "index" in ref_leaf_bh.
 * If the refcount reaches 0, remove it.
 */
static int ocfs2_change_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_leaf_bh,
				     int index, int merge, int change)
{
	int ret;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rl = &rb->rf_records;
	struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_change_refcount_rec(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		index, le32_to_cpu(rec->r_refcount), change);
	le32_add_cpu(&rec->r_refcount, change);

	if (!rec->r_refcount) {
		if (index != le16_to_cpu(rl->rl_used) - 1) {
			memmove(rec, rec + 1,
				(le16_to_cpu(rl->rl_used) - index - 1) *
				sizeof(struct ocfs2_refcount_rec));
			memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
			       0, sizeof(struct ocfs2_refcount_rec));
		}

		le16_add_cpu(&rl->rl_used, -1);
	} else if (merge)
		ocfs2_refcount_rec_merge(rb, index);

	ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
	return ret;
}

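/*
 * What the expansion below does (added summary): the inline root's record
 * list is copied wholesale into a freshly claimed leaf block, and the root
 * is then rewritten to hold an extent list with a single rec pointing at
 * that leaf, converting the tree from inline to b-tree form.
 */
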
static int ocfs2_expand_inline_ref_root(handle_t *handle,
					struct ocfs2_caching_info *ci,
					struct buffer_head *ref_root_bh,
					struct buffer_head **ref_leaf_bh,
					struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 suballoc_loc, blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *new_rb;
	struct ocfs2_refcount_block *root_rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	new_bh = sb_getblk(sb, blkno);
	if (new_bh == NULL) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Initialize ocfs2_refcount_block.
	 * It should contain the same information as the old root,
	 * so just memcpy it and change the corresponding fields.
	 */
	memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);

	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	new_rb->rf_blkno = cpu_to_le64(blkno);
	new_rb->rf_cpos = cpu_to_le32(0);
	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
	ocfs2_journal_dirty(handle, new_bh);

	/* Now change the root. */
	memset(&root_rb->rf_list, 0, sb->s_blocksize -
	       offsetof(struct ocfs2_refcount_block, rf_list));
	root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
	root_rb->rf_clusters = cpu_to_le32(1);
	root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
	root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
	root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
	root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);

	ocfs2_journal_dirty(handle, ref_root_bh);

	trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
		le16_to_cpu(new_rb->rf_records.rl_used));

	*ref_leaf_bh = new_bh;
	new_bh = NULL;
out:
	brelse(new_bh);
	return ret;
}

static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
					   struct ocfs2_refcount_rec *next)
{
	if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
		ocfs2_get_ref_rec_low_cpos(next))
		return 1;

	return 0;
}

static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *l = a, *r = b;
	u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
	u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);

	if (l_cpos > r_cpos)
		return 1;
	if (l_cpos < r_cpos)
		return -1;
	return 0;
}

static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *l = a, *r = b;
	u64 l_cpos = le64_to_cpu(l->r_cpos);
	u64 r_cpos = le64_to_cpu(r->r_cpos);

	if (l_cpos > r_cpos)
		return 1;
	if (l_cpos < r_cpos)
		return -1;
	return 0;
}

static void swap_refcount_rec(void *a, void *b, int size)
{
	struct ocfs2_refcount_rec *l = a, *r = b;

	swap(*l, *r);
}

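/*
 * These comparators and the swap helper plug straight into the kernel's
 * sort() (a sketch of the call made below in
 * ocfs2_divide_leaf_refcount_block()):
 *
 *	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
 *	     sizeof(struct ocfs2_refcount_rec),
 *	     cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
 */
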
/*
 * The refcount recs are ordered by their 64-bit cpos, but we will use
 * the low 32 bits as the e_cpos in the b-tree, so we need to make sure
 * that this pos isn't intersected with others.
 *
 * Note: The refcount recs are already sorted by their low 32-bit cpos,
 * so just try the middle pos first, and we will exit when we find
 * a good position.
 */
static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
					 u32 *split_pos, int *split_index)
{
	int num_used = le16_to_cpu(rl->rl_used);
	int delta, middle = num_used / 2;

	for (delta = 0; delta < middle; delta++) {
		/* Let's check delta earlier than middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle - delta - 1],
					&rl->rl_recs[middle - delta])) {
			*split_index = middle - delta;
			break;
		}

		/* For even counts, don't walk off the end */
		if ((middle + delta + 1) == num_used)
			continue;

		/* Now try delta past middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle + delta],
					&rl->rl_recs[middle + delta + 1])) {
			*split_index = middle + delta + 1;
			break;
		}
	}

	if (delta >= middle)
		return -ENOSPC;

	*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
	return 0;
}

static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
					    struct buffer_head *new_bh,
					    u32 *split_cpos)
{
	int split_index = 0, num_moved, ret;
	u32 cpos = 0;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rl = &rb->rf_records;
	struct ocfs2_refcount_block *new_rb =
		(struct ocfs2_refcount_block *)new_bh->b_data;
	struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;

	trace_ocfs2_divide_leaf_refcount_block(
		(unsigned long long)ref_leaf_bh->b_blocknr,
		le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));

	/*
	 * XXX: Improvement later.
	 * If we know all the high 32 bits of the cpos are the same, there is
	 * no need to sort.
	 *
	 * In order to make the whole process safe, we do:
	 * 1. sort the entries by their low 32-bit cpos first so that we can
	 *    find the split cpos easily.
	 * 2. call ocfs2_insert_extent to insert the new refcount block.
	 * 3. move the refcount rec to the new block.
	 * 4. sort the entries by their 64-bit cpos.
	 * 5. dirty the new_rb and rb.
	 */
	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_low_cpos, swap_refcount_rec);

	ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	new_rb->rf_cpos = cpu_to_le32(cpos);

	/* move refcount records starting from split_index to the new block. */
	num_moved = le16_to_cpu(rl->rl_used) - split_index;
	memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* OK, remove the entries we just moved over to the other block. */
	memset(&rl->rl_recs[split_index], 0,
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* Change old and new rl_used accordingly. */
	le16_add_cpu(&rl->rl_used, -num_moved);
	new_rl->rl_used = cpu_to_le16(num_moved);

	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_cpos, swap_refcount_rec);

	sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_cpos, swap_refcount_rec);

	*split_cpos = cpos;
	return 0;
}

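/*
 * Worked example for the division above (added sketch, assuming the
 * records don't overlap): with rl_used = 4 and low-cpos-sorted records at
 * cpos 0, 2, 5 and 9, ocfs2_find_refcount_split_pos() tries the middle
 * first and picks split_index = 2, so the records at cpos 5 and 9 move to
 * the new leaf and its rf_cpos becomes 5.
 */
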
static int ocfs2_new_leaf_refcount_block(handle_t *handle,
					 struct ocfs2_caching_info *ci,
					 struct buffer_head *ref_root_bh,
					 struct buffer_head *ref_leaf_bh,
					 struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got, new_cpos;
	u64 suballoc_loc, blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *root_rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *new_rb;
	struct ocfs2_extent_tree ref_et;

	BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	new_bh = sb_getblk(sb, blkno);
	if (new_bh == NULL) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Initialize ocfs2_refcount_block. */
	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	memset(new_rb, 0, sb->s_blocksize);
	strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
	new_rb->rf_blkno = cpu_to_le64(blkno);
	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
	new_rb->rf_records.rl_count =
				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
	new_rb->rf_generation = root_rb->rf_generation;

	ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_journal_dirty(handle, ref_leaf_bh);
	ocfs2_journal_dirty(handle, new_bh);

	ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);

	trace_ocfs2_new_leaf_refcount_block(
			(unsigned long long)new_bh->b_blocknr, new_cpos);

	/* Insert the new leaf block with the specific offset cpos. */
	ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
				  1, 0, meta_ac);
	if (ret)
		mlog_errno(ret);

out:
	brelse(new_bh);
	return ret;
}

static int ocfs2_expand_refcount_tree(handle_t *handle,
				      struct ocfs2_caching_info *ci,
				      struct buffer_head *ref_root_bh,
				      struct buffer_head *ref_leaf_bh,
				      struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	struct buffer_head *expand_bh = NULL;

	if (ref_root_bh == ref_leaf_bh) {
		/*
		 * The old root bh hasn't been expanded to a b-tree,
		 * so expand it first.
		 */
		ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
						   &expand_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	} else {
		expand_bh = ref_leaf_bh;
		get_bh(expand_bh);
	}

	/* Now add a new refcount block into the tree. */
	ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
					    expand_bh, meta_ac);
	if (ret)
		mlog_errno(ret);
out:
	brelse(expand_bh);
	return ret;
}

/*
 * Adjust the extent rec in the b-tree representing ref_leaf_bh.
 *
 * Only called when we have inserted a new refcount rec at index 0
 * which means ocfs2_extent_rec.e_cpos may need some change.
 */
static int ocfs2_adjust_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec)
{
	int ret = 0, i;
	u32 new_cpos, old_cpos;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_tree et;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
	struct ocfs2_extent_list *el;

	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
		goto out;

	rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	old_cpos = le32_to_cpu(rb->rf_cpos);
	new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
	if (old_cpos <= new_cpos)
		goto out;

	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(ci, path, old_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * 2 more credits: one for the leaf refcount block, one for
	 * the extent block that contains the extent rec.
	 */
	ret = ocfs2_extend_trans(handle, 2);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* Change the leaf extent block first. */
	el = path_leaf_el(path);

	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
		if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
			break;

	BUG_ON(i == le16_to_cpu(el->l_next_free_rec));

	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);

	/* Change the r_cpos in the leaf block. */
	rb->rf_cpos = cpu_to_le32(new_cpos);

	ocfs2_journal_dirty(handle, path_leaf_bh(path));
	ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
	ocfs2_free_path(path);
	return ret;
}

static int ocfs2_insert_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec,
				     int index, int merge,
				     struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
	struct buffer_head *new_bh = NULL;

	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

	if (rf_list->rl_used == rf_list->rl_count) {
		u64 cpos = le64_to_cpu(rec->r_cpos);
		u32 len = le32_to_cpu(rec->r_clusters);

		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
						 ref_leaf_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, NULL, &index,
					     &new_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ref_leaf_bh = new_bh;
		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
		rf_list = &rb->rf_records;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (index < le16_to_cpu(rf_list->rl_used))
		memmove(&rf_list->rl_recs[index + 1],
			&rf_list->rl_recs[index],
			(le16_to_cpu(rf_list->rl_used) - index) *
			 sizeof(struct ocfs2_refcount_rec));

	trace_ocfs2_insert_refcount_rec(
		(unsigned long long)ref_leaf_bh->b_blocknr, index,
		(unsigned long long)le64_to_cpu(rec->r_cpos),
		le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));

	rf_list->rl_recs[index] = *rec;

	le16_add_cpu(&rf_list->rl_used, 1);

	if (merge)
		ocfs2_refcount_rec_merge(rb, index);

	ocfs2_journal_dirty(handle, ref_leaf_bh);

	if (index == 0) {
		ret = ocfs2_adjust_refcount_rec(handle, ci,
						ref_root_bh,
						ref_leaf_bh, rec);
		if (ret)
			mlog_errno(ret);
	}
out:
	brelse(new_bh);
	return ret;
}

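/*
 * Worked example for the split below (added sketch): splitting orig_rec
 * {r_cpos = 0, r_clusters = 10, r_refcount = 1} with split_rec
 * {r_cpos = 3, r_clusters = 4, r_refcount = 2} is a split in the middle
 * with a live refcount, so recs_need = 2 and the leaf ends up with three
 * records: {0, 3, 1}, {3, 4, 2} and {7, 3, 1}.
 */
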
/*
 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
 * This is much simpler than our b-tree code.
 * split_rec is the new refcount rec we want to insert.
 * If split_rec->r_refcount > 0, we are changing the refcount (in case we
 * increase a refcount or decrease a refcount to non-zero).
 * If split_rec->r_refcount == 0, we are punching a hole in the current
 * refcount rec (in case we decrease a refcount to zero).
 */
static int ocfs2_split_refcount_rec(handle_t *handle,
				    struct ocfs2_caching_info *ci,
				    struct buffer_head *ref_root_bh,
				    struct buffer_head *ref_leaf_bh,
				    struct ocfs2_refcount_rec *split_rec,
				    int index, int merge,
				    struct ocfs2_alloc_context *meta_ac,
				    struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret, recs_need;
	u32 len;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
	struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
	struct ocfs2_refcount_rec *tail_rec = NULL;
	struct buffer_head *new_bh = NULL;

	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

	trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
		le32_to_cpu(orig_rec->r_clusters),
		le32_to_cpu(orig_rec->r_refcount),
		le64_to_cpu(split_rec->r_cpos),
		le32_to_cpu(split_rec->r_clusters),
		le32_to_cpu(split_rec->r_refcount));

	/*
	 * If we just need to split the header or tail clusters,
	 * no more recs are needed; just the split is OK.
	 * Otherwise we need at least one new rec.
	 */
	if (!split_rec->r_refcount &&
	    (split_rec->r_cpos == orig_rec->r_cpos ||
	     le64_to_cpu(split_rec->r_cpos) +
	     le32_to_cpu(split_rec->r_clusters) ==
	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
		recs_need = 0;
	else
		recs_need = 1;

	/*
	 * We need one more rec if we split in the middle and the new rec has
	 * some refcount in it.
	 */
	if (split_rec->r_refcount &&
	    (split_rec->r_cpos != orig_rec->r_cpos &&
	     le64_to_cpu(split_rec->r_cpos) +
	     le32_to_cpu(split_rec->r_clusters) !=
	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
		recs_need++;

	/* If the leaf block doesn't have enough records, expand it. */
	if (le16_to_cpu(rf_list->rl_used) + recs_need >
	    le16_to_cpu(rf_list->rl_count)) {
		struct ocfs2_refcount_rec tmp_rec;
		u64 cpos = le64_to_cpu(orig_rec->r_cpos);
		len = le32_to_cpu(orig_rec->r_clusters);
		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
						 ref_leaf_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * We have to re-get it since now cpos may be moved to
		 * another leaf block.
		 */
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &tmp_rec, &index,
					     &new_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ref_leaf_bh = new_bh;
		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
		rf_list = &rb->rf_records;
		orig_rec = &rf_list->rl_recs[index];
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We have calculated how many new records we need and stored
	 * it in recs_need, so spare enough space first by moving the
	 * records after "index" to the end.
	 */
	if (index != le16_to_cpu(rf_list->rl_used) - 1)
		memmove(&rf_list->rl_recs[index + 1 + recs_need],
			&rf_list->rl_recs[index + 1],
			(le16_to_cpu(rf_list->rl_used) - index - 1) *
			 sizeof(struct ocfs2_refcount_rec));

	len = (le64_to_cpu(orig_rec->r_cpos) +
	      le32_to_cpu(orig_rec->r_clusters)) -
	      (le64_to_cpu(split_rec->r_cpos) +
	      le32_to_cpu(split_rec->r_clusters));

	/*
	 * If we have "len", then we will split in the tail and move it
	 * to the end of the space we have just spared.
	 */
	if (len) {
		tail_rec = &rf_list->rl_recs[index + recs_need];

		memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
		le64_add_cpu(&tail_rec->r_cpos,
			     le32_to_cpu(tail_rec->r_clusters) - len);
		tail_rec->r_clusters = cpu_to_le32(len);
	}

	/*
	 * If the split pos isn't the same as the original one, we need to
	 * split in the head.
	 *
	 * Note: We have the chance that split_rec.r_refcount = 0,
	 * recs_need = 0 and len > 0, which means we just cut the head from
	 * the orig_rec and in that case we have done some modification to
	 * orig_rec above, so the r_cpos check alone would be misleading;
	 * hence the tail_rec != orig_rec test.
	 */
	if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
		len = le64_to_cpu(split_rec->r_cpos) -
		      le64_to_cpu(orig_rec->r_cpos);
		orig_rec->r_clusters = cpu_to_le32(len);
		index++;
	}

	le16_add_cpu(&rf_list->rl_used, recs_need);

	if (split_rec->r_refcount) {
		rf_list->rl_recs[index] = *split_rec;
		trace_ocfs2_split_refcount_rec_insert(
			(unsigned long long)ref_leaf_bh->b_blocknr, index,
			(unsigned long long)le64_to_cpu(split_rec->r_cpos),
			le32_to_cpu(split_rec->r_clusters),
			le32_to_cpu(split_rec->r_refcount));

		if (merge)
			ocfs2_refcount_rec_merge(rb, index);
	}

	ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
	brelse(new_bh);
	return ret;
}

static int __ocfs2_increase_refcount(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     u64 cpos, u32 len, int merge,
				     struct ocfs2_alloc_context *meta_ac,
				     struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0, index;
	struct buffer_head *ref_leaf_bh = NULL;
	struct ocfs2_refcount_rec rec;
	unsigned int set_len = 0;

	trace_ocfs2_increase_refcount_begin(
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long)cpos, len);

	while (len) {
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &rec, &index,
					     &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		set_len = le32_to_cpu(rec.r_clusters);

		/*
		 * Here we may meet with 3 situations:
		 *
		 * 1. If we find an already existing record and the length
		 *    is the same, cool, we just need to increase the
		 *    r_refcount and it is OK.
		 * 2. If we find a hole, just insert it with r_refcount = 1.
		 * 3. If we are in the middle of one extent record, split
		 *    it.
		 */
		if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
		    set_len <= len) {
			trace_ocfs2_increase_refcount_change(
				(unsigned long long)cpos, set_len,
				le32_to_cpu(rec.r_refcount));
			ret = ocfs2_change_refcount_rec(handle, ci,
							ref_leaf_bh, index,
							merge, 1);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else if (!rec.r_refcount) {
			rec.r_refcount = cpu_to_le32(1);

			trace_ocfs2_increase_refcount_insert(
			     (unsigned long long)le64_to_cpu(rec.r_cpos),
			     set_len);
			ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
							ref_leaf_bh,
							&rec, index,
							merge, meta_ac);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else {
			set_len = min((u64)(cpos + len),
				      le64_to_cpu(rec.r_cpos) + set_len) - cpos;
			rec.r_cpos = cpu_to_le64(cpos);
			rec.r_clusters = cpu_to_le32(set_len);
			le32_add_cpu(&rec.r_refcount, 1);

			trace_ocfs2_increase_refcount_split(
			     (unsigned long long)le64_to_cpu(rec.r_cpos),
			     set_len, le32_to_cpu(rec.r_refcount));
			ret = ocfs2_split_refcount_rec(handle, ci,
						       ref_root_bh, ref_leaf_bh,
						       &rec, index, merge,
						       meta_ac, dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		cpos += set_len;
		len -= set_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

out:
	brelse(ref_leaf_bh);
	return ret;
}

static int ocfs2_remove_refcount_extent(handle_t *handle,
				struct ocfs2_caching_info *ci,
				struct buffer_head *ref_root_bh,
				struct buffer_head *ref_leaf_bh,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_extent_tree et;

	BUG_ON(rb->rf_records.rl_used);

	trace_ocfs2_remove_refcount_extent(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)ref_leaf_bh->b_blocknr,
		le32_to_cpu(rb->rf_cpos));

	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
	ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
				  1, meta_ac, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_remove_from_cache(ci, ref_leaf_bh);

	/*
	 * Add the freed block to the dealloc context so that it will be
	 * freed when we run the deallocs.
	 */
	ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(rb->rf_suballoc_slot),
					le64_to_cpu(rb->rf_suballoc_loc),
					le64_to_cpu(rb->rf_blkno),
					le16_to_cpu(rb->rf_suballoc_bit));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;

	le32_add_cpu(&rb->rf_clusters, -1);

	/*
	 * Check whether we need to restore the root refcount block if
	 * there is no leaf extent block at all.
2137 */ 2138 if (!rb->rf_list.l_next_free_rec) { 2139 BUG_ON(rb->rf_clusters); 2140 2141 trace_ocfs2_restore_refcount_block( 2142 (unsigned long long)ref_root_bh->b_blocknr); 2143 2144 rb->rf_flags = 0; 2145 rb->rf_parent = 0; 2146 rb->rf_cpos = 0; 2147 memset(&rb->rf_records, 0, sb->s_blocksize - 2148 offsetof(struct ocfs2_refcount_block, rf_records)); 2149 rb->rf_records.rl_count = 2150 cpu_to_le16(ocfs2_refcount_recs_per_rb(sb)); 2151 } 2152 2153 ocfs2_journal_dirty(handle, ref_root_bh); 2154 2155 out: 2156 return ret; 2157 } 2158 2159 int ocfs2_increase_refcount(handle_t *handle, 2160 struct ocfs2_caching_info *ci, 2161 struct buffer_head *ref_root_bh, 2162 u64 cpos, u32 len, 2163 struct ocfs2_alloc_context *meta_ac, 2164 struct ocfs2_cached_dealloc_ctxt *dealloc) 2165 { 2166 return __ocfs2_increase_refcount(handle, ci, ref_root_bh, 2167 cpos, len, 1, 2168 meta_ac, dealloc); 2169 } 2170 2171 static int ocfs2_decrease_refcount_rec(handle_t *handle, 2172 struct ocfs2_caching_info *ci, 2173 struct buffer_head *ref_root_bh, 2174 struct buffer_head *ref_leaf_bh, 2175 int index, u64 cpos, unsigned int len, 2176 struct ocfs2_alloc_context *meta_ac, 2177 struct ocfs2_cached_dealloc_ctxt *dealloc) 2178 { 2179 int ret; 2180 struct ocfs2_refcount_block *rb = 2181 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 2182 struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index]; 2183 2184 BUG_ON(cpos < le64_to_cpu(rec->r_cpos)); 2185 BUG_ON(cpos + len > 2186 le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters)); 2187 2188 trace_ocfs2_decrease_refcount_rec( 2189 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2190 (unsigned long long)cpos, len); 2191 2192 if (cpos == le64_to_cpu(rec->r_cpos) && 2193 len == le32_to_cpu(rec->r_clusters)) 2194 ret = ocfs2_change_refcount_rec(handle, ci, 2195 ref_leaf_bh, index, 1, -1); 2196 else { 2197 struct ocfs2_refcount_rec split = *rec; 2198 split.r_cpos = cpu_to_le64(cpos); 2199 split.r_clusters = cpu_to_le32(len); 2200 2201 le32_add_cpu(&split.r_refcount, -1); 2202 2203 ret = ocfs2_split_refcount_rec(handle, ci, 2204 ref_root_bh, ref_leaf_bh, 2205 &split, index, 1, 2206 meta_ac, dealloc); 2207 } 2208 2209 if (ret) { 2210 mlog_errno(ret); 2211 goto out; 2212 } 2213 2214 /* Remove the leaf refcount block if it contains no refcount record. 
*/ 2215 if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) { 2216 ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh, 2217 ref_leaf_bh, meta_ac, 2218 dealloc); 2219 if (ret) 2220 mlog_errno(ret); 2221 } 2222 2223 out: 2224 return ret; 2225 } 2226 2227 static int __ocfs2_decrease_refcount(handle_t *handle, 2228 struct ocfs2_caching_info *ci, 2229 struct buffer_head *ref_root_bh, 2230 u64 cpos, u32 len, 2231 struct ocfs2_alloc_context *meta_ac, 2232 struct ocfs2_cached_dealloc_ctxt *dealloc, 2233 int delete) 2234 { 2235 int ret = 0, index = 0; 2236 struct ocfs2_refcount_rec rec; 2237 unsigned int r_count = 0, r_len; 2238 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2239 struct buffer_head *ref_leaf_bh = NULL; 2240 2241 trace_ocfs2_decrease_refcount( 2242 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2243 (unsigned long long)cpos, len, delete); 2244 2245 while (len) { 2246 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2247 cpos, len, &rec, &index, 2248 &ref_leaf_bh); 2249 if (ret) { 2250 mlog_errno(ret); 2251 goto out; 2252 } 2253 2254 r_count = le32_to_cpu(rec.r_refcount); 2255 BUG_ON(r_count == 0); 2256 if (!delete) 2257 BUG_ON(r_count > 1); 2258 2259 r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) + 2260 le32_to_cpu(rec.r_clusters)) - cpos; 2261 2262 ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh, 2263 ref_leaf_bh, index, 2264 cpos, r_len, 2265 meta_ac, dealloc); 2266 if (ret) { 2267 mlog_errno(ret); 2268 goto out; 2269 } 2270 2271 if (le32_to_cpu(rec.r_refcount) == 1 && delete) { 2272 ret = ocfs2_cache_cluster_dealloc(dealloc, 2273 ocfs2_clusters_to_blocks(sb, cpos), 2274 r_len); 2275 if (ret) { 2276 mlog_errno(ret); 2277 goto out; 2278 } 2279 } 2280 2281 cpos += r_len; 2282 len -= r_len; 2283 brelse(ref_leaf_bh); 2284 ref_leaf_bh = NULL; 2285 } 2286 2287 out: 2288 brelse(ref_leaf_bh); 2289 return ret; 2290 } 2291 2292 /* Caller must hold refcount tree lock. */ 2293 int ocfs2_decrease_refcount(struct inode *inode, 2294 handle_t *handle, u32 cpos, u32 len, 2295 struct ocfs2_alloc_context *meta_ac, 2296 struct ocfs2_cached_dealloc_ctxt *dealloc, 2297 int delete) 2298 { 2299 int ret; 2300 u64 ref_blkno; 2301 struct ocfs2_inode_info *oi = OCFS2_I(inode); 2302 struct buffer_head *ref_root_bh = NULL; 2303 struct ocfs2_refcount_tree *tree; 2304 2305 BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); 2306 2307 ret = ocfs2_get_refcount_block(inode, &ref_blkno); 2308 if (ret) { 2309 mlog_errno(ret); 2310 goto out; 2311 } 2312 2313 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree); 2314 if (ret) { 2315 mlog_errno(ret); 2316 goto out; 2317 } 2318 2319 ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno, 2320 &ref_root_bh); 2321 if (ret) { 2322 mlog_errno(ret); 2323 goto out; 2324 } 2325 2326 ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh, 2327 cpos, len, meta_ac, dealloc, delete); 2328 if (ret) 2329 mlog_errno(ret); 2330 out: 2331 brelse(ref_root_bh); 2332 return ret; 2333 } 2334 2335 /* 2336 * Mark the already-existing extent at cpos as refcounted for len clusters. 2337 * This adds the refcount extent flag. 2338 * 2339 * If the existing extent is larger than the request, initiate a 2340 * split. An attempt will be made at merging with adjacent extents. 2341 * 2342 * The caller is responsible for passing down meta_ac if we'll need it. 
 */
static int ocfs2_mark_extent_refcounted(struct inode *inode,
				struct ocfs2_extent_tree *et,
				handle_t *handle, u32 cpos,
				u32 len, u32 phys,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;

	trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
					   cpos, len, phys);

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
		ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
				  inode->i_ino);
		goto out;
	}

	ret = ocfs2_change_extent_flag(handle, et, cpos,
				       len, phys, meta_ac, dealloc,
				       OCFS2_EXT_REFCOUNTED, 0);
	if (ret)
		mlog_errno(ret);

out:
	return ret;
}

/*
 * Given some contiguous physical clusters, calculate what we need
 * for modifying their refcount.
 */
static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
					    struct ocfs2_caching_info *ci,
					    struct buffer_head *ref_root_bh,
					    u64 start_cpos,
					    u32 clusters,
					    int *meta_add,
					    int *credits)
{
	int ret = 0, index, ref_blocks = 0, recs_add = 0;
	u64 cpos = start_cpos;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_rec rec;
	struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
	u32 len;

	while (clusters) {
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, clusters, &rec,
					     &index, &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (ref_leaf_bh != prev_bh) {
			/*
			 * Now we encounter a new leaf block, so calculate
			 * whether we need to extend the old leaf.
			 */
			if (prev_bh) {
				rb = (struct ocfs2_refcount_block *)
					prev_bh->b_data;

				if (le16_to_cpu(rb->rf_records.rl_used) +
				    recs_add >
				    le16_to_cpu(rb->rf_records.rl_count))
					ref_blocks++;
			}

			recs_add = 0;
			*credits += 1;
			brelse(prev_bh);
			prev_bh = ref_leaf_bh;
			get_bh(prev_bh);
		}

		trace_ocfs2_calc_refcount_meta_credits_iterate(
				recs_add, (unsigned long long)cpos, clusters,
				(unsigned long long)le64_to_cpu(rec.r_cpos),
				le32_to_cpu(rec.r_clusters),
				le32_to_cpu(rec.r_refcount), index);

		len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
			  le32_to_cpu(rec.r_clusters)) - cpos;
		/*
		 * We record all the records which will be inserted into the
		 * same refcount block, so that we can tell exactly whether
		 * we need a new refcount block or not.
		 *
		 * If we will insert a new one, this is easy and only happens
		 * while adding the refcounted flag to an extent, so we don't
		 * have a chance of splitting.  We just need one record.
		 *
		 * If the refcount rec already exists, things are a little
		 * more complicated.  We may have to:
		 * 1) split at the beginning if the start pos isn't aligned;
		 *    we need 1 more record in this case.
		 * 2) split at the end if the end pos isn't aligned;
		 *    we need 1 more record in this case.
		 * 3) split in the middle because of file system
		 *    fragmentation; we need 2 more records in this case
		 *    (we can't detect this beforehand, so always assume the
		 *    worst case).
		 */
		if (rec.r_refcount) {
			recs_add += 2;
			/* Check whether we need a split at the beginning.
			 */
			if (cpos == start_cpos &&
			    cpos != le64_to_cpu(rec.r_cpos))
				recs_add++;

			/* Check whether we need a split at the end. */
			if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
			    le32_to_cpu(rec.r_clusters))
				recs_add++;
		} else
			recs_add++;

		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
		clusters -= len;
		cpos += len;
	}

	if (prev_bh) {
		rb = (struct ocfs2_refcount_block *)prev_bh->b_data;

		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
		    le16_to_cpu(rb->rf_records.rl_count))
			ref_blocks++;

		*credits += 1;
	}

	if (!ref_blocks)
		goto out;

	*meta_add += ref_blocks;
	*credits += ref_blocks;

	/*
	 * So we may need ref_blocks to insert into the tree.
	 * That also means we need to change the b-tree and add that number
	 * of records, since we never merge them.
	 * We need one more block for the expansion, since the newly created
	 * leaf block may also be full and need a split.
	 */
	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
		struct ocfs2_extent_tree et;

		ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
		*meta_add += ocfs2_extend_meta_needed(et.et_root_el);
		*credits += ocfs2_calc_extend_credits(sb,
						      et.et_root_el);
	} else {
		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
		*meta_add += 1;
	}

out:

	trace_ocfs2_calc_refcount_meta_credits(
		(unsigned long long)start_cpos, clusters,
		*meta_add, *credits);
	brelse(ref_leaf_bh);
	brelse(prev_bh);
	return ret;
}

/*
 * For a refcount tree, we will decrease the refcount of some contiguous
 * clusters, so just go through the tree to see how many blocks we are
 * going to touch and whether we need to create new blocks.
 *
 * Normally the refcount blocks storing these refcounts should be
 * contiguous as well, so we can get the number easily.
 * We will at most add 2 split refcount records and 2 more
 * refcount blocks, so just check it in a rough way.
 *
 * Caller must hold refcount tree lock.
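 *
 * A rough worst case (illustrative): freeing one contiguous run whose
 * refcounts sit in a single leaf can split a record at each end
 * (2 extra records); if that overflows the leaf, one new block is
 * needed, plus one more should the expansion itself require a split.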
 */
int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
					  u64 refcount_loc,
					  u64 phys_blkno,
					  u32 clusters,
					  int *credits,
					  int *ref_blocks)
{
	int ret;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_tree *tree;
	u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
		ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
				  inode->i_ino);
		goto out;
	}

	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
				      refcount_loc, &tree);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
					&ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
					       &tree->rf_ci,
					       ref_root_bh,
					       start_cpos, clusters,
					       ref_blocks, credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);

out:
	brelse(ref_root_bh);
	return ret;
}

#define	MAX_CONTIG_BYTES	1048576

static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
{
	return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
}

static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
{
	return ~(ocfs2_cow_contig_clusters(sb) - 1);
}

/*
 * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
 * find an offset (start + (n * contig_clusters)) that is closest to cpos
 * while still being less than or equal to it.
 *
 * The goal is to break the extent at a multiple of contig_clusters.
 */
static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
						 unsigned int start,
						 unsigned int cpos)
{
	BUG_ON(start > cpos);

	return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
}

/*
 * Given a cluster count of len, pad it out so that it is a multiple
 * of contig_clusters.
 */
static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
						  unsigned int len)
{
	unsigned int padded =
		(len + (ocfs2_cow_contig_clusters(sb) - 1)) &
		ocfs2_cow_contig_mask(sb);

	/* Did we wrap? */
	if (padded < len)
		padded = UINT_MAX;

	return padded;
}

/*
 * Calculate the start and the number of virtual clusters we need to CoW.
 *
 * cpos is the virtual start cluster position at which we want to do CoW
 * in a file, and write_len is the cluster length.
 * max_cpos is the place where we want to stop CoW intentionally.
 *
 * Normally we will start CoW from the beginning of the extent record
 * containing cpos.  We try to break up extents on boundaries of
 * MAX_CONTIG_BYTES so that we get good I/O from the resulting extent tree.
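 *
 * Worked example (hypothetical, assuming a 4KB cluster size, so
 * contig_clusters = 256): for an extent starting at cluster 100 and an
 * I/O at cpos 700, ocfs2_cow_align_start() gives 100 + (600 & ~255) = 612,
 * and ocfs2_cow_align_length(300) pads the length up to 512.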
2635 */ 2636 static int ocfs2_refcount_cal_cow_clusters(struct inode *inode, 2637 struct ocfs2_extent_list *el, 2638 u32 cpos, 2639 u32 write_len, 2640 u32 max_cpos, 2641 u32 *cow_start, 2642 u32 *cow_len) 2643 { 2644 int ret = 0; 2645 int tree_height = le16_to_cpu(el->l_tree_depth), i; 2646 struct buffer_head *eb_bh = NULL; 2647 struct ocfs2_extent_block *eb = NULL; 2648 struct ocfs2_extent_rec *rec; 2649 unsigned int want_clusters, rec_end = 0; 2650 int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb); 2651 int leaf_clusters; 2652 2653 BUG_ON(cpos + write_len > max_cpos); 2654 2655 if (tree_height > 0) { 2656 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh); 2657 if (ret) { 2658 mlog_errno(ret); 2659 goto out; 2660 } 2661 2662 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 2663 el = &eb->h_list; 2664 2665 if (el->l_tree_depth) { 2666 ret = ocfs2_error(inode->i_sb, 2667 "Inode %lu has non zero tree depth in leaf block %llu\n", 2668 inode->i_ino, 2669 (unsigned long long)eb_bh->b_blocknr); 2670 goto out; 2671 } 2672 } 2673 2674 *cow_len = 0; 2675 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { 2676 rec = &el->l_recs[i]; 2677 2678 if (ocfs2_is_empty_extent(rec)) { 2679 mlog_bug_on_msg(i != 0, "Inode %lu has empty record in " 2680 "index %d\n", inode->i_ino, i); 2681 continue; 2682 } 2683 2684 if (le32_to_cpu(rec->e_cpos) + 2685 le16_to_cpu(rec->e_leaf_clusters) <= cpos) 2686 continue; 2687 2688 if (*cow_len == 0) { 2689 /* 2690 * We should find a refcounted record in the 2691 * first pass. 2692 */ 2693 BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED)); 2694 *cow_start = le32_to_cpu(rec->e_cpos); 2695 } 2696 2697 /* 2698 * If we encounter a hole, a non-refcounted record or 2699 * pass the max_cpos, stop the search. 2700 */ 2701 if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) || 2702 (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) || 2703 (max_cpos <= le32_to_cpu(rec->e_cpos))) 2704 break; 2705 2706 leaf_clusters = le16_to_cpu(rec->e_leaf_clusters); 2707 rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters; 2708 if (rec_end > max_cpos) { 2709 rec_end = max_cpos; 2710 leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos); 2711 } 2712 2713 /* 2714 * How many clusters do we actually need from 2715 * this extent? First we see how many we actually 2716 * need to complete the write. If that's smaller 2717 * than contig_clusters, we try for contig_clusters. 2718 */ 2719 if (!*cow_len) 2720 want_clusters = write_len; 2721 else 2722 want_clusters = (cpos + write_len) - 2723 (*cow_start + *cow_len); 2724 if (want_clusters < contig_clusters) 2725 want_clusters = contig_clusters; 2726 2727 /* 2728 * If the write does not cover the whole extent, we 2729 * need to calculate how we're going to split the extent. 2730 * We try to do it on contig_clusters boundaries. 2731 * 2732 * Any extent smaller than contig_clusters will be 2733 * CoWed in its entirety. 2734 */ 2735 if (leaf_clusters <= contig_clusters) 2736 *cow_len += leaf_clusters; 2737 else if (*cow_len || (*cow_start == cpos)) { 2738 /* 2739 * This extent needs to be CoW'd from its 2740 * beginning, so all we have to do is compute 2741 * how many clusters to grab. We align 2742 * want_clusters to the edge of contig_clusters 2743 * to get better I/O. 
			 */
			want_clusters = ocfs2_cow_align_length(inode->i_sb,
							       want_clusters);

			if (leaf_clusters < want_clusters)
				*cow_len += leaf_clusters;
			else
				*cow_len += want_clusters;
		} else if ((*cow_start + contig_clusters) >=
			   (cpos + write_len)) {
			/*
			 * Breaking off contig_clusters at the front
			 * of the extent will cover our write.  That's
			 * easy.
			 */
			*cow_len = contig_clusters;
		} else if ((rec_end - cpos) <= contig_clusters) {
			/*
			 * Breaking off contig_clusters at the tail of
			 * this extent will cover cpos.
			 */
			*cow_start = rec_end - contig_clusters;
			*cow_len = contig_clusters;
		} else if ((rec_end - cpos) <= want_clusters) {
			/*
			 * While we can't fit the entire write in this
			 * extent, we know that the write goes from cpos
			 * to the end of the extent.  Break that off.
			 * We try to break it at some multiple of
			 * contig_clusters from the front of the extent.
			 * Failing that (ie, cpos is within
			 * contig_clusters of the front), we'll CoW the
			 * entire extent.
			 */
			*cow_start = ocfs2_cow_align_start(inode->i_sb,
							   *cow_start, cpos);
			*cow_len = rec_end - *cow_start;
		} else {
			/*
			 * Ok, the entire write lives in the middle of
			 * this extent.  Let's try to slice the extent up
			 * nicely.  Optimally, our CoW region starts at
			 * m*contig_clusters from the beginning of the
			 * extent and goes for n*contig_clusters,
			 * covering the entire write.
			 */
			*cow_start = ocfs2_cow_align_start(inode->i_sb,
							   *cow_start, cpos);

			want_clusters = (cpos + write_len) - *cow_start;
			want_clusters = ocfs2_cow_align_length(inode->i_sb,
							       want_clusters);
			if (*cow_start + want_clusters <= rec_end)
				*cow_len = want_clusters;
			else
				*cow_len = rec_end - *cow_start;
		}

		/* Have we covered our entire write yet? */
		if ((*cow_start + *cow_len) >= (cpos + write_len))
			break;

		/*
		 * If we reach the end of the extent block and don't get enough
		 * clusters, continue with the next extent block if possible.
		 */
		if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
		    eb && eb->h_next_leaf_blk) {
			brelse(eb_bh);
			eb_bh = NULL;

			ret = ocfs2_read_extent_block(INODE_CACHE(inode),
					le64_to_cpu(eb->h_next_leaf_blk),
					&eb_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			eb = (struct ocfs2_extent_block *) eb_bh->b_data;
			el = &eb->h_list;
			i = -1;
		}
	}

out:
	brelse(eb_bh);
	return ret;
}

/*
 * Prepare meta_ac, data_ac and calculate credits when we want to add
 * num_clusters clusters in the data tree "et" and change the refcount for
 * the old clusters (starting from p_cluster) in the refcount tree.
 *
 * Note:
 * 1. Since we may split the old tree, we will need at most
 *    num_clusters + 2 new leaf records.
 * 2. In some cases we may not need to reserve new clusters (e.g. reflink),
 *    so just pass data_ac = NULL.
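 *
 * The reflink path is a concrete example of note 2: when duplicating an
 * extent list, ocfs2_add_refcounted_extent() calls this helper with
 * data_ac == NULL, because the target inode shares the source's clusters
 * rather than allocating new ones.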
 */
static int ocfs2_lock_refcount_allocators(struct super_block *sb,
					  u32 p_cluster, u32 num_clusters,
					  struct ocfs2_extent_tree *et,
					  struct ocfs2_caching_info *ref_ci,
					  struct buffer_head *ref_root_bh,
					  struct ocfs2_alloc_context **meta_ac,
					  struct ocfs2_alloc_context **data_ac,
					  int *credits)
{
	int ret = 0, meta_add = 0;
	int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);

	if (num_free_extents < 0) {
		ret = num_free_extents;
		mlog_errno(ret);
		goto out;
	}

	if (num_free_extents < num_clusters + 2)
		meta_add =
			ocfs2_extend_meta_needed(et->et_root_el);

	*credits += ocfs2_calc_extend_credits(sb, et->et_root_el);

	ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
					       p_cluster, num_clusters,
					       &meta_add, credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
	ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
						meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (data_ac) {
		ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
					     data_ac);
		if (ret)
			mlog_errno(ret);
	}

out:
	if (ret) {
		if (*meta_ac) {
			ocfs2_free_alloc_context(*meta_ac);
			*meta_ac = NULL;
		}
	}

	return ret;
}

static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
{
	BUG_ON(buffer_dirty(bh));

	clear_buffer_mapped(bh);

	return 0;
}

int ocfs2_duplicate_clusters_by_page(handle_t *handle,
				     struct inode *inode,
				     u32 cpos, u32 old_cluster,
				     u32 new_cluster, u32 new_len)
{
	int ret = 0, partial;
	struct super_block *sb = inode->i_sb;
	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
	struct page *page;
	pgoff_t page_index;
	unsigned int from, to;
	loff_t offset, end, map_end;
	struct address_space *mapping = inode->i_mapping;

	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
					       new_cluster, new_len);

	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
	end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
	/*
	 * We only duplicate pages until we reach the page that contains
	 * i_size - 1.  So trim 'end' to i_size.
	 */
	if (end > i_size_read(inode))
		end = i_size_read(inode);

	while (offset < end) {
		page_index = offset >> PAGE_SHIFT;
		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
		if (map_end > end)
			map_end = end;

		/* from and to are the offsets within the page. */
		from = offset & (PAGE_SIZE - 1);
		to = PAGE_SIZE;
		if (map_end & (PAGE_SIZE - 1))
			to = map_end & (PAGE_SIZE - 1);

		page = find_or_create_page(mapping, page_index, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			mlog_errno(ret);
			break;
		}

		/*
		 * In case PAGE_SIZE <= CLUSTER_SIZE, this page
		 * can't be dirtied before we CoW it out.
2960 */ 2961 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) 2962 BUG_ON(PageDirty(page)); 2963 2964 if (!PageUptodate(page)) { 2965 ret = block_read_full_page(page, ocfs2_get_block); 2966 if (ret) { 2967 mlog_errno(ret); 2968 goto unlock; 2969 } 2970 lock_page(page); 2971 } 2972 2973 if (page_has_buffers(page)) { 2974 ret = walk_page_buffers(handle, page_buffers(page), 2975 from, to, &partial, 2976 ocfs2_clear_cow_buffer); 2977 if (ret) { 2978 mlog_errno(ret); 2979 goto unlock; 2980 } 2981 } 2982 2983 ocfs2_map_and_dirty_page(inode, 2984 handle, from, to, 2985 page, 0, &new_block); 2986 mark_page_accessed(page); 2987 unlock: 2988 unlock_page(page); 2989 put_page(page); 2990 page = NULL; 2991 offset = map_end; 2992 if (ret) 2993 break; 2994 } 2995 2996 return ret; 2997 } 2998 2999 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, 3000 struct inode *inode, 3001 u32 cpos, u32 old_cluster, 3002 u32 new_cluster, u32 new_len) 3003 { 3004 int ret = 0; 3005 struct super_block *sb = inode->i_sb; 3006 struct ocfs2_caching_info *ci = INODE_CACHE(inode); 3007 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); 3008 u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster); 3009 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); 3010 struct ocfs2_super *osb = OCFS2_SB(sb); 3011 struct buffer_head *old_bh = NULL; 3012 struct buffer_head *new_bh = NULL; 3013 3014 trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 3015 new_cluster, new_len); 3016 3017 for (i = 0; i < blocks; i++, old_block++, new_block++) { 3018 new_bh = sb_getblk(osb->sb, new_block); 3019 if (new_bh == NULL) { 3020 ret = -ENOMEM; 3021 mlog_errno(ret); 3022 break; 3023 } 3024 3025 ocfs2_set_new_buffer_uptodate(ci, new_bh); 3026 3027 ret = ocfs2_read_block(ci, old_block, &old_bh, NULL); 3028 if (ret) { 3029 mlog_errno(ret); 3030 break; 3031 } 3032 3033 ret = ocfs2_journal_access(handle, ci, new_bh, 3034 OCFS2_JOURNAL_ACCESS_CREATE); 3035 if (ret) { 3036 mlog_errno(ret); 3037 break; 3038 } 3039 3040 memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize); 3041 ocfs2_journal_dirty(handle, new_bh); 3042 3043 brelse(new_bh); 3044 brelse(old_bh); 3045 new_bh = NULL; 3046 old_bh = NULL; 3047 } 3048 3049 brelse(new_bh); 3050 brelse(old_bh); 3051 return ret; 3052 } 3053 3054 static int ocfs2_clear_ext_refcount(handle_t *handle, 3055 struct ocfs2_extent_tree *et, 3056 u32 cpos, u32 p_cluster, u32 len, 3057 unsigned int ext_flags, 3058 struct ocfs2_alloc_context *meta_ac, 3059 struct ocfs2_cached_dealloc_ctxt *dealloc) 3060 { 3061 int ret, index; 3062 struct ocfs2_extent_rec replace_rec; 3063 struct ocfs2_path *path = NULL; 3064 struct ocfs2_extent_list *el; 3065 struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); 3066 u64 ino = ocfs2_metadata_cache_owner(et->et_ci); 3067 3068 trace_ocfs2_clear_ext_refcount((unsigned long long)ino, 3069 cpos, len, p_cluster, ext_flags); 3070 3071 memset(&replace_rec, 0, sizeof(replace_rec)); 3072 replace_rec.e_cpos = cpu_to_le32(cpos); 3073 replace_rec.e_leaf_clusters = cpu_to_le16(len); 3074 replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb, 3075 p_cluster)); 3076 replace_rec.e_flags = ext_flags; 3077 replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED; 3078 3079 path = ocfs2_new_path_from_et(et); 3080 if (!path) { 3081 ret = -ENOMEM; 3082 mlog_errno(ret); 3083 goto out; 3084 } 3085 3086 ret = ocfs2_find_path(et->et_ci, path, cpos); 3087 if (ret) { 3088 mlog_errno(ret); 3089 goto out; 3090 } 3091 3092 el = path_leaf_el(path); 3093 3094 index = ocfs2_search_extent_list(el, 
cpos); 3095 if (index == -1) { 3096 ret = ocfs2_error(sb, 3097 "Inode %llu has an extent at cpos %u which can no longer be found\n", 3098 (unsigned long long)ino, cpos); 3099 goto out; 3100 } 3101 3102 ret = ocfs2_split_extent(handle, et, path, index, 3103 &replace_rec, meta_ac, dealloc); 3104 if (ret) 3105 mlog_errno(ret); 3106 3107 out: 3108 ocfs2_free_path(path); 3109 return ret; 3110 } 3111 3112 static int ocfs2_replace_clusters(handle_t *handle, 3113 struct ocfs2_cow_context *context, 3114 u32 cpos, u32 old, 3115 u32 new, u32 len, 3116 unsigned int ext_flags) 3117 { 3118 int ret; 3119 struct ocfs2_caching_info *ci = context->data_et.et_ci; 3120 u64 ino = ocfs2_metadata_cache_owner(ci); 3121 3122 trace_ocfs2_replace_clusters((unsigned long long)ino, 3123 cpos, old, new, len, ext_flags); 3124 3125 /*If the old clusters is unwritten, no need to duplicate. */ 3126 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { 3127 ret = context->cow_duplicate_clusters(handle, context->inode, 3128 cpos, old, new, len); 3129 if (ret) { 3130 mlog_errno(ret); 3131 goto out; 3132 } 3133 } 3134 3135 ret = ocfs2_clear_ext_refcount(handle, &context->data_et, 3136 cpos, new, len, ext_flags, 3137 context->meta_ac, &context->dealloc); 3138 if (ret) 3139 mlog_errno(ret); 3140 out: 3141 return ret; 3142 } 3143 3144 int ocfs2_cow_sync_writeback(struct super_block *sb, 3145 struct inode *inode, 3146 u32 cpos, u32 num_clusters) 3147 { 3148 int ret = 0; 3149 loff_t offset, end, map_end; 3150 pgoff_t page_index; 3151 struct page *page; 3152 3153 if (ocfs2_should_order_data(inode)) 3154 return 0; 3155 3156 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; 3157 end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits); 3158 3159 ret = filemap_fdatawrite_range(inode->i_mapping, 3160 offset, end - 1); 3161 if (ret < 0) { 3162 mlog_errno(ret); 3163 return ret; 3164 } 3165 3166 while (offset < end) { 3167 page_index = offset >> PAGE_SHIFT; 3168 map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; 3169 if (map_end > end) 3170 map_end = end; 3171 3172 page = find_or_create_page(inode->i_mapping, 3173 page_index, GFP_NOFS); 3174 BUG_ON(!page); 3175 3176 wait_on_page_writeback(page); 3177 if (PageError(page)) { 3178 ret = -EIO; 3179 mlog_errno(ret); 3180 } else 3181 mark_page_accessed(page); 3182 3183 unlock_page(page); 3184 put_page(page); 3185 page = NULL; 3186 offset = map_end; 3187 if (ret) 3188 break; 3189 } 3190 3191 return ret; 3192 } 3193 3194 static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context, 3195 u32 v_cluster, u32 *p_cluster, 3196 u32 *num_clusters, 3197 unsigned int *extent_flags) 3198 { 3199 return ocfs2_get_clusters(context->inode, v_cluster, p_cluster, 3200 num_clusters, extent_flags); 3201 } 3202 3203 static int ocfs2_make_clusters_writable(struct super_block *sb, 3204 struct ocfs2_cow_context *context, 3205 u32 cpos, u32 p_cluster, 3206 u32 num_clusters, unsigned int e_flags) 3207 { 3208 int ret, delete, index, credits = 0; 3209 u32 new_bit, new_len, orig_num_clusters; 3210 unsigned int set_len; 3211 struct ocfs2_super *osb = OCFS2_SB(sb); 3212 handle_t *handle; 3213 struct buffer_head *ref_leaf_bh = NULL; 3214 struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; 3215 struct ocfs2_refcount_rec rec; 3216 3217 trace_ocfs2_make_clusters_writable(cpos, p_cluster, 3218 num_clusters, e_flags); 3219 3220 ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters, 3221 &context->data_et, 3222 ref_ci, 3223 context->ref_root_bh, 3224 &context->meta_ac, 3225 &context->data_ac, 
					     &credits);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (context->post_refcount)
		credits += context->post_refcount->credits;

	credits += context->extra_credits;
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	orig_num_clusters = num_clusters;

	while (num_clusters) {
		ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
					     p_cluster, num_clusters,
					     &rec, &index, &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}

		BUG_ON(!rec.r_refcount);
		set_len = min((u64)p_cluster + num_clusters,
			      le64_to_cpu(rec.r_cpos) +
			      le32_to_cpu(rec.r_clusters)) - p_cluster;

		/*
		 * There are two different situations here:
		 * 1. If refcount == 1, remove the flag and don't COW.
		 * 2. If refcount > 1, allocate clusters.
		 * Here we may not allocate r_len clusters at once, so keep
		 * going until we reach num_clusters.
		 */
		if (le32_to_cpu(rec.r_refcount) == 1) {
			delete = 0;
			ret = ocfs2_clear_ext_refcount(handle,
						       &context->data_et,
						       cpos, p_cluster,
						       set_len, e_flags,
						       context->meta_ac,
						       &context->dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out_commit;
			}
		} else {
			delete = 1;

			ret = __ocfs2_claim_clusters(handle,
						     context->data_ac,
						     1, set_len,
						     &new_bit, &new_len);
			if (ret) {
				mlog_errno(ret);
				goto out_commit;
			}

			ret = ocfs2_replace_clusters(handle, context,
						     cpos, p_cluster, new_bit,
						     new_len, e_flags);
			if (ret) {
				mlog_errno(ret);
				goto out_commit;
			}
			set_len = new_len;
		}

		ret = __ocfs2_decrease_refcount(handle, ref_ci,
						context->ref_root_bh,
						p_cluster, set_len,
						context->meta_ac,
						&context->dealloc, delete);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}

		cpos += set_len;
		p_cluster += set_len;
		num_clusters -= set_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

	/* Handle any post_cow action. */
	if (context->post_refcount && context->post_refcount->func) {
		ret = context->post_refcount->func(context->inode, handle,
						context->post_refcount->para);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	/*
	 * Here we should write the new pages out first if we are
	 * in write-back mode.
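	 *
	 * With data=ordered, ocfs2_cow_sync_writeback() returns early
	 * because ordered mode already flushes data pages before the
	 * transaction commits; in write-back mode nothing else guarantees
	 * the CoWed data reaches disk, so we push it out here ourselves.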
3329 */ 3330 if (context->get_clusters == ocfs2_di_get_clusters) { 3331 ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos, 3332 orig_num_clusters); 3333 if (ret) 3334 mlog_errno(ret); 3335 } 3336 3337 out_commit: 3338 ocfs2_commit_trans(osb, handle); 3339 3340 out: 3341 if (context->data_ac) { 3342 ocfs2_free_alloc_context(context->data_ac); 3343 context->data_ac = NULL; 3344 } 3345 if (context->meta_ac) { 3346 ocfs2_free_alloc_context(context->meta_ac); 3347 context->meta_ac = NULL; 3348 } 3349 brelse(ref_leaf_bh); 3350 3351 return ret; 3352 } 3353 3354 static int ocfs2_replace_cow(struct ocfs2_cow_context *context) 3355 { 3356 int ret = 0; 3357 struct inode *inode = context->inode; 3358 u32 cow_start = context->cow_start, cow_len = context->cow_len; 3359 u32 p_cluster, num_clusters; 3360 unsigned int ext_flags; 3361 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3362 3363 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { 3364 return ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n", 3365 inode->i_ino); 3366 } 3367 3368 ocfs2_init_dealloc_ctxt(&context->dealloc); 3369 3370 while (cow_len) { 3371 ret = context->get_clusters(context, cow_start, &p_cluster, 3372 &num_clusters, &ext_flags); 3373 if (ret) { 3374 mlog_errno(ret); 3375 break; 3376 } 3377 3378 BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED)); 3379 3380 if (cow_len < num_clusters) 3381 num_clusters = cow_len; 3382 3383 ret = ocfs2_make_clusters_writable(inode->i_sb, context, 3384 cow_start, p_cluster, 3385 num_clusters, ext_flags); 3386 if (ret) { 3387 mlog_errno(ret); 3388 break; 3389 } 3390 3391 cow_len -= num_clusters; 3392 cow_start += num_clusters; 3393 } 3394 3395 if (ocfs2_dealloc_has_cluster(&context->dealloc)) { 3396 ocfs2_schedule_truncate_log_flush(osb, 1); 3397 ocfs2_run_deallocs(osb, &context->dealloc); 3398 } 3399 3400 return ret; 3401 } 3402 3403 /* 3404 * Starting at cpos, try to CoW write_len clusters. Don't CoW 3405 * past max_cpos. This will stop when it runs into a hole or an 3406 * unrefcounted extent. 
3407 */ 3408 static int ocfs2_refcount_cow_hunk(struct inode *inode, 3409 struct buffer_head *di_bh, 3410 u32 cpos, u32 write_len, u32 max_cpos) 3411 { 3412 int ret; 3413 u32 cow_start = 0, cow_len = 0; 3414 struct ocfs2_inode_info *oi = OCFS2_I(inode); 3415 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3416 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3417 struct buffer_head *ref_root_bh = NULL; 3418 struct ocfs2_refcount_tree *ref_tree; 3419 struct ocfs2_cow_context *context = NULL; 3420 3421 BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); 3422 3423 ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list, 3424 cpos, write_len, max_cpos, 3425 &cow_start, &cow_len); 3426 if (ret) { 3427 mlog_errno(ret); 3428 goto out; 3429 } 3430 3431 trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno, 3432 cpos, write_len, max_cpos, 3433 cow_start, cow_len); 3434 3435 BUG_ON(cow_len == 0); 3436 3437 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3438 if (!context) { 3439 ret = -ENOMEM; 3440 mlog_errno(ret); 3441 goto out; 3442 } 3443 3444 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 3445 1, &ref_tree, &ref_root_bh); 3446 if (ret) { 3447 mlog_errno(ret); 3448 goto out; 3449 } 3450 3451 context->inode = inode; 3452 context->cow_start = cow_start; 3453 context->cow_len = cow_len; 3454 context->ref_tree = ref_tree; 3455 context->ref_root_bh = ref_root_bh; 3456 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; 3457 context->get_clusters = ocfs2_di_get_clusters; 3458 3459 ocfs2_init_dinode_extent_tree(&context->data_et, 3460 INODE_CACHE(inode), di_bh); 3461 3462 ret = ocfs2_replace_cow(context); 3463 if (ret) 3464 mlog_errno(ret); 3465 3466 /* 3467 * truncate the extent map here since no matter whether we meet with 3468 * any error during the action, we shouldn't trust cached extent map 3469 * any more. 3470 */ 3471 ocfs2_extent_map_trunc(inode, cow_start); 3472 3473 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 3474 brelse(ref_root_bh); 3475 out: 3476 kfree(context); 3477 return ret; 3478 } 3479 3480 /* 3481 * CoW any and all clusters between cpos and cpos+write_len. 3482 * Don't CoW past max_cpos. If this returns successfully, all 3483 * clusters between cpos and cpos+write_len are safe to modify. 
 */
int ocfs2_refcount_cow(struct inode *inode,
		       struct buffer_head *di_bh,
		       u32 cpos, u32 write_len, u32 max_cpos)
{
	int ret = 0;
	u32 p_cluster, num_clusters;
	unsigned int ext_flags;

	while (write_len) {
		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
					 &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		if (write_len < num_clusters)
			num_clusters = write_len;

		if (ext_flags & OCFS2_EXT_REFCOUNTED) {
			ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
						      num_clusters, max_cpos);
			if (ret) {
				mlog_errno(ret);
				break;
			}
		}

		write_len -= num_clusters;
		cpos += num_clusters;
	}

	return ret;
}

static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
					  u32 v_cluster, u32 *p_cluster,
					  u32 *num_clusters,
					  unsigned int *extent_flags)
{
	struct inode *inode = context->inode;
	struct ocfs2_xattr_value_root *xv = context->cow_object;

	return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
					num_clusters, &xv->xr_list,
					extent_flags);
}

/*
 * Given an xattr value root, calculate the most metadata blocks and
 * journal credits we need for the refcount tree change if we truncate
 * the value to 0.
 */
int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
				       struct ocfs2_caching_info *ref_ci,
				       struct buffer_head *ref_root_bh,
				       struct ocfs2_xattr_value_root *xv,
				       int *meta_add, int *credits)
{
	int ret = 0, index, ref_blocks = 0;
	u32 p_cluster, num_clusters;
	u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_rec rec;
	struct buffer_head *ref_leaf_bh = NULL;

	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, &xv->xr_list,
					       NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		cpos += num_clusters;

		while (num_clusters) {
			ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
						     p_cluster, num_clusters,
						     &rec, &index,
						     &ref_leaf_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			BUG_ON(!rec.r_refcount);

			rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;

			/*
			 * We really don't know whether the other clusters are
			 * in this refcount block or not, so just take the
			 * worst case: all the clusters are in this block and
			 * each one will split a refcount rec, so in total we
			 * need clusters * 2 new refcount recs.
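			 *
			 * E.g. (hypothetical numbers) an 8-cluster value
			 * budgets for 16 new records; only if rl_used + 16
			 * overflows rl_count do we count an extra refcount
			 * block.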
3581 */ 3582 if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 > 3583 le16_to_cpu(rb->rf_records.rl_count)) 3584 ref_blocks++; 3585 3586 *credits += 1; 3587 brelse(ref_leaf_bh); 3588 ref_leaf_bh = NULL; 3589 3590 if (num_clusters <= le32_to_cpu(rec.r_clusters)) 3591 break; 3592 else 3593 num_clusters -= le32_to_cpu(rec.r_clusters); 3594 p_cluster += num_clusters; 3595 } 3596 } 3597 3598 *meta_add += ref_blocks; 3599 if (!ref_blocks) 3600 goto out; 3601 3602 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 3603 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) 3604 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; 3605 else { 3606 struct ocfs2_extent_tree et; 3607 3608 ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh); 3609 *credits += ocfs2_calc_extend_credits(inode->i_sb, 3610 et.et_root_el); 3611 } 3612 3613 out: 3614 brelse(ref_leaf_bh); 3615 return ret; 3616 } 3617 3618 /* 3619 * Do CoW for xattr. 3620 */ 3621 int ocfs2_refcount_cow_xattr(struct inode *inode, 3622 struct ocfs2_dinode *di, 3623 struct ocfs2_xattr_value_buf *vb, 3624 struct ocfs2_refcount_tree *ref_tree, 3625 struct buffer_head *ref_root_bh, 3626 u32 cpos, u32 write_len, 3627 struct ocfs2_post_refcount *post) 3628 { 3629 int ret; 3630 struct ocfs2_xattr_value_root *xv = vb->vb_xv; 3631 struct ocfs2_inode_info *oi = OCFS2_I(inode); 3632 struct ocfs2_cow_context *context = NULL; 3633 u32 cow_start, cow_len; 3634 3635 BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); 3636 3637 ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list, 3638 cpos, write_len, UINT_MAX, 3639 &cow_start, &cow_len); 3640 if (ret) { 3641 mlog_errno(ret); 3642 goto out; 3643 } 3644 3645 BUG_ON(cow_len == 0); 3646 3647 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); 3648 if (!context) { 3649 ret = -ENOMEM; 3650 mlog_errno(ret); 3651 goto out; 3652 } 3653 3654 context->inode = inode; 3655 context->cow_start = cow_start; 3656 context->cow_len = cow_len; 3657 context->ref_tree = ref_tree; 3658 context->ref_root_bh = ref_root_bh; 3659 context->cow_object = xv; 3660 3661 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd; 3662 /* We need the extra credits for duplicate_clusters by jbd. */ 3663 context->extra_credits = 3664 ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len; 3665 context->get_clusters = ocfs2_xattr_value_get_clusters; 3666 context->post_refcount = post; 3667 3668 ocfs2_init_xattr_value_extent_tree(&context->data_et, 3669 INODE_CACHE(inode), vb); 3670 3671 ret = ocfs2_replace_cow(context); 3672 if (ret) 3673 mlog_errno(ret); 3674 3675 out: 3676 kfree(context); 3677 return ret; 3678 } 3679 3680 /* 3681 * Insert a new extent into refcount tree and mark a extent rec 3682 * as refcounted in the dinode tree. 
3683 */ 3684 int ocfs2_add_refcount_flag(struct inode *inode, 3685 struct ocfs2_extent_tree *data_et, 3686 struct ocfs2_caching_info *ref_ci, 3687 struct buffer_head *ref_root_bh, 3688 u32 cpos, u32 p_cluster, u32 num_clusters, 3689 struct ocfs2_cached_dealloc_ctxt *dealloc, 3690 struct ocfs2_post_refcount *post) 3691 { 3692 int ret; 3693 handle_t *handle; 3694 int credits = 1, ref_blocks = 0; 3695 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3696 struct ocfs2_alloc_context *meta_ac = NULL; 3697 3698 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, 3699 ref_ci, ref_root_bh, 3700 p_cluster, num_clusters, 3701 &ref_blocks, &credits); 3702 if (ret) { 3703 mlog_errno(ret); 3704 goto out; 3705 } 3706 3707 trace_ocfs2_add_refcount_flag(ref_blocks, credits); 3708 3709 if (ref_blocks) { 3710 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb), 3711 ref_blocks, &meta_ac); 3712 if (ret) { 3713 mlog_errno(ret); 3714 goto out; 3715 } 3716 } 3717 3718 if (post) 3719 credits += post->credits; 3720 3721 handle = ocfs2_start_trans(osb, credits); 3722 if (IS_ERR(handle)) { 3723 ret = PTR_ERR(handle); 3724 mlog_errno(ret); 3725 goto out; 3726 } 3727 3728 ret = ocfs2_mark_extent_refcounted(inode, data_et, handle, 3729 cpos, num_clusters, p_cluster, 3730 meta_ac, dealloc); 3731 if (ret) { 3732 mlog_errno(ret); 3733 goto out_commit; 3734 } 3735 3736 ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, 3737 p_cluster, num_clusters, 0, 3738 meta_ac, dealloc); 3739 if (ret) { 3740 mlog_errno(ret); 3741 goto out_commit; 3742 } 3743 3744 if (post && post->func) { 3745 ret = post->func(inode, handle, post->para); 3746 if (ret) 3747 mlog_errno(ret); 3748 } 3749 3750 out_commit: 3751 ocfs2_commit_trans(osb, handle); 3752 out: 3753 if (meta_ac) 3754 ocfs2_free_alloc_context(meta_ac); 3755 return ret; 3756 } 3757 3758 static int ocfs2_change_ctime(struct inode *inode, 3759 struct buffer_head *di_bh) 3760 { 3761 int ret; 3762 handle_t *handle; 3763 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3764 3765 handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb), 3766 OCFS2_INODE_UPDATE_CREDITS); 3767 if (IS_ERR(handle)) { 3768 ret = PTR_ERR(handle); 3769 mlog_errno(ret); 3770 goto out; 3771 } 3772 3773 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, 3774 OCFS2_JOURNAL_ACCESS_WRITE); 3775 if (ret) { 3776 mlog_errno(ret); 3777 goto out_commit; 3778 } 3779 3780 inode->i_ctime = current_time(inode); 3781 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); 3782 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); 3783 3784 ocfs2_journal_dirty(handle, di_bh); 3785 3786 out_commit: 3787 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); 3788 out: 3789 return ret; 3790 } 3791 3792 static int ocfs2_attach_refcount_tree(struct inode *inode, 3793 struct buffer_head *di_bh) 3794 { 3795 int ret, data_changed = 0; 3796 struct buffer_head *ref_root_bh = NULL; 3797 struct ocfs2_inode_info *oi = OCFS2_I(inode); 3798 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3799 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3800 struct ocfs2_refcount_tree *ref_tree; 3801 unsigned int ext_flags; 3802 loff_t size; 3803 u32 cpos, num_clusters, clusters, p_cluster; 3804 struct ocfs2_cached_dealloc_ctxt dealloc; 3805 struct ocfs2_extent_tree di_et; 3806 3807 ocfs2_init_dealloc_ctxt(&dealloc); 3808 3809 if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) { 3810 ret = ocfs2_create_refcount_tree(inode, di_bh); 3811 if (ret) { 3812 mlog_errno(ret); 3813 goto out; 3814 } 3815 } 3816 3817 
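	/*
	 * By now the inode must carry a refcount tree: either it already
	 * had one, or ocfs2_create_refcount_tree() above just attached it.
	 */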
BUG_ON(!di->i_refcount_loc); 3818 ret = ocfs2_lock_refcount_tree(osb, 3819 le64_to_cpu(di->i_refcount_loc), 1, 3820 &ref_tree, &ref_root_bh); 3821 if (ret) { 3822 mlog_errno(ret); 3823 goto out; 3824 } 3825 3826 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) 3827 goto attach_xattr; 3828 3829 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh); 3830 3831 size = i_size_read(inode); 3832 clusters = ocfs2_clusters_for_bytes(inode->i_sb, size); 3833 3834 cpos = 0; 3835 while (cpos < clusters) { 3836 ret = ocfs2_get_clusters(inode, cpos, &p_cluster, 3837 &num_clusters, &ext_flags); 3838 if (ret) { 3839 mlog_errno(ret); 3840 goto unlock; 3841 } 3842 if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) { 3843 ret = ocfs2_add_refcount_flag(inode, &di_et, 3844 &ref_tree->rf_ci, 3845 ref_root_bh, cpos, 3846 p_cluster, num_clusters, 3847 &dealloc, NULL); 3848 if (ret) { 3849 mlog_errno(ret); 3850 goto unlock; 3851 } 3852 3853 data_changed = 1; 3854 } 3855 cpos += num_clusters; 3856 } 3857 3858 attach_xattr: 3859 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) { 3860 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh, 3861 &ref_tree->rf_ci, 3862 ref_root_bh, 3863 &dealloc); 3864 if (ret) { 3865 mlog_errno(ret); 3866 goto unlock; 3867 } 3868 } 3869 3870 if (data_changed) { 3871 ret = ocfs2_change_ctime(inode, di_bh); 3872 if (ret) 3873 mlog_errno(ret); 3874 } 3875 3876 unlock: 3877 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 3878 brelse(ref_root_bh); 3879 3880 if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) { 3881 ocfs2_schedule_truncate_log_flush(osb, 1); 3882 ocfs2_run_deallocs(osb, &dealloc); 3883 } 3884 out: 3885 /* 3886 * Empty the extent map so that we may get the right extent 3887 * record from the disk. 3888 */ 3889 ocfs2_extent_map_trunc(inode, 0); 3890 3891 return ret; 3892 } 3893 3894 static int ocfs2_add_refcounted_extent(struct inode *inode, 3895 struct ocfs2_extent_tree *et, 3896 struct ocfs2_caching_info *ref_ci, 3897 struct buffer_head *ref_root_bh, 3898 u32 cpos, u32 p_cluster, u32 num_clusters, 3899 unsigned int ext_flags, 3900 struct ocfs2_cached_dealloc_ctxt *dealloc) 3901 { 3902 int ret; 3903 handle_t *handle; 3904 int credits = 0; 3905 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3906 struct ocfs2_alloc_context *meta_ac = NULL; 3907 3908 ret = ocfs2_lock_refcount_allocators(inode->i_sb, 3909 p_cluster, num_clusters, 3910 et, ref_ci, 3911 ref_root_bh, &meta_ac, 3912 NULL, &credits); 3913 if (ret) { 3914 mlog_errno(ret); 3915 goto out; 3916 } 3917 3918 handle = ocfs2_start_trans(osb, credits); 3919 if (IS_ERR(handle)) { 3920 ret = PTR_ERR(handle); 3921 mlog_errno(ret); 3922 goto out; 3923 } 3924 3925 ret = ocfs2_insert_extent(handle, et, cpos, 3926 ocfs2_clusters_to_blocks(inode->i_sb, p_cluster), 3927 num_clusters, ext_flags, meta_ac); 3928 if (ret) { 3929 mlog_errno(ret); 3930 goto out_commit; 3931 } 3932 3933 ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, 3934 p_cluster, num_clusters, 3935 meta_ac, dealloc); 3936 if (ret) 3937 mlog_errno(ret); 3938 3939 out_commit: 3940 ocfs2_commit_trans(osb, handle); 3941 out: 3942 if (meta_ac) 3943 ocfs2_free_alloc_context(meta_ac); 3944 return ret; 3945 } 3946 3947 static int ocfs2_duplicate_inline_data(struct inode *s_inode, 3948 struct buffer_head *s_bh, 3949 struct inode *t_inode, 3950 struct buffer_head *t_bh) 3951 { 3952 int ret; 3953 handle_t *handle; 3954 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb); 3955 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data; 3956 struct ocfs2_dinode *t_di 
		= (struct ocfs2_dinode *)t_bh->b_data;

	BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
	memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
	       le16_to_cpu(s_di->id2.i_data.id_count));
	spin_lock(&OCFS2_I(t_inode)->ip_lock);
	OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
	t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
	spin_unlock(&OCFS2_I(t_inode)->ip_lock);

	ocfs2_journal_dirty(handle, t_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_duplicate_extent_list(struct inode *s_inode,
				struct inode *t_inode,
				struct buffer_head *t_bh,
				struct ocfs2_caching_info *ref_ci,
				struct buffer_head *ref_root_bh,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0;
	u32 p_cluster, num_clusters, clusters, cpos;
	loff_t size;
	unsigned int ext_flags;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);

	size = i_size_read(s_inode);
	clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);

	cpos = 0;
	while (cpos < clusters) {
		ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
					 &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		if (p_cluster) {
			ret = ocfs2_add_refcounted_extent(t_inode, &et,
							  ref_ci, ref_root_bh,
							  cpos, p_cluster,
							  num_clusters,
							  ext_flags,
							  dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		cpos += num_clusters;
	}

out:
	return ret;
}

/*
 * Change the new file's attributes to match the source.
 *
 * reflink creates a snapshot of a file; that means the attributes
 * must be identical except for three exceptions - nlink, ino, and ctime.
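 *
 * Concretely (see the preserve branch below): i_size, i_clusters, mode
 * and ownership are copied from the source, mtime is made to match,
 * while ctime is set to the current time on the new inode.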
4041 */ 4042 static int ocfs2_complete_reflink(struct inode *s_inode, 4043 struct buffer_head *s_bh, 4044 struct inode *t_inode, 4045 struct buffer_head *t_bh, 4046 bool preserve) 4047 { 4048 int ret; 4049 handle_t *handle; 4050 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data; 4051 struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data; 4052 loff_t size = i_size_read(s_inode); 4053 4054 handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb), 4055 OCFS2_INODE_UPDATE_CREDITS); 4056 if (IS_ERR(handle)) { 4057 ret = PTR_ERR(handle); 4058 mlog_errno(ret); 4059 return ret; 4060 } 4061 4062 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh, 4063 OCFS2_JOURNAL_ACCESS_WRITE); 4064 if (ret) { 4065 mlog_errno(ret); 4066 goto out_commit; 4067 } 4068 4069 spin_lock(&OCFS2_I(t_inode)->ip_lock); 4070 OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters; 4071 OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr; 4072 OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features; 4073 spin_unlock(&OCFS2_I(t_inode)->ip_lock); 4074 i_size_write(t_inode, size); 4075 t_inode->i_blocks = s_inode->i_blocks; 4076 4077 di->i_xattr_inline_size = s_di->i_xattr_inline_size; 4078 di->i_clusters = s_di->i_clusters; 4079 di->i_size = s_di->i_size; 4080 di->i_dyn_features = s_di->i_dyn_features; 4081 di->i_attr = s_di->i_attr; 4082 4083 if (preserve) { 4084 t_inode->i_uid = s_inode->i_uid; 4085 t_inode->i_gid = s_inode->i_gid; 4086 t_inode->i_mode = s_inode->i_mode; 4087 di->i_uid = s_di->i_uid; 4088 di->i_gid = s_di->i_gid; 4089 di->i_mode = s_di->i_mode; 4090 4091 /* 4092 * update time. 4093 * we want mtime to appear identical to the source and 4094 * update ctime. 4095 */ 4096 t_inode->i_ctime = current_time(t_inode); 4097 4098 di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec); 4099 di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec); 4100 4101 t_inode->i_mtime = s_inode->i_mtime; 4102 di->i_mtime = s_di->i_mtime; 4103 di->i_mtime_nsec = s_di->i_mtime_nsec; 4104 } 4105 4106 ocfs2_journal_dirty(handle, t_bh); 4107 4108 out_commit: 4109 ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle); 4110 return ret; 4111 } 4112 4113 static int ocfs2_create_reflink_node(struct inode *s_inode, 4114 struct buffer_head *s_bh, 4115 struct inode *t_inode, 4116 struct buffer_head *t_bh, 4117 bool preserve) 4118 { 4119 int ret; 4120 struct buffer_head *ref_root_bh = NULL; 4121 struct ocfs2_cached_dealloc_ctxt dealloc; 4122 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb); 4123 struct ocfs2_refcount_block *rb; 4124 struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data; 4125 struct ocfs2_refcount_tree *ref_tree; 4126 4127 ocfs2_init_dealloc_ctxt(&dealloc); 4128 4129 ret = ocfs2_set_refcount_tree(t_inode, t_bh, 4130 le64_to_cpu(di->i_refcount_loc)); 4131 if (ret) { 4132 mlog_errno(ret); 4133 goto out; 4134 } 4135 4136 if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { 4137 ret = ocfs2_duplicate_inline_data(s_inode, s_bh, 4138 t_inode, t_bh); 4139 if (ret) 4140 mlog_errno(ret); 4141 goto out; 4142 } 4143 4144 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 4145 1, &ref_tree, &ref_root_bh); 4146 if (ret) { 4147 mlog_errno(ret); 4148 goto out; 4149 } 4150 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; 4151 4152 ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh, 4153 &ref_tree->rf_ci, ref_root_bh, 4154 &dealloc); 4155 if (ret) { 4156 mlog_errno(ret); 4157 goto out_unlock_refcount; 4158 } 4159 4160 out_unlock_refcount: 4161 
static int ocfs2_create_reflink_node(struct inode *s_inode,
				     struct buffer_head *s_bh,
				     struct inode *t_inode,
				     struct buffer_head *t_bh,
				     bool preserve)
{
	int ret;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_refcount_tree *ref_tree;

	ocfs2_init_dealloc_ctxt(&dealloc);

	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
				      le64_to_cpu(di->i_refcount_loc));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
						  t_inode, t_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
					  &ref_tree->rf_ci, ref_root_bh,
					  &dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_refcount;
	}

out_unlock_refcount:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);
out:
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

	return ret;
}

static int __ocfs2_reflink(struct dentry *old_dentry,
			   struct buffer_head *old_bh,
			   struct inode *new_inode,
			   bool preserve)
{
	int ret;
	struct inode *inode = d_inode(old_dentry);
	struct buffer_head *new_bh = NULL;

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	ret = filemap_fdatawrite(inode->i_mapping);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_attach_refcount_tree(inode, old_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	inode_lock_nested(new_inode, I_MUTEX_CHILD);
	ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
				      OI_LS_REFLINK_TARGET);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_create_reflink_node(inode, old_bh,
					new_inode, new_bh, preserve);
	if (ret) {
		mlog_errno(ret);
		goto inode_unlock;
	}

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
		ret = ocfs2_reflink_xattrs(inode, old_bh,
					   new_inode, new_bh,
					   preserve);
		if (ret) {
			mlog_errno(ret);
			goto inode_unlock;
		}
	}

	ret = ocfs2_complete_reflink(inode, old_bh,
				     new_inode, new_bh, preserve);
	if (ret)
		mlog_errno(ret);

inode_unlock:
	ocfs2_inode_unlock(new_inode, 1);
	brelse(new_bh);
out_unlock:
	inode_unlock(new_inode);
out:
	if (!ret) {
		ret = filemap_fdatawait(inode->i_mapping);
		if (ret)
			mlog_errno(ret);
	}
	return ret;
}
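/*
 * Summary of the sequence __ocfs2_reflink() runs under the cluster and
 * VFS locks taken by its caller:
 *
 *	filemap_fdatawrite()         flush dirty source data
 *	ocfs2_attach_refcount_tree() make sure the source has a tree
 *	ocfs2_inode_lock_nested()    lock the orphaned target inode
 *	ocfs2_create_reflink_node()  share the data extents
 *	ocfs2_reflink_xattrs()       share xattr extents, if any exist
 *	ocfs2_complete_reflink()     copy attributes and timestamps
 *	filemap_fdatawait()          pick up any writeback error
 */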
static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
			 struct dentry *new_dentry, bool preserve)
{
	int error;
	struct inode *inode = d_inode(old_dentry);
	struct buffer_head *old_bh = NULL;
	struct inode *new_orphan_inode = NULL;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
					     &new_orphan_inode);
	if (error) {
		mlog_errno(error);
		goto out;
	}

	error = ocfs2_rw_lock(inode, 1);
	if (error) {
		mlog_errno(error);
		goto out;
	}

	error = ocfs2_inode_lock(inode, &old_bh, 1);
	if (error) {
		mlog_errno(error);
		ocfs2_rw_unlock(inode, 1);
		goto out;
	}

	down_write(&OCFS2_I(inode)->ip_xattr_sem);
	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	error = __ocfs2_reflink(old_dentry, old_bh,
				new_orphan_inode, preserve);
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	up_write(&OCFS2_I(inode)->ip_xattr_sem);

	ocfs2_inode_unlock(inode, 1);
	ocfs2_rw_unlock(inode, 1);
	brelse(old_bh);

	if (error) {
		mlog_errno(error);
		goto out;
	}

	/* If the security isn't preserved, we need to re-initialize it. */
	if (!preserve) {
		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
						    &new_dentry->d_name);
		if (error)
			mlog_errno(error);
	}
out:
	if (!error) {
		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
						       new_dentry);
		if (error)
			mlog_errno(error);
	}

	if (new_orphan_inode) {
		/*
		 * We need to open_unlock the inode no matter whether we
		 * succeed or not, so that other nodes can delete it later.
		 */
		ocfs2_open_unlock(new_orphan_inode);
		if (error)
			iput(new_orphan_inode);
	}

	return error;
}

/*
 * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
 * sys_reflink().  This will go away when vfs_reflink() exists in
 * fs/namei.c.
 */

/* Copied from may_create() in the VFS. */
static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/**
 * ocfs2_vfs_reflink - Create a reference-counted link
 *
 * @old_dentry:	source dentry + inode
 * @dir:	directory to create the target in
 * @new_dentry:	target dentry
 * @preserve:	if true, preserve all file attributes
 */
static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
			     struct dentry *new_dentry, bool preserve)
{
	struct inode *inode = d_inode(old_dentry);
	int error;

	if (!inode)
		return -ENOENT;

	error = ocfs2_may_create(dir, new_dentry);
	if (error)
		return error;

	if (dir->i_sb != inode->i_sb)
		return -EXDEV;

	/*
	 * A reflink to an append-only or immutable file cannot be created.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* Only regular files can be reflinked. */
	if (!S_ISREG(inode->i_mode))
		return -EPERM;

	/*
	 * If the caller wants to preserve ownership, it must have the
	 * rights to do so.
	 */
	if (preserve) {
		if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
			return -EPERM;
		if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
			return -EPERM;
	}

	/*
	 * If the caller is modifying any aspect of the attributes, they
	 * are not creating a snapshot.  They need read permission on the
	 * file.
	 */
	if (!preserve) {
		error = inode_permission(inode, MAY_READ);
		if (error)
			return error;
	}

	inode_lock(inode);
	error = dquot_initialize(dir);
	if (!error)
		error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
	inode_unlock(inode);
	if (!error)
		fsnotify_create(dir, new_dentry);
	return error;
}
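/*
 * For illustration, how the checks above combine for an unprivileged
 * caller (no CAP_CHOWN):
 *
 *	preserve == true:  the caller must be the file's owner and a
 *			   member of its group, or the reflink fails
 *			   with -EPERM.
 *	preserve == false: the caller only needs read permission on the
 *			   source; ownership, security and ACLs are
 *			   re-initialized for the new file.
 */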
/*
 * Most of this code is copied from sys_linkat().
 */
int ocfs2_reflink_ioctl(struct inode *inode,
			const char __user *oldname,
			const char __user *newname,
			bool preserve)
{
	struct dentry *new_dentry;
	struct path old_path, new_path;
	int error;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
	if (error) {
		mlog_errno(error);
		return error;
	}

	new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry)) {
		mlog_errno(error);
		goto out;
	}

	error = -EXDEV;
	if (old_path.mnt != new_path.mnt) {
		mlog_errno(error);
		goto out_dput;
	}

	error = ocfs2_vfs_reflink(old_path.dentry,
				  d_inode(new_path.dentry),
				  new_dentry, preserve);
out_dput:
	done_path_create(&new_path, new_dentry);
out:
	path_put(&old_path);

	return error;
}
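/*
 * For illustration only -- a minimal userspace sketch of driving the
 * ioctl handled above (not part of the kernel build; it assumes the
 * OCFS2_IOC_REFLINK definition and struct reflink_arguments from
 * ocfs2_ioctl.h, and the mount point is made up):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	struct reflink_arguments args = {
 *		.old_path = (__u64)(unsigned long)"/mnt/ocfs2/src",
 *		.new_path = (__u64)(unsigned long)"/mnt/ocfs2/snap",
 *		.preserve = 1,	// take a full snapshot
 *	};
 *	int fd = open("/mnt/ocfs2/src", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, OCFS2_IOC_REFLINK, &args) == 0)
 *		;	// "snap" now shares clusters with "src"
 */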