// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}
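
/*
 * For illustration (a sketch of how the seq based refcnts above are used,
 * not code from this file): each accounting round picks a fresh sequence
 * number larger than any previously stored count, so a stale counter
 * (qg->old_refcnt < seq) simply reads back as zero without ever being
 * reset.  E.g. with seq == 1000:
 *
 *	btrfs_qgroup_update_old_refcnt(qg, 1000, 1);	// old_refcnt -> 1001
 *	btrfs_qgroup_get_old_refcnt(qg, 1000);		// returns 1
 *
 * A qgroup never touched in this round still holds some value below 1000
 * and therefore reports a refcnt of 0.
 */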

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}
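
/*
 * A note on the qgroupid values used throughout (see btrfs_qgroup_level()
 * and btrfs_qgroup_subvolid()): the id packs the qgroup level into the
 * high 16 bits and the subvolume/qgroup id into the low 48 bits, so e.g.
 * "0/257" from btrfs-progs is level 0, id 257.  The rbtree above simply
 * orders by the combined 64-bit value.
 */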

/*
 * Add relation specified by two qgroups.
 *
 * Must be called with qgroup_lock held.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the qgroups is NULL
 *         <0       other errors
 */
static int __add_relation_rb(struct btrfs_qgroup *member, struct btrfs_qgroup *parent)
{
	struct btrfs_qgroup_list *list;

	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/*
 * Add relation specified by two qgroup ids.
 *
 * Must be called with qgroup_lock held.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the ids does not exist
 *         <0       other errors
 */
static int add_relation_rb(struct btrfs_fs_info *fs_info, u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);

	return __add_relation_rb(member, parent);
}

/* Must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
{
	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
}

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					  "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				qgroup_mark_inconsistent(fs_info);
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			qgroup_mark_inconsistent(fs_info);
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}
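
/*
 * For reference, the quota tree items walked by the two passes above are
 * keyed as follows (matching the add_*_item() helpers further down):
 *
 *	(0,   BTRFS_QGROUP_STATUS_KEY,   0)         global status item
 *	(0,   BTRFS_QGROUP_INFO_KEY,     qgroupid)  usage counters
 *	(0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid)  limits
 *	(src, BTRFS_QGROUP_RELATION_KEY, dst)       relation, stored once in
 *	                                            each direction
 */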

/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock.  And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
					   btrfs_qgroup_level(qgroup->qgroupid),
					   btrfs_qgroup_subvolid(qgroup->qgroupid),
					   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable();
 * the first two are single-threaded paths.  For the third one, we have set
 * quota_root to NULL with qgroup_lock held beforehand, so it is safe to
 * clean up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() both when unmounting the
	 * filesystem and when disabling quota, so set qgroup_ulist to NULL
	 * here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here.  In that case,
	 * we proceed by re-initializing the existing structure on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
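
/*
 * Note: the generation written by update_qgroup_status_item() above is
 * what btrfs_read_qgroup_config() compares against fs_info->generation at
 * mount time; a mismatch means the qgroup state was not committed together
 * with the last transaction, so the qgroups are marked inconsistent until
 * a rescan completes.
 */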

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete the leaves one by one, since the whole tree is
		 * going to be deleted anyway.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *ulist = NULL;
	int ret = 0;
	int slot;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to enable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
	 * and before setting BTRFS_FS_QUOTA_ENABLED.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info,
			  "qgroups are currently unsupported in extent tree v2");
		return -EINVAL;
	}

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction.  This is
	 * to avoid lock acquisition inversion problems (reported by lockdep)
	 * between qgroup_ioctl_lock and the vfs freeze semaphores, acquired
	 * when we start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile.  If
	 * so, just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error
	 * because that function returns 0 (success) when the sysfs entries
	 * already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume.  However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist;
	ulist = NULL;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	/*
	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
	 * a deadlock with tasks concurrently doing other qgroup operations,
	 * such as adding/removing qgroups or adding/deleting qgroup relations,
	 * because all qgroup operations first start or join a transaction and
	 * then lock the qgroup_ioctl_lock mutex.
	 * We are safe from a concurrent task trying to enable quotas by
	 * calling this function, since we are serialized by
	 * fs_info->subvol_sem.
	 */
	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	} else {
		/*
		 * We have set both BTRFS_FS_QUOTA_ENABLED and
		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
		 * -EINPROGRESS.  That can happen because someone started the
		 * rescan worker by calling quota rescan ioctl before we
		 * attempted to initialize the rescan worker.  Failure due to
		 * quotas disabled in the meanwhile is not possible, because
		 * we are holding a write lock on fs_info->subvol_sem, which
		 * is also acquired when disabling quotas.
		 * Ignore such error; any other error would need to undo
		 * everything we did in the transaction we just committed.
		 */
		ASSERT(ret == -EINPROGRESS);
		ret = 0;
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	ulist_free(ulist);
	return ret;
}
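
/*
 * Note: btrfs_quota_enable() deliberately starts with both FLAG_ON and
 * FLAG_INCONSISTENT set and then queues a full rescan, since none of the
 * extents that already exist on disk have been accounted yet.
 */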

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to disable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan
	 * worker to complete.  Otherwise we can deadlock because
	 * btrfs_remove_qgroup() needs to lock that mutex while holding a
	 * transaction handle and the rescan worker needs to commit a
	 * transaction.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * Request the qgroup rescan worker to complete and wait for it.  This
	 * wait must be done before starting the transaction for quota disable,
	 * since the rescan worker may otherwise deadlock with the transaction.
	 */
	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clean_tree_block(quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
			      quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);

	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
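
/*
 * Note: qgroups on the dirty list are flushed back to their quota tree
 * items at transaction commit time (see btrfs_run_qgroups()), which is why
 * the in-memory accounting below only needs to call qgroup_dirty().
 */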

/*
 * The easy accounting: we are updating a qgroup relationship where the
 * child qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the
 * parent, so excl/rfer just get added/removed.
 *
 * The same applies to the qgroup reservation space, which must also be
 * added to/removed from the parent.  Otherwise, when the child later
 * releases its reservation, the parent would underflow its reservation
 * (for the relation adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}


/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents is enough.  Otherwise we need to
 * do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for the quick update.  Return >0 when a full rescan is needed,
 * in which case the INCONSISTENT flag is set.
 * Return <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
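
/*
 * Worked example for the quick path above: if a child qgroup has
 * rfer == excl == 1M, every byte it references is exclusive to it, so
 * assigning it to a parent can simply add 1M to the parent's rfer and excl
 * (sign = 1), and removing the relation subtracts the same (sign = -1).
 * As soon as the child shares any extent (excl != rfer), this shortcut is
 * no longer valid and a full rescan is required.
 */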

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	unsigned int nofs_flag;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* First check whether this qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = __add_relation_rb(member, parent);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	bool found = false;
	unsigned int nofs_flag;
	int ret = 0;
	int ret2;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * If the parent/member pair doesn't exist, only try to delete the
	 * stale relation items.
	 */
	if (!member || !parent)
		goto delete_item;

	/* First check whether this qgroup relation exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	if (!ret || !ret2)
		ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * Remove the qgroup from sysfs now without holding the qgroup_lock
	 * spinlock, since the sysfs_remove_group() function needs to take
	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
	 */
	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
	kfree(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/*
	 * Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat -1 as a special value which
	 * tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		qgroup_mark_inconsistent(fs_info);
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
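
/*
 * Usage note: userspace clears a previously set limit by passing the
 * corresponding flag together with a value of -1, e.g. a
 * BTRFS_IOC_QGROUP_LIMIT call with BTRFS_QGROUP_LIMIT_MAX_RFER set and
 * max_rfer == (u64)-1 drops the referenced-bytes limit entirely.
 */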

int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
		} else {
			if (record->data_rsv && !entry->data_rsv) {
				entry->data_rsv = record->data_rsv;
				entry->data_rsv_refroot =
					record->data_rsv_refroot;
			}
			return 1;
		}
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}
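
/*
 * Note on the return convention above: 1 means the bytenr was already in
 * the dirty extent tree, so the caller keeps ownership of @record (and
 * normally frees it, see btrfs_qgroup_trace_extent() below); any reserved
 * data bytes are merged into the existing entry so the reservation is not
 * lost.
 */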

int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup_extent_record *qrecord)
{
	struct ulist *old_root;
	u64 bytenr = qrecord->bytenr;
	int ret;

	/*
	 * We are always called in a context where we are already holding a
	 * transaction handle.  Often we are called when adding a data delayed
	 * reference from btrfs_truncate_inode_items() (truncating or
	 * unlinking), in which case we will be holding a write lock on extent
	 * buffer from a subvolume tree.  In this case we can't allow
	 * btrfs_find_all_roots() to acquire fs_info->commit_root_sem, because
	 * that is a higher level lock that must be acquired before locking any
	 * extent buffers.
	 *
	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
	 * but we can't pass it a non-NULL transaction handle, because
	 * otherwise it would not use commit roots and would lock extent
	 * buffers, causing a deadlock if it ends up trying to read lock the
	 * same extent buffer that was previously write locked at
	 * btrfs_truncate_inode_items().
	 *
	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
	 * explicitly tell it to not acquire the commit_root_sem - if we are
	 * holding a transaction handle we don't need its protection.
	 */
	ASSERT(trans != NULL);

	if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
		return 0;

	ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root,
				   true);
	if (ret < 0) {
		qgroup_mark_inconsistent(trans->fs_info);
		btrfs_warn(trans->fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			ret);
		return 0;
	}

	/*
	 * Here we don't need to get the lock of
	 * trans->transaction->delayed_refs, since inserted qrecord won't
	 * be deleted, only qrecord->node may be modified (new qrecord insert).
	 *
	 * So modifying qrecord->old_roots is safe here.
	 */
	qrecord->old_roots = old_root;
	return 0;
}

int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kzalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(trans, record);
}

int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited.  If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented.  The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search.  If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root node's slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from.  Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}
2001 *
2002 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2003 *
2004 * 1) Tree search from @src_eb
2005 * It should act as a simplified btrfs_search_slot().
2006 * The key for the search can be extracted from @dst_path->nodes[dst_level]
2007 * (first key).
2008 *
2009 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2010 * NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
2011 * They should be marked during the previous (@dst_level = 1) iteration.
2012 *
2013 * 3) Mark file extents in leaves dirty
2014 * We don't have a good way to pick out new file extents only.
2015 * So we still follow the old method by scanning all file extents in
2016 * the leaf.
2017 *
2018 * This function can free us from keeping two paths, thus later we only need
2019 * to care about how to iterate all new tree blocks in the reloc tree.
2020 */
2021 static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
2022 struct extent_buffer *src_eb,
2023 struct btrfs_path *dst_path,
2024 int dst_level, int root_level,
2025 bool trace_leaf)
2026 {
2027 struct btrfs_key key;
2028 struct btrfs_path *src_path;
2029 struct btrfs_fs_info *fs_info = trans->fs_info;
2030 u32 nodesize = fs_info->nodesize;
2031 int cur_level = root_level;
2032 int ret;
2033
2034 BUG_ON(dst_level > root_level);
2035 /* Level mismatch */
2036 if (btrfs_header_level(src_eb) != root_level)
2037 return -EINVAL;
2038
2039 src_path = btrfs_alloc_path();
2040 if (!src_path) {
2041 ret = -ENOMEM;
2042 goto out;
2043 }
2044
2045 if (dst_level)
2046 btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2047 else
2048 btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2049
2050 /* For src_path */
2051 atomic_inc(&src_eb->refs);
2052 src_path->nodes[root_level] = src_eb;
2053 src_path->slots[root_level] = dst_path->slots[root_level];
2054 src_path->locks[root_level] = 0;
2055
2056 /* A simplified version of btrfs_search_slot() */
2057 while (cur_level >= dst_level) {
2058 struct btrfs_key src_key;
2059 struct btrfs_key dst_key;
2060
2061 if (src_path->nodes[cur_level] == NULL) {
2062 struct extent_buffer *eb;
2063 int parent_slot;
2064
2065 eb = src_path->nodes[cur_level + 1];
2066 parent_slot = src_path->slots[cur_level + 1];
2067
2068 eb = btrfs_read_node_slot(eb, parent_slot);
2069 if (IS_ERR(eb)) {
2070 ret = PTR_ERR(eb);
2071 goto out;
2072 }
2073
2074 src_path->nodes[cur_level] = eb;
2075
2076 btrfs_tree_read_lock(eb);
2077 src_path->locks[cur_level] = BTRFS_READ_LOCK;
2078 }
2079
2080 src_path->slots[cur_level] = dst_path->slots[cur_level];
2081 if (cur_level) {
2082 btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2083 &dst_key, dst_path->slots[cur_level]);
2084 btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2085 &src_key, src_path->slots[cur_level]);
2086 } else {
2087 btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2088 &dst_key, dst_path->slots[cur_level]);
2089 btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2090 &src_key, src_path->slots[cur_level]);
2091 }
2092 /* Content mismatch, something went wrong */
2093 if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2094 ret = -ENOENT;
2095 goto out;
2096 }
2097 cur_level--;
2098 }
2099
2100 /*
2101 * Now both @dst_path and @src_path have been populated, record the tree
2102 * blocks for qgroup accounting.
2103 */
2104 ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2105 nodesize, GFP_NOFS);
2106 if (ret < 0)
2107 goto out;
2108 ret = btrfs_qgroup_trace_extent(trans,
2109 dst_path->nodes[dst_level]->start,
2110 nodesize, GFP_NOFS);
2111 if (ret < 0)
2112 goto out;
2113
2114 /* Record leaf file extents */
2115 if (dst_level == 0 && trace_leaf) {
2116 ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2117 if (ret < 0)
2118 goto out;
2119 ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2120 }
2121 out:
2122 btrfs_free_path(src_path);
2123 return ret;
2124 }
2125
2126 /*
2127 * Helper function to do recursive generation-aware depth-first search, to
2128 * locate all new tree blocks in a subtree of the reloc tree.
2129 *
2130 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2131 * reloc tree
2132 * L2 NN (a)
2133 * / \
2134 * L1 OO NN (b)
2135 * / \ / \
2136 * L0 OO OO OO NN
2137 * (c) (d)
2138 * If we pass:
2139 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2140 * @cur_level = 1
2141 * @root_level = 1
2142 *
2143 * We will iterate through tree blocks NN(b) and NN(d) and inform qgroup to
2144 * trace the above tree blocks along with their counterparts in the file tree.
2145 * During the search, old tree block OO(c) will be skipped as the tree block
2146 * swap won't affect OO(c).
2147 */
2148 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
2149 struct extent_buffer *src_eb,
2150 struct btrfs_path *dst_path,
2151 int cur_level, int root_level,
2152 u64 last_snapshot, bool trace_leaf)
2153 {
2154 struct btrfs_fs_info *fs_info = trans->fs_info;
2155 struct extent_buffer *eb;
2156 bool need_cleanup = false;
2157 int ret = 0;
2158 int i;
2159
2160 /* Level sanity check */
2161 if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2162 root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2163 root_level < cur_level) {
2164 btrfs_err_rl(fs_info,
2165 "%s: bad levels, cur_level=%d root_level=%d",
2166 __func__, cur_level, root_level);
2167 return -EUCLEAN;
2168 }
2169
2170 /* Read the tree block if needed */
2171 if (dst_path->nodes[cur_level] == NULL) {
2172 int parent_slot;
2173 u64 child_gen;
2174
2175 /*
2176 * dst_path->nodes[root_level] must be initialized before
2177 * calling this function.
2178 */
2179 if (cur_level == root_level) {
2180 btrfs_err_rl(fs_info,
2181 "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2182 __func__, root_level, root_level, cur_level);
2183 return -EUCLEAN;
2184 }
2185
2186 /*
2187 * We need to get child blockptr/gen from parent before we can
2188 * read it.
2189 */
2190 eb = dst_path->nodes[cur_level + 1];
2191 parent_slot = dst_path->slots[cur_level + 1];
2192 child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2193
2194 /* This node is old, no need to trace */
2195 if (child_gen < last_snapshot)
2196 goto out;
2197
2198 eb = btrfs_read_node_slot(eb, parent_slot);
2199 if (IS_ERR(eb)) {
2200 ret = PTR_ERR(eb);
2201 goto out;
2202 }
2203
2204 dst_path->nodes[cur_level] = eb;
2205 dst_path->slots[cur_level] = 0;
2206
2207 btrfs_tree_read_lock(eb);
2208 dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2209 need_cleanup = true;
2210 }
2211
2212 /* Now record this tree block and its counterpart for qgroups */
2213 ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2214 root_level, trace_leaf);
2215 if (ret < 0)
2216 goto cleanup;
2217
2218 eb = dst_path->nodes[cur_level];
2219
2220 if (cur_level > 0) {
2221 /* Iterate all child tree blocks */
2222 for (i = 0; i < btrfs_header_nritems(eb); i++) {
2223 /* Skip old tree blocks as they won't be swapped */
2224 if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2225 continue;
2226 dst_path->slots[cur_level] = i;
2227
2228 /* Recursive call (at most 7 times) */
2229 ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2230 dst_path, cur_level - 1, root_level,
2231 last_snapshot, trace_leaf);
2232 if (ret < 0)
2233 goto cleanup;
2234 }
2235 }
2236
2237 cleanup:
2238 if (need_cleanup) {
2239 /* Clean up */
2240 btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2241 dst_path->locks[cur_level]);
2242 free_extent_buffer(dst_path->nodes[cur_level]);
2243 dst_path->nodes[cur_level] = NULL;
2244 dst_path->slots[cur_level] = 0;
2245 dst_path->locks[cur_level] = 0;
2246 }
2247 out:
2248 return ret;
2249 }
2250
2251 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2252 struct extent_buffer *src_eb,
2253 struct extent_buffer *dst_eb,
2254 u64 last_snapshot, bool trace_leaf)
2255 {
2256 struct btrfs_fs_info *fs_info = trans->fs_info;
2257 struct btrfs_path *dst_path = NULL;
2258 int level;
2259 int ret;
2260
2261 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2262 return 0;
2263
2264 /* Wrong parameter order */
2265 if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2266 btrfs_err_rl(fs_info,
2267 "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2268 btrfs_header_generation(src_eb),
2269 btrfs_header_generation(dst_eb));
2270 return -EUCLEAN;
2271 }
2272
2273 if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2274 ret = -EIO;
2275 goto out;
2276 }
2277
2278 level = btrfs_header_level(dst_eb);
2279 dst_path = btrfs_alloc_path();
2280 if (!dst_path) {
2281 ret = -ENOMEM;
2282 goto out;
2283 }
2284 /* For dst_path */
2285 atomic_inc(&dst_eb->refs);
2286 dst_path->nodes[level] = dst_eb;
2287 dst_path->slots[level] = 0;
2288 dst_path->locks[level] = 0;
2289
2290 /* Do the generation-aware depth-first search */
2291 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2292 level, last_snapshot, trace_leaf);
2293 if (ret < 0)
2294 goto out;
2295 ret = 0;
2296
2297 out:
2298 btrfs_free_path(dst_path);
2299 if (ret < 0)
2300 qgroup_mark_inconsistent(fs_info);
2301 return ret;
2302 }
2303
2304 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2305 struct extent_buffer *root_eb,
2306 u64 root_gen, int root_level)
2307 {
2308 struct btrfs_fs_info *fs_info = trans->fs_info;
2309 int ret = 0;
2310 int level;
2311 u8 drop_subptree_thres;
2312 struct extent_buffer *eb = root_eb;
2313
struct btrfs_path *path = NULL;
2314
2315 BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
2316 BUG_ON(root_eb == NULL);
2317
2318 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2319 return 0;
2320
2321 spin_lock(&fs_info->qgroup_lock);
2322 drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2323 spin_unlock(&fs_info->qgroup_lock);
2324
2325 /*
2326 * This function only gets called for snapshot drop. If we hit a high
2327 * node here, it means we are going to change ownership for quite a lot
2328 * of extents, which will greatly slow down btrfs_commit_transaction().
2329 *
2330 * So if we find a high tree here, we just skip the accounting and
2331 * mark the qgroup inconsistent.
2332 */
2333 if (root_level >= drop_subptree_thres) {
2334 qgroup_mark_inconsistent(fs_info);
2335 return 0;
2336 }
2337
2338 if (!extent_buffer_uptodate(root_eb)) {
2339 ret = btrfs_read_extent_buffer(root_eb, root_gen, root_level, NULL);
2340 if (ret)
2341 goto out;
2342 }
2343
2344 if (root_level == 0) {
2345 ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2346 goto out;
2347 }
2348
2349 path = btrfs_alloc_path();
2350 if (!path)
2351 return -ENOMEM;
2352
2353 /*
2354 * Walk down the tree. Missing extent blocks are filled in as
2355 * we go. Metadata is accounted every time we read a new
2356 * extent block.
2357 *
2358 * When we reach a leaf, we account for file extent items in it,
2359 * walk back up the tree (adjusting slot pointers as we go)
2360 * and restart the search process.
2361 */
2362 atomic_inc(&root_eb->refs); /* For path */
2363 path->nodes[root_level] = root_eb;
2364 path->slots[root_level] = 0;
2365 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2366 walk_down:
2367 level = root_level;
2368 while (level >= 0) {
2369 if (path->nodes[level] == NULL) {
2370 int parent_slot;
2371 u64 child_bytenr;
2372
2373 /*
2374 * We need to get child blockptr from parent before we
2375 * can read it.
2376 */
2377 eb = path->nodes[level + 1];
2378 parent_slot = path->slots[level + 1];
2379 child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2380
2381 eb = btrfs_read_node_slot(eb, parent_slot);
2382 if (IS_ERR(eb)) {
2383 ret = PTR_ERR(eb);
2384 goto out;
2385 }
2386
2387 path->nodes[level] = eb;
2388 path->slots[level] = 0;
2389
2390 btrfs_tree_read_lock(eb);
2391 path->locks[level] = BTRFS_READ_LOCK;
2392
2393 ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2394 fs_info->nodesize,
2395 GFP_NOFS);
2396 if (ret)
2397 goto out;
2398 }
2399
2400 if (level == 0) {
2401 ret = btrfs_qgroup_trace_leaf_items(trans,
2402 path->nodes[level]);
2403 if (ret)
2404 goto out;
2405
2406 /* Nonzero return here means we completed our search */
2407 ret = adjust_slots_upwards(path, root_level);
2408 if (ret)
2409 break;
2410
2411 /* Restart search with new slots */
2412 goto walk_down;
2413 }
2414
2415 level--;
2416 }
2417
2418 ret = 0;
2419 out:
2420 btrfs_free_path(path);
2421
2422 return ret;
2423 }
2424
2425 #define UPDATE_NEW 0
2426 #define UPDATE_OLD 1
2427 /*
2428 * Walk all of the roots that point to the bytenr and adjust their refcnts.
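 *
 * Hedged worked example of the seq trick (illustrative numbers): with
 * seq = 100 and an extent referenced by 3 roots, the first
 * btrfs_qgroup_update_old_refcnt(qg, 100, 1) call on a qgroup snaps
 * old_refcnt up to 100 and then adds 1, so after the walk old_refcnt
 * lies in (100, 103]. btrfs_qgroup_get_old_refcnt(qg, 100) recovers the
 * count as old_refcnt - 100, and any qgroup left untouched this round
 * (old_refcnt still < 100) reads back as 0 with no explicit reset.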
2429 */
2430 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2431 struct ulist *roots, struct ulist *tmp,
2432 struct ulist *qgroups, u64 seq, int update_old)
2433 {
2434 struct ulist_node *unode;
2435 struct ulist_iterator uiter;
2436 struct ulist_node *tmp_unode;
2437 struct ulist_iterator tmp_uiter;
2438 struct btrfs_qgroup *qg;
2439 int ret = 0;
2440
2441 if (!roots)
2442 return 0;
2443 ULIST_ITER_INIT(&uiter);
2444 while ((unode = ulist_next(roots, &uiter))) {
2445 qg = find_qgroup_rb(fs_info, unode->val);
2446 if (!qg)
2447 continue;
2448
2449 ulist_reinit(tmp);
2450 ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
2451 GFP_ATOMIC);
2452 if (ret < 0)
2453 return ret;
2454 ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
2455 if (ret < 0)
2456 return ret;
2457 ULIST_ITER_INIT(&tmp_uiter);
2458 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
2459 struct btrfs_qgroup_list *glist;
2460
2461 qg = unode_aux_to_qgroup(tmp_unode);
2462 if (update_old)
2463 btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2464 else
2465 btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2466 list_for_each_entry(glist, &qg->groups, next_group) {
2467 ret = ulist_add(qgroups, glist->group->qgroupid,
2468 qgroup_to_aux(glist->group),
2469 GFP_ATOMIC);
2470 if (ret < 0)
2471 return ret;
2472 ret = ulist_add(tmp, glist->group->qgroupid,
2473 qgroup_to_aux(glist->group),
2474 GFP_ATOMIC);
2475 if (ret < 0)
2476 return ret;
2477 }
2478 }
2479 }
2480 return 0;
2481 }
2482
2483 /*
2484 * Update qgroup rfer/excl counters.
2485 * Rfer update is easy, the code can explain itself.
2486 *
2487 * Excl update is tricky, the update is split into 2 parts.
2488 * Part 1: Possible exclusive <-> sharing detect:
2489 * | A | !A |
2490 * -------------------------------------
2491 * B | * | - |
2492 * -------------------------------------
2493 * !B | + | ** |
2494 * -------------------------------------
2495 *
2496 * Conditions:
2497 * A: cur_old_roots < nr_old_roots (not exclusive before)
2498 * !A: cur_old_roots == nr_old_roots (possible exclusive before)
2499 * B: cur_new_roots < nr_new_roots (not exclusive now)
2500 * !B: cur_new_roots == nr_new_roots (possible exclusive now)
2501 *
2502 * Results:
2503 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing
2504 * *: Definitely not changed. **: Possibly unchanged.
2505 *
2506 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case.
2507 *
2508 * To make the logic clear, we first use conditions A and B to split the
2509 * combination into 4 results.
2510 *
2511 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in them
2512 * only one variant may be 0.
2513 *
2514 * Lastly, check result **, since there are 2 variants that may be 0, split them
2515 * again (2x2).
2516 * But this time we don't need to consider other things; the code and logic
2517 * are easy to understand now.
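 *
 * Hedged worked example (illustrative numbers only): assume
 * nr_old_roots = 2, nr_new_roots = 2 and num_bytes = 16K.
 * - A qgroup with cur_old_count = 2 and cur_new_count = 1 was possibly
 *   exclusive before (!A) but is shared now (B): it takes the "-" path
 *   and, since cur_old_count != 0, loses 16K of excl.
 * - A qgroup with cur_old_count = 1 and cur_new_count = 2 takes the
 *   "+" path and gains 16K of excl.
 * - A qgroup with cur_old_count = cur_new_count = 2 lands in "**" with
 *   both counts nonzero: exclusive before and after, nothing changes.
 * rfer only moves when a count toggles between zero and nonzero.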
2518 */ 2519 static int qgroup_update_counters(struct btrfs_fs_info *fs_info, 2520 struct ulist *qgroups, 2521 u64 nr_old_roots, 2522 u64 nr_new_roots, 2523 u64 num_bytes, u64 seq) 2524 { 2525 struct ulist_node *unode; 2526 struct ulist_iterator uiter; 2527 struct btrfs_qgroup *qg; 2528 u64 cur_new_count, cur_old_count; 2529 2530 ULIST_ITER_INIT(&uiter); 2531 while ((unode = ulist_next(qgroups, &uiter))) { 2532 bool dirty = false; 2533 2534 qg = unode_aux_to_qgroup(unode); 2535 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); 2536 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); 2537 2538 trace_qgroup_update_counters(fs_info, qg, cur_old_count, 2539 cur_new_count); 2540 2541 /* Rfer update part */ 2542 if (cur_old_count == 0 && cur_new_count > 0) { 2543 qg->rfer += num_bytes; 2544 qg->rfer_cmpr += num_bytes; 2545 dirty = true; 2546 } 2547 if (cur_old_count > 0 && cur_new_count == 0) { 2548 qg->rfer -= num_bytes; 2549 qg->rfer_cmpr -= num_bytes; 2550 dirty = true; 2551 } 2552 2553 /* Excl update part */ 2554 /* Exclusive/none -> shared case */ 2555 if (cur_old_count == nr_old_roots && 2556 cur_new_count < nr_new_roots) { 2557 /* Exclusive -> shared */ 2558 if (cur_old_count != 0) { 2559 qg->excl -= num_bytes; 2560 qg->excl_cmpr -= num_bytes; 2561 dirty = true; 2562 } 2563 } 2564 2565 /* Shared -> exclusive/none case */ 2566 if (cur_old_count < nr_old_roots && 2567 cur_new_count == nr_new_roots) { 2568 /* Shared->exclusive */ 2569 if (cur_new_count != 0) { 2570 qg->excl += num_bytes; 2571 qg->excl_cmpr += num_bytes; 2572 dirty = true; 2573 } 2574 } 2575 2576 /* Exclusive/none -> exclusive/none case */ 2577 if (cur_old_count == nr_old_roots && 2578 cur_new_count == nr_new_roots) { 2579 if (cur_old_count == 0) { 2580 /* None -> exclusive/none */ 2581 2582 if (cur_new_count != 0) { 2583 /* None -> exclusive */ 2584 qg->excl += num_bytes; 2585 qg->excl_cmpr += num_bytes; 2586 dirty = true; 2587 } 2588 /* None -> none, nothing changed */ 2589 } else { 2590 /* Exclusive -> exclusive/none */ 2591 2592 if (cur_new_count == 0) { 2593 /* Exclusive -> none */ 2594 qg->excl -= num_bytes; 2595 qg->excl_cmpr -= num_bytes; 2596 dirty = true; 2597 } 2598 /* Exclusive -> exclusive, nothing changed */ 2599 } 2600 } 2601 2602 if (dirty) 2603 qgroup_dirty(fs_info, qg); 2604 } 2605 return 0; 2606 } 2607 2608 /* 2609 * Check if the @roots potentially is a list of fs tree roots 2610 * 2611 * Return 0 for definitely not a fs/subvol tree roots ulist 2612 * Return 1 for possible fs/subvol tree roots in the list (considering an empty 2613 * one as well) 2614 */ 2615 static int maybe_fs_roots(struct ulist *roots) 2616 { 2617 struct ulist_node *unode; 2618 struct ulist_iterator uiter; 2619 2620 /* Empty one, still possible for fs roots */ 2621 if (!roots || roots->nnodes == 0) 2622 return 1; 2623 2624 ULIST_ITER_INIT(&uiter); 2625 unode = ulist_next(roots, &uiter); 2626 if (!unode) 2627 return 1; 2628 2629 /* 2630 * If it contains fs tree roots, then it must belong to fs/subvol 2631 * trees. 2632 * If it contains a non-fs tree, it won't be shared with fs/subvol trees. 
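 *
 * For example (illustrative ids): a ulist whose first entry is
 * subvolume id 257 makes this return 1, while one holding only
 * BTRFS_EXTENT_TREE_OBJECTID (2) returns 0, since is_fstree()
 * rejects non-subvolume tree ids.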
2633 */ 2634 return is_fstree(unode->val); 2635 } 2636 2637 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, 2638 u64 num_bytes, struct ulist *old_roots, 2639 struct ulist *new_roots) 2640 { 2641 struct btrfs_fs_info *fs_info = trans->fs_info; 2642 struct ulist *qgroups = NULL; 2643 struct ulist *tmp = NULL; 2644 u64 seq; 2645 u64 nr_new_roots = 0; 2646 u64 nr_old_roots = 0; 2647 int ret = 0; 2648 2649 /* 2650 * If quotas get disabled meanwhile, the resources need to be freed and 2651 * we can't just exit here. 2652 */ 2653 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 2654 fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING) 2655 goto out_free; 2656 2657 if (new_roots) { 2658 if (!maybe_fs_roots(new_roots)) 2659 goto out_free; 2660 nr_new_roots = new_roots->nnodes; 2661 } 2662 if (old_roots) { 2663 if (!maybe_fs_roots(old_roots)) 2664 goto out_free; 2665 nr_old_roots = old_roots->nnodes; 2666 } 2667 2668 /* Quick exit, either not fs tree roots, or won't affect any qgroup */ 2669 if (nr_old_roots == 0 && nr_new_roots == 0) 2670 goto out_free; 2671 2672 BUG_ON(!fs_info->quota_root); 2673 2674 trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr, 2675 num_bytes, nr_old_roots, nr_new_roots); 2676 2677 qgroups = ulist_alloc(GFP_NOFS); 2678 if (!qgroups) { 2679 ret = -ENOMEM; 2680 goto out_free; 2681 } 2682 tmp = ulist_alloc(GFP_NOFS); 2683 if (!tmp) { 2684 ret = -ENOMEM; 2685 goto out_free; 2686 } 2687 2688 mutex_lock(&fs_info->qgroup_rescan_lock); 2689 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 2690 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) { 2691 mutex_unlock(&fs_info->qgroup_rescan_lock); 2692 ret = 0; 2693 goto out_free; 2694 } 2695 } 2696 mutex_unlock(&fs_info->qgroup_rescan_lock); 2697 2698 spin_lock(&fs_info->qgroup_lock); 2699 seq = fs_info->qgroup_seq; 2700 2701 /* Update old refcnts using old_roots */ 2702 ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq, 2703 UPDATE_OLD); 2704 if (ret < 0) 2705 goto out; 2706 2707 /* Update new refcnts using new_roots */ 2708 ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq, 2709 UPDATE_NEW); 2710 if (ret < 0) 2711 goto out; 2712 2713 qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots, 2714 num_bytes, seq); 2715 2716 /* 2717 * Bump qgroup_seq to avoid seq overlap 2718 */ 2719 fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1; 2720 out: 2721 spin_unlock(&fs_info->qgroup_lock); 2722 out_free: 2723 ulist_free(tmp); 2724 ulist_free(qgroups); 2725 ulist_free(old_roots); 2726 ulist_free(new_roots); 2727 return ret; 2728 } 2729 2730 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) 2731 { 2732 struct btrfs_fs_info *fs_info = trans->fs_info; 2733 struct btrfs_qgroup_extent_record *record; 2734 struct btrfs_delayed_ref_root *delayed_refs; 2735 struct ulist *new_roots = NULL; 2736 struct rb_node *node; 2737 u64 num_dirty_extents = 0; 2738 u64 qgroup_to_skip; 2739 int ret = 0; 2740 2741 delayed_refs = &trans->transaction->delayed_refs; 2742 qgroup_to_skip = delayed_refs->qgroup_to_skip; 2743 while ((node = rb_first(&delayed_refs->dirty_extent_root))) { 2744 record = rb_entry(node, struct btrfs_qgroup_extent_record, 2745 node); 2746 2747 num_dirty_extents++; 2748 trace_btrfs_qgroup_account_extents(fs_info, record); 2749 2750 if (!ret && !(fs_info->qgroup_flags & 2751 BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) { 2752 /* 2753 * Old roots should be searched when inserting qgroup 2754 * extent 
record 2755 */ 2756 if (WARN_ON(!record->old_roots)) { 2757 /* Search commit root to find old_roots */ 2758 ret = btrfs_find_all_roots(NULL, fs_info, 2759 record->bytenr, 0, 2760 &record->old_roots, false); 2761 if (ret < 0) 2762 goto cleanup; 2763 } 2764 2765 /* Free the reserved data space */ 2766 btrfs_qgroup_free_refroot(fs_info, 2767 record->data_rsv_refroot, 2768 record->data_rsv, 2769 BTRFS_QGROUP_RSV_DATA); 2770 /* 2771 * Use BTRFS_SEQ_LAST as time_seq to do special search, 2772 * which doesn't lock tree or delayed_refs and search 2773 * current root. It's safe inside commit_transaction(). 2774 */ 2775 ret = btrfs_find_all_roots(trans, fs_info, 2776 record->bytenr, BTRFS_SEQ_LAST, &new_roots, false); 2777 if (ret < 0) 2778 goto cleanup; 2779 if (qgroup_to_skip) { 2780 ulist_del(new_roots, qgroup_to_skip, 0); 2781 ulist_del(record->old_roots, qgroup_to_skip, 2782 0); 2783 } 2784 ret = btrfs_qgroup_account_extent(trans, record->bytenr, 2785 record->num_bytes, 2786 record->old_roots, 2787 new_roots); 2788 record->old_roots = NULL; 2789 new_roots = NULL; 2790 } 2791 cleanup: 2792 ulist_free(record->old_roots); 2793 ulist_free(new_roots); 2794 new_roots = NULL; 2795 rb_erase(node, &delayed_refs->dirty_extent_root); 2796 kfree(record); 2797 2798 } 2799 trace_qgroup_num_dirty_extents(fs_info, trans->transid, 2800 num_dirty_extents); 2801 return ret; 2802 } 2803 2804 /* 2805 * called from commit_transaction. Writes all changed qgroups to disk. 2806 */ 2807 int btrfs_run_qgroups(struct btrfs_trans_handle *trans) 2808 { 2809 struct btrfs_fs_info *fs_info = trans->fs_info; 2810 int ret = 0; 2811 2812 if (!fs_info->quota_root) 2813 return ret; 2814 2815 spin_lock(&fs_info->qgroup_lock); 2816 while (!list_empty(&fs_info->dirty_qgroups)) { 2817 struct btrfs_qgroup *qgroup; 2818 qgroup = list_first_entry(&fs_info->dirty_qgroups, 2819 struct btrfs_qgroup, dirty); 2820 list_del_init(&qgroup->dirty); 2821 spin_unlock(&fs_info->qgroup_lock); 2822 ret = update_qgroup_info_item(trans, qgroup); 2823 if (ret) 2824 qgroup_mark_inconsistent(fs_info); 2825 ret = update_qgroup_limit_item(trans, qgroup); 2826 if (ret) 2827 qgroup_mark_inconsistent(fs_info); 2828 spin_lock(&fs_info->qgroup_lock); 2829 } 2830 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2831 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; 2832 else 2833 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 2834 spin_unlock(&fs_info->qgroup_lock); 2835 2836 ret = update_qgroup_status_item(trans); 2837 if (ret) 2838 qgroup_mark_inconsistent(fs_info); 2839 2840 return ret; 2841 } 2842 2843 /* 2844 * Copy the accounting information between qgroups. This is necessary 2845 * when a snapshot or a subvolume is created. Throwing an error will 2846 * cause a transaction abort so we take extra care here to only error 2847 * when a readonly fs is a reasonable outcome. 2848 */ 2849 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, 2850 u64 objectid, struct btrfs_qgroup_inherit *inherit) 2851 { 2852 int ret = 0; 2853 int i; 2854 u64 *i_qgroups; 2855 bool committing = false; 2856 struct btrfs_fs_info *fs_info = trans->fs_info; 2857 struct btrfs_root *quota_root; 2858 struct btrfs_qgroup *srcgroup; 2859 struct btrfs_qgroup *dstgroup; 2860 bool need_rescan = false; 2861 u32 level_size = 0; 2862 u64 nums; 2863 2864 /* 2865 * There are only two callers of this function. 2866 * 2867 * One in create_subvol() in the ioctl context, which needs to hold 2868 * the qgroup_ioctl_lock. 
2869 * 2870 * The other one in create_pending_snapshot() where no other qgroup 2871 * code can modify the fs as they all need to either start a new trans 2872 * or hold a trans handler, thus we don't need to hold 2873 * qgroup_ioctl_lock. 2874 * This would avoid long and complex lock chain and make lockdep happy. 2875 */ 2876 spin_lock(&fs_info->trans_lock); 2877 if (trans->transaction->state == TRANS_STATE_COMMIT_DOING) 2878 committing = true; 2879 spin_unlock(&fs_info->trans_lock); 2880 2881 if (!committing) 2882 mutex_lock(&fs_info->qgroup_ioctl_lock); 2883 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2884 goto out; 2885 2886 quota_root = fs_info->quota_root; 2887 if (!quota_root) { 2888 ret = -EINVAL; 2889 goto out; 2890 } 2891 2892 if (inherit) { 2893 i_qgroups = (u64 *)(inherit + 1); 2894 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + 2895 2 * inherit->num_excl_copies; 2896 for (i = 0; i < nums; ++i) { 2897 srcgroup = find_qgroup_rb(fs_info, *i_qgroups); 2898 2899 /* 2900 * Zero out invalid groups so we can ignore 2901 * them later. 2902 */ 2903 if (!srcgroup || 2904 ((srcgroup->qgroupid >> 48) <= (objectid >> 48))) 2905 *i_qgroups = 0ULL; 2906 2907 ++i_qgroups; 2908 } 2909 } 2910 2911 /* 2912 * create a tracking group for the subvol itself 2913 */ 2914 ret = add_qgroup_item(trans, quota_root, objectid); 2915 if (ret) 2916 goto out; 2917 2918 /* 2919 * add qgroup to all inherited groups 2920 */ 2921 if (inherit) { 2922 i_qgroups = (u64 *)(inherit + 1); 2923 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) { 2924 if (*i_qgroups == 0) 2925 continue; 2926 ret = add_qgroup_relation_item(trans, objectid, 2927 *i_qgroups); 2928 if (ret && ret != -EEXIST) 2929 goto out; 2930 ret = add_qgroup_relation_item(trans, *i_qgroups, 2931 objectid); 2932 if (ret && ret != -EEXIST) 2933 goto out; 2934 } 2935 ret = 0; 2936 } 2937 2938 2939 spin_lock(&fs_info->qgroup_lock); 2940 2941 dstgroup = add_qgroup_rb(fs_info, objectid); 2942 if (IS_ERR(dstgroup)) { 2943 ret = PTR_ERR(dstgroup); 2944 goto unlock; 2945 } 2946 2947 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) { 2948 dstgroup->lim_flags = inherit->lim.flags; 2949 dstgroup->max_rfer = inherit->lim.max_rfer; 2950 dstgroup->max_excl = inherit->lim.max_excl; 2951 dstgroup->rsv_rfer = inherit->lim.rsv_rfer; 2952 dstgroup->rsv_excl = inherit->lim.rsv_excl; 2953 2954 ret = update_qgroup_limit_item(trans, dstgroup); 2955 if (ret) { 2956 qgroup_mark_inconsistent(fs_info); 2957 btrfs_info(fs_info, 2958 "unable to update quota limit for %llu", 2959 dstgroup->qgroupid); 2960 goto unlock; 2961 } 2962 } 2963 2964 if (srcid) { 2965 srcgroup = find_qgroup_rb(fs_info, srcid); 2966 if (!srcgroup) 2967 goto unlock; 2968 2969 /* 2970 * We call inherit after we clone the root in order to make sure 2971 * our counts don't go crazy, so at this point the only 2972 * difference between the two roots should be the root node. 
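 *
 * Illustrative consequence (assuming a 16K nodesize): source and
 * snapshot share every tree block except their COWed root nodes, so
 * dstgroup takes srcgroup's rfer unchanged while excl on both sides
 * becomes a single nodesize, 16K, each side's only private block.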
2973 */ 2974 level_size = fs_info->nodesize; 2975 dstgroup->rfer = srcgroup->rfer; 2976 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; 2977 dstgroup->excl = level_size; 2978 dstgroup->excl_cmpr = level_size; 2979 srcgroup->excl = level_size; 2980 srcgroup->excl_cmpr = level_size; 2981 2982 /* inherit the limit info */ 2983 dstgroup->lim_flags = srcgroup->lim_flags; 2984 dstgroup->max_rfer = srcgroup->max_rfer; 2985 dstgroup->max_excl = srcgroup->max_excl; 2986 dstgroup->rsv_rfer = srcgroup->rsv_rfer; 2987 dstgroup->rsv_excl = srcgroup->rsv_excl; 2988 2989 qgroup_dirty(fs_info, dstgroup); 2990 qgroup_dirty(fs_info, srcgroup); 2991 } 2992 2993 if (!inherit) 2994 goto unlock; 2995 2996 i_qgroups = (u64 *)(inherit + 1); 2997 for (i = 0; i < inherit->num_qgroups; ++i) { 2998 if (*i_qgroups) { 2999 ret = add_relation_rb(fs_info, objectid, *i_qgroups); 3000 if (ret) 3001 goto unlock; 3002 } 3003 ++i_qgroups; 3004 3005 /* 3006 * If we're doing a snapshot, and adding the snapshot to a new 3007 * qgroup, the numbers are guaranteed to be incorrect. 3008 */ 3009 if (srcid) 3010 need_rescan = true; 3011 } 3012 3013 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { 3014 struct btrfs_qgroup *src; 3015 struct btrfs_qgroup *dst; 3016 3017 if (!i_qgroups[0] || !i_qgroups[1]) 3018 continue; 3019 3020 src = find_qgroup_rb(fs_info, i_qgroups[0]); 3021 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 3022 3023 if (!src || !dst) { 3024 ret = -EINVAL; 3025 goto unlock; 3026 } 3027 3028 dst->rfer = src->rfer - level_size; 3029 dst->rfer_cmpr = src->rfer_cmpr - level_size; 3030 3031 /* Manually tweaking numbers certainly needs a rescan */ 3032 need_rescan = true; 3033 } 3034 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { 3035 struct btrfs_qgroup *src; 3036 struct btrfs_qgroup *dst; 3037 3038 if (!i_qgroups[0] || !i_qgroups[1]) 3039 continue; 3040 3041 src = find_qgroup_rb(fs_info, i_qgroups[0]); 3042 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 3043 3044 if (!src || !dst) { 3045 ret = -EINVAL; 3046 goto unlock; 3047 } 3048 3049 dst->excl = src->excl + level_size; 3050 dst->excl_cmpr = src->excl_cmpr + level_size; 3051 need_rescan = true; 3052 } 3053 3054 unlock: 3055 spin_unlock(&fs_info->qgroup_lock); 3056 if (!ret) 3057 ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup); 3058 out: 3059 if (!committing) 3060 mutex_unlock(&fs_info->qgroup_ioctl_lock); 3061 if (need_rescan) 3062 qgroup_mark_inconsistent(fs_info); 3063 return ret; 3064 } 3065 3066 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) 3067 { 3068 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 3069 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) 3070 return false; 3071 3072 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && 3073 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) 3074 return false; 3075 3076 return true; 3077 } 3078 3079 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, 3080 enum btrfs_qgroup_rsv_type type) 3081 { 3082 struct btrfs_qgroup *qgroup; 3083 struct btrfs_fs_info *fs_info = root->fs_info; 3084 u64 ref_root = root->root_key.objectid; 3085 int ret = 0; 3086 struct ulist_node *unode; 3087 struct ulist_iterator uiter; 3088 3089 if (!is_fstree(ref_root)) 3090 return 0; 3091 3092 if (num_bytes == 0) 3093 return 0; 3094 3095 if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) && 3096 capable(CAP_SYS_RESOURCE)) 3097 enforce = false; 3098 3099 spin_lock(&fs_info->qgroup_lock); 3100 if (!fs_info->quota_root) 3101 goto 
out;
3102
3103 qgroup = find_qgroup_rb(fs_info, ref_root);
3104 if (!qgroup)
3105 goto out;
3106
3107 /*
3108 * In a first step, we check all affected qgroups to see if any limits
3109 * would be exceeded.
3110 */
3111 ulist_reinit(fs_info->qgroup_ulist);
3112 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3113 qgroup_to_aux(qgroup), GFP_ATOMIC);
3114 if (ret < 0)
3115 goto out;
3116 ULIST_ITER_INIT(&uiter);
3117 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3118 struct btrfs_qgroup *qg;
3119 struct btrfs_qgroup_list *glist;
3120
3121 qg = unode_aux_to_qgroup(unode);
3122
3123 if (enforce && !qgroup_check_limits(qg, num_bytes)) {
3124 ret = -EDQUOT;
3125 goto out;
3126 }
3127
3128 list_for_each_entry(glist, &qg->groups, next_group) {
3129 ret = ulist_add(fs_info->qgroup_ulist,
3130 glist->group->qgroupid,
3131 qgroup_to_aux(glist->group), GFP_ATOMIC);
3132 if (ret < 0)
3133 goto out;
3134 }
3135 }
3136 ret = 0;
3137 /*
3138 * No limits exceeded, now record the reservation into all qgroups.
3139 */
3140 ULIST_ITER_INIT(&uiter);
3141 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3142 struct btrfs_qgroup *qg;
3143
3144 qg = unode_aux_to_qgroup(unode);
3145
3146 qgroup_rsv_add(fs_info, qg, num_bytes, type);
3147 }
3148
3149 out:
3150 spin_unlock(&fs_info->qgroup_lock);
3151 return ret;
3152 }
3153
3154 /*
3155 * Free @num_bytes of reserved space with @type for a qgroup (normally the
3156 * level-0 qgroup).
3157 *
3158 * Will handle all higher level qgroups too.
3159 *
3160 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3161 * This special case is only used for the META_PERTRANS type.
3162 */
3163 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3164 u64 ref_root, u64 num_bytes,
3165 enum btrfs_qgroup_rsv_type type)
3166 {
3167 struct btrfs_qgroup *qgroup;
3168 struct ulist_node *unode;
3169 struct ulist_iterator uiter;
3170 int ret = 0;
3171
3172 if (!is_fstree(ref_root))
3173 return;
3174
3175 if (num_bytes == 0)
3176 return;
3177
3178 if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3179 WARN(1, "%s: Invalid type to free", __func__);
3180 return;
3181 }
3182 spin_lock(&fs_info->qgroup_lock);
3183
3184 if (!fs_info->quota_root)
3185 goto out;
3186
3187 qgroup = find_qgroup_rb(fs_info, ref_root);
3188 if (!qgroup)
3189 goto out;
3190
3191 if (num_bytes == (u64)-1)
3192 /*
3193 * We're freeing all pertrans rsv, get the reserved value from
3194 * the level-0 qgroup as the real num_bytes to free.
3195 */
3196 num_bytes = qgroup->rsv.values[type];
3197
3198 ulist_reinit(fs_info->qgroup_ulist);
3199 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3200 qgroup_to_aux(qgroup), GFP_ATOMIC);
3201 if (ret < 0)
3202 goto out;
3203 ULIST_ITER_INIT(&uiter);
3204 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3205 struct btrfs_qgroup *qg;
3206 struct btrfs_qgroup_list *glist;
3207
3208 qg = unode_aux_to_qgroup(unode);
3209
3210 qgroup_rsv_release(fs_info, qg, num_bytes, type);
3211
3212 list_for_each_entry(glist, &qg->groups, next_group) {
3213 ret = ulist_add(fs_info->qgroup_ulist,
3214 glist->group->qgroupid,
3215 qgroup_to_aux(glist->group), GFP_ATOMIC);
3216 if (ret < 0)
3217 goto out;
3218 }
3219 }
3220
3221 out:
3222 spin_unlock(&fs_info->qgroup_lock);
3223 }
3224
3225 /*
3226 * Check if the leaf is the last leaf, which means all node pointers
3227 * are at their last position.
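 *
 * E.g. (illustrative numbers) with a 2-level path where nodes[1] holds
 * 5 pointers: slots[1] == 4 (the last one) makes this return true,
 * slots[1] == 3 makes it return false.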
3228 */
3229 static bool is_last_leaf(struct btrfs_path *path)
3230 {
3231 int i;
3232
3233 for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3234 if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3235 return false;
3236 }
3237 return true;
3238 }
3239
3240 /*
3241 * Returns < 0 on error, 0 when more leaves are to be scanned.
3242 * Returns 1 when done.
3243 */
3244 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3245 struct btrfs_path *path)
3246 {
3247 struct btrfs_fs_info *fs_info = trans->fs_info;
3248 struct btrfs_root *extent_root;
3249 struct btrfs_key found;
3250 struct extent_buffer *scratch_leaf = NULL;
3251 struct ulist *roots = NULL;
3252 u64 num_bytes;
3253 bool done;
3254 int slot;
3255 int ret;
3256
3257 mutex_lock(&fs_info->qgroup_rescan_lock);
3258 extent_root = btrfs_extent_root(fs_info,
3259 fs_info->qgroup_rescan_progress.objectid);
3260 ret = btrfs_search_slot_for_read(extent_root,
3261 &fs_info->qgroup_rescan_progress,
3262 path, 1, 0);
3263
3264 btrfs_debug(fs_info,
3265 "current progress key (%llu %u %llu), search_slot ret %d",
3266 fs_info->qgroup_rescan_progress.objectid,
3267 fs_info->qgroup_rescan_progress.type,
3268 fs_info->qgroup_rescan_progress.offset, ret);
3269
3270 if (ret) {
3271 /*
3272 * The rescan is about to end, we will not be scanning any
3273 * further blocks. We cannot unset the RESCAN flag here, because
3274 * we want to commit the transaction if everything went well.
3275 * To make the live accounting work in this phase, we set our
3276 * scan progress pointer such that every real extent objectid
3277 * will be smaller.
3278 */
3279 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3280 btrfs_release_path(path);
3281 mutex_unlock(&fs_info->qgroup_rescan_lock);
3282 return ret;
3283 }
3284 done = is_last_leaf(path);
3285
3286 btrfs_item_key_to_cpu(path->nodes[0], &found,
3287 btrfs_header_nritems(path->nodes[0]) - 1);
3288 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3289
3290 scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3291 if (!scratch_leaf) {
3292 ret = -ENOMEM;
3293 mutex_unlock(&fs_info->qgroup_rescan_lock);
3294 goto out;
3295 }
3296 slot = path->slots[0];
3297 btrfs_release_path(path);
3298 mutex_unlock(&fs_info->qgroup_rescan_lock);
3299
3300 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3301 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3302 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3303 found.type != BTRFS_METADATA_ITEM_KEY)
3304 continue;
3305 if (found.type == BTRFS_METADATA_ITEM_KEY)
3306 num_bytes = fs_info->nodesize;
3307 else
3308 num_bytes = found.offset;
3309
3310 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
3311 &roots, false);
3312 if (ret < 0)
3313 goto out;
3314 /* For rescan, just pass old_roots as NULL */
3315 ret = btrfs_qgroup_account_extent(trans, found.objectid,
3316 num_bytes, NULL, roots);
3317 if (ret < 0)
3318 goto out;
3319 }
3320 out:
3321 if (scratch_leaf)
3322 free_extent_buffer(scratch_leaf);
3323
3324 if (done && !ret) {
3325 ret = 1;
3326 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3327 }
3328 return ret;
3329 }
3330
3331 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3332 {
3333 return btrfs_fs_closing(fs_info) ||
3334 test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state) ||
3335 !test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3336 fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3337 }
3338
3339 static void btrfs_qgroup_rescan_worker(struct btrfs_work
*work) 3340 { 3341 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, 3342 qgroup_rescan_work); 3343 struct btrfs_path *path; 3344 struct btrfs_trans_handle *trans = NULL; 3345 int err = -ENOMEM; 3346 int ret = 0; 3347 bool stopped = false; 3348 3349 path = btrfs_alloc_path(); 3350 if (!path) 3351 goto out; 3352 /* 3353 * Rescan should only search for commit root, and any later difference 3354 * should be recorded by qgroup 3355 */ 3356 path->search_commit_root = 1; 3357 path->skip_locking = 1; 3358 3359 err = 0; 3360 while (!err && !(stopped = rescan_should_stop(fs_info))) { 3361 trans = btrfs_start_transaction(fs_info->fs_root, 0); 3362 if (IS_ERR(trans)) { 3363 err = PTR_ERR(trans); 3364 break; 3365 } 3366 3367 err = qgroup_rescan_leaf(trans, path); 3368 3369 if (err > 0) 3370 btrfs_commit_transaction(trans); 3371 else 3372 btrfs_end_transaction(trans); 3373 } 3374 3375 out: 3376 btrfs_free_path(path); 3377 3378 mutex_lock(&fs_info->qgroup_rescan_lock); 3379 if (err > 0 && 3380 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { 3381 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 3382 } else if (err < 0 || stopped) { 3383 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 3384 } 3385 mutex_unlock(&fs_info->qgroup_rescan_lock); 3386 3387 /* 3388 * only update status, since the previous part has already updated the 3389 * qgroup info. 3390 */ 3391 trans = btrfs_start_transaction(fs_info->quota_root, 1); 3392 if (IS_ERR(trans)) { 3393 err = PTR_ERR(trans); 3394 trans = NULL; 3395 btrfs_err(fs_info, 3396 "fail to start transaction for status update: %d", 3397 err); 3398 } 3399 3400 mutex_lock(&fs_info->qgroup_rescan_lock); 3401 if (!stopped || 3402 fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) 3403 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3404 if (trans) { 3405 ret = update_qgroup_status_item(trans); 3406 if (ret < 0) { 3407 err = ret; 3408 btrfs_err(fs_info, "fail to update qgroup status: %d", 3409 err); 3410 } 3411 } 3412 fs_info->qgroup_rescan_running = false; 3413 fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN; 3414 complete_all(&fs_info->qgroup_rescan_completion); 3415 mutex_unlock(&fs_info->qgroup_rescan_lock); 3416 3417 if (!trans) 3418 return; 3419 3420 btrfs_end_transaction(trans); 3421 3422 if (stopped) { 3423 btrfs_info(fs_info, "qgroup scan paused"); 3424 } else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) { 3425 btrfs_info(fs_info, "qgroup scan cancelled"); 3426 } else if (err >= 0) { 3427 btrfs_info(fs_info, "qgroup scan completed%s", 3428 err > 0 ? " (inconsistency flag cleared)" : ""); 3429 } else { 3430 btrfs_err(fs_info, "qgroup scan failed with %d", err); 3431 } 3432 } 3433 3434 /* 3435 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all 3436 * memory required for the rescan context. 
3437 */ 3438 static int 3439 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 3440 int init_flags) 3441 { 3442 int ret = 0; 3443 3444 if (!init_flags) { 3445 /* we're resuming qgroup rescan at mount time */ 3446 if (!(fs_info->qgroup_flags & 3447 BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { 3448 btrfs_warn(fs_info, 3449 "qgroup rescan init failed, qgroup rescan is not queued"); 3450 ret = -EINVAL; 3451 } else if (!(fs_info->qgroup_flags & 3452 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3453 btrfs_warn(fs_info, 3454 "qgroup rescan init failed, qgroup is not enabled"); 3455 ret = -EINVAL; 3456 } 3457 3458 if (ret) 3459 return ret; 3460 } 3461 3462 mutex_lock(&fs_info->qgroup_rescan_lock); 3463 3464 if (init_flags) { 3465 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 3466 btrfs_warn(fs_info, 3467 "qgroup rescan is already in progress"); 3468 ret = -EINPROGRESS; 3469 } else if (!(fs_info->qgroup_flags & 3470 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3471 btrfs_warn(fs_info, 3472 "qgroup rescan init failed, qgroup is not enabled"); 3473 ret = -EINVAL; 3474 } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { 3475 /* Quota disable is in progress */ 3476 ret = -EBUSY; 3477 } 3478 3479 if (ret) { 3480 mutex_unlock(&fs_info->qgroup_rescan_lock); 3481 return ret; 3482 } 3483 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3484 } 3485 3486 memset(&fs_info->qgroup_rescan_progress, 0, 3487 sizeof(fs_info->qgroup_rescan_progress)); 3488 fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN | 3489 BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING); 3490 fs_info->qgroup_rescan_progress.objectid = progress_objectid; 3491 init_completion(&fs_info->qgroup_rescan_completion); 3492 mutex_unlock(&fs_info->qgroup_rescan_lock); 3493 3494 btrfs_init_work(&fs_info->qgroup_rescan_work, 3495 btrfs_qgroup_rescan_worker, NULL, NULL); 3496 return 0; 3497 } 3498 3499 static void 3500 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) 3501 { 3502 struct rb_node *n; 3503 struct btrfs_qgroup *qgroup; 3504 3505 spin_lock(&fs_info->qgroup_lock); 3506 /* clear all current qgroup tracking information */ 3507 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { 3508 qgroup = rb_entry(n, struct btrfs_qgroup, node); 3509 qgroup->rfer = 0; 3510 qgroup->rfer_cmpr = 0; 3511 qgroup->excl = 0; 3512 qgroup->excl_cmpr = 0; 3513 qgroup_dirty(fs_info, qgroup); 3514 } 3515 spin_unlock(&fs_info->qgroup_lock); 3516 } 3517 3518 int 3519 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) 3520 { 3521 int ret = 0; 3522 struct btrfs_trans_handle *trans; 3523 3524 ret = qgroup_rescan_init(fs_info, 0, 1); 3525 if (ret) 3526 return ret; 3527 3528 /* 3529 * We have set the rescan_progress to 0, which means no more 3530 * delayed refs will be accounted by btrfs_qgroup_account_ref. 3531 * However, btrfs_qgroup_account_ref may be right after its call 3532 * to btrfs_find_all_roots, in which case it would still do the 3533 * accounting. 3534 * To solve this, we're committing the transaction, which will 3535 * ensure we run all delayed refs and only after that, we are 3536 * going to clear all tracking information for a clean start. 
3537 */ 3538 3539 trans = btrfs_join_transaction(fs_info->fs_root); 3540 if (IS_ERR(trans)) { 3541 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3542 return PTR_ERR(trans); 3543 } 3544 ret = btrfs_commit_transaction(trans); 3545 if (ret) { 3546 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3547 return ret; 3548 } 3549 3550 qgroup_rescan_zero_tracking(fs_info); 3551 3552 mutex_lock(&fs_info->qgroup_rescan_lock); 3553 fs_info->qgroup_rescan_running = true; 3554 btrfs_queue_work(fs_info->qgroup_rescan_workers, 3555 &fs_info->qgroup_rescan_work); 3556 mutex_unlock(&fs_info->qgroup_rescan_lock); 3557 3558 return 0; 3559 } 3560 3561 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, 3562 bool interruptible) 3563 { 3564 int running; 3565 int ret = 0; 3566 3567 mutex_lock(&fs_info->qgroup_rescan_lock); 3568 running = fs_info->qgroup_rescan_running; 3569 mutex_unlock(&fs_info->qgroup_rescan_lock); 3570 3571 if (!running) 3572 return 0; 3573 3574 if (interruptible) 3575 ret = wait_for_completion_interruptible( 3576 &fs_info->qgroup_rescan_completion); 3577 else 3578 wait_for_completion(&fs_info->qgroup_rescan_completion); 3579 3580 return ret; 3581 } 3582 3583 /* 3584 * this is only called from open_ctree where we're still single threaded, thus 3585 * locking is omitted here. 3586 */ 3587 void 3588 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) 3589 { 3590 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 3591 mutex_lock(&fs_info->qgroup_rescan_lock); 3592 fs_info->qgroup_rescan_running = true; 3593 btrfs_queue_work(fs_info->qgroup_rescan_workers, 3594 &fs_info->qgroup_rescan_work); 3595 mutex_unlock(&fs_info->qgroup_rescan_lock); 3596 } 3597 } 3598 3599 #define rbtree_iterate_from_safe(node, next, start) \ 3600 for (node = start; node && ({ next = rb_next(node); 1;}); node = next) 3601 3602 static int qgroup_unreserve_range(struct btrfs_inode *inode, 3603 struct extent_changeset *reserved, u64 start, 3604 u64 len) 3605 { 3606 struct rb_node *node; 3607 struct rb_node *next; 3608 struct ulist_node *entry; 3609 int ret = 0; 3610 3611 node = reserved->range_changed.root.rb_node; 3612 if (!node) 3613 return 0; 3614 while (node) { 3615 entry = rb_entry(node, struct ulist_node, rb_node); 3616 if (entry->val < start) 3617 node = node->rb_right; 3618 else 3619 node = node->rb_left; 3620 } 3621 3622 if (entry->val > start && rb_prev(&entry->rb_node)) 3623 entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node, 3624 rb_node); 3625 3626 rbtree_iterate_from_safe(node, next, &entry->rb_node) { 3627 u64 entry_start; 3628 u64 entry_end; 3629 u64 entry_len; 3630 int clear_ret; 3631 3632 entry = rb_entry(node, struct ulist_node, rb_node); 3633 entry_start = entry->val; 3634 entry_end = entry->aux; 3635 entry_len = entry_end - entry_start + 1; 3636 3637 if (entry_start >= start + len) 3638 break; 3639 if (entry_start + entry_len <= start) 3640 continue; 3641 /* 3642 * Now the entry is in [start, start + len), revert the 3643 * EXTENT_QGROUP_RESERVED bit. 
3644 */
3645 clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
3646 entry_end, EXTENT_QGROUP_RESERVED);
3647 if (!ret && clear_ret < 0)
3648 ret = clear_ret;
3649
3650 ulist_del(&reserved->range_changed, entry->val, entry->aux);
3651 if (likely(reserved->bytes_changed >= entry_len)) {
3652 reserved->bytes_changed -= entry_len;
3653 } else {
3654 WARN_ON(1);
3655 reserved->bytes_changed = 0;
3656 }
3657 }
3658
3659 return ret;
3660 }
3661
3662 /*
3663 * Try to free some space for qgroup.
3664 *
3665 * For qgroup, there are only 3 ways to free qgroup space:
3666 * - Flush nodatacow writes
3667 * Any nodatacow write will free its reserved data space at run_delalloc_range().
3668 * In theory, we should only flush nodatacow inodes, but it's not yet
3669 * possible, so we need to flush the whole root.
3670 *
3671 * - Wait for ordered extents
3672 * When ordered extents are finished, their reserved metadata is finally
3673 * converted to per_trans status, which can be freed by a later commit
3674 * transaction.
3675 *
3676 * - Commit transaction
3677 * This would free the meta_per_trans space.
3678 * In theory this shouldn't provide much space, but any more qgroup space
3679 * is better than nothing.
3680 */
3681 static int try_flush_qgroup(struct btrfs_root *root)
3682 {
3683 struct btrfs_trans_handle *trans;
3684 int ret;
3685
3686 /* Can't hold an open transaction or we run the risk of deadlocking. */
3687 ASSERT(current->journal_info == NULL);
3688 if (WARN_ON(current->journal_info))
3689 return 0;
3690
3691 /*
3692 * We don't want to run flush again and again, so if there is a running
3693 * one, we won't try to start a new flush, but exit directly.
3694 */
3695 if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
3696 wait_event(root->qgroup_flush_wait,
3697 !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
3698 return 0;
3699 }
3700
3701 ret = btrfs_start_delalloc_snapshot(root, true);
3702 if (ret < 0)
3703 goto out;
3704 btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
3705
3706 trans = btrfs_join_transaction(root);
3707 if (IS_ERR(trans)) {
3708 ret = PTR_ERR(trans);
3709 goto out;
3710 }
3711
3712 ret = btrfs_commit_transaction(trans);
3713 out:
3714 clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
3715 wake_up(&root->qgroup_flush_wait);
3716 return ret;
3717 }
3718
3719 static int qgroup_reserve_data(struct btrfs_inode *inode,
3720 struct extent_changeset **reserved_ret, u64 start,
3721 u64 len)
3722 {
3723 struct btrfs_root *root = inode->root;
3724 struct extent_changeset *reserved;
3725 bool new_reserved = false;
3726 u64 orig_reserved;
3727 u64 to_reserve;
3728 int ret;
3729
3730 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
3731 !is_fstree(root->root_key.objectid) || len == 0)
3732 return 0;
3733
3734 /* @reserved parameter is mandatory for qgroup */
3735 if (WARN_ON(!reserved_ret))
3736 return -EINVAL;
3737 if (!*reserved_ret) {
3738 new_reserved = true;
3739 *reserved_ret = extent_changeset_alloc();
3740 if (!*reserved_ret)
3741 return -ENOMEM;
3742 }
3743 reserved = *reserved_ret;
3744 /* Record already reserved space */
3745 orig_reserved = reserved->bytes_changed;
3746 ret = set_record_extent_bits(&inode->io_tree, start,
3747 start + len - 1, EXTENT_QGROUP_RESERVED, reserved);
3748
3749 /* Newly reserved space */
3750 to_reserve = reserved->bytes_changed - orig_reserved;
3751 trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
3752 to_reserve, QGROUP_RESERVE);
3753 if (ret < 0)
3754 goto out;
3755 ret = qgroup_reserve(root,
to_reserve, true, BTRFS_QGROUP_RSV_DATA);
3756 if (ret < 0)
3757 goto cleanup;
3758
3759 return ret;
3760
3761 cleanup:
3762 qgroup_unreserve_range(inode, reserved, start, len);
3763 out:
3764 if (new_reserved) {
3765 extent_changeset_free(reserved);
3766 *reserved_ret = NULL;
3767 }
3768 return ret;
3769 }
3770
3771 /*
3772 * Reserve qgroup space for range [start, start + len).
3773 *
3774 * This function will either reserve space from related qgroups or do nothing
3775 * if the range is already reserved.
3776 *
3777 * Return 0 for successful reservation
3778 * Return <0 for error (including -EDQUOT)
3779 *
3780 * NOTE: This function may sleep for memory allocation, dirty page flushing and
3781 * transaction commit. So the caller should not hold any dirty page locked.
3782 */
3783 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
3784 struct extent_changeset **reserved_ret, u64 start,
3785 u64 len)
3786 {
3787 int ret;
3788
3789 ret = qgroup_reserve_data(inode, reserved_ret, start, len);
3790 if (ret <= 0 && ret != -EDQUOT)
3791 return ret;
3792
3793 ret = try_flush_qgroup(inode->root);
3794 if (ret < 0)
3795 return ret;
3796 return qgroup_reserve_data(inode, reserved_ret, start, len);
3797 }
3798
3799 /* Free ranges specified by @reserved, normally in error path */
3800 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
3801 struct extent_changeset *reserved, u64 start, u64 len)
3802 {
3803 struct btrfs_root *root = inode->root;
3804 struct ulist_node *unode;
3805 struct ulist_iterator uiter;
3806 struct extent_changeset changeset;
3807 int freed = 0;
3808 int ret;
3809
3810 extent_changeset_init(&changeset);
3811 len = round_up(start + len, root->fs_info->sectorsize);
3812 start = round_down(start, root->fs_info->sectorsize);
3813
3814 ULIST_ITER_INIT(&uiter);
3815 while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
3816 u64 range_start = unode->val;
3817 /* unode->aux is the inclusive end */
3818 u64 range_len = unode->aux - range_start + 1;
3819 u64 free_start;
3820 u64 free_len;
3821
3822 extent_changeset_release(&changeset);
3823
3824 /* Only free range in range [start, start + len) */
3825 if (range_start >= start + len ||
3826 range_start + range_len <= start)
3827 continue;
3828 free_start = max(range_start, start);
3829 free_len = min(start + len, range_start + range_len) -
3830 free_start;
3831 /*
3832 * TODO: Also modify reserved->ranges_reserved to reflect
3833 * the modification.
3834 *
3835 * However as long as we free qgroup reserved according to
3836 * EXTENT_QGROUP_RESERVED, we won't double free.
3837 * So no need to rush.
3838 */
3839 ret = clear_record_extent_bits(&inode->io_tree, free_start,
3840 free_start + free_len - 1,
3841 EXTENT_QGROUP_RESERVED, &changeset);
3842 if (ret < 0)
3843 goto out;
3844 freed += changeset.bytes_changed;
3845 }
3846 btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
3847 BTRFS_QGROUP_RSV_DATA);
3848 ret = freed;
3849 out:
3850 extent_changeset_release(&changeset);
3851 return ret;
3852 }
3853
3854 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
3855 struct extent_changeset *reserved, u64 start, u64 len,
3856 int free)
3857 {
3858 struct extent_changeset changeset;
3859 int trace_op = QGROUP_RELEASE;
3860 int ret;
3861
3862 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
3863 return 0;
3864
3865 /* In the release case, we shouldn't have @reserved */
3866 WARN_ON(!free && reserved);
3867 if (free && reserved)
3868 return qgroup_free_reserved_data(inode, reserved, start, len);
3869 extent_changeset_init(&changeset);
3870 ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
3871 EXTENT_QGROUP_RESERVED, &changeset);
3872 if (ret < 0)
3873 goto out;
3874
3875 if (free)
3876 trace_op = QGROUP_FREE;
3877 trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
3878 changeset.bytes_changed, trace_op);
3879 if (free)
3880 btrfs_qgroup_free_refroot(inode->root->fs_info,
3881 inode->root->root_key.objectid,
3882 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3883 ret = changeset.bytes_changed;
3884 out:
3885 extent_changeset_release(&changeset);
3886 return ret;
3887 }
3888
3889 /*
3890 * Free a reserved space range from io_tree and related qgroups.
3891 *
3892 * Should be called when a range of pages gets invalidated before reaching
3893 * disk, or for the error cleanup case.
3894 * If @reserved is given, only the reserved range in [@start, @start + @len)
3895 * will be freed.
3896 *
3897 * For data written to disk, use btrfs_qgroup_release_data().
3898 *
3899 * NOTE: This function may sleep for memory allocation.
3900 */
3901 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
3902 struct extent_changeset *reserved, u64 start, u64 len)
3903 {
3904 return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
3905 }
3906
3907 /*
3908 * Release a reserved space range from io_tree only.
3909 *
3910 * Should be called when a range of pages gets written to disk and the
3911 * corresponding FILE_EXTENT is inserted into the corresponding root.
3912 *
3913 * Since the new qgroup accounting framework will only update qgroup numbers at
3914 * commit_transaction() time, its reserved space shouldn't be freed from
3915 * related qgroups.
3916 *
3917 * But we should release the range from io_tree, to allow further writes to be
3918 * COWed.
3919 *
3920 * NOTE: This function may sleep for memory allocation.
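 *
 * Illustrative pairing (hypothetical call flow, not from this file):
 * a buffered write would reserve via btrfs_qgroup_reserve_data(), then
 * either call btrfs_qgroup_release_data() once the ordered extent
 * completes and the file extent item is safely on disk, or
 * btrfs_qgroup_free_data() if the pages are invalidated on error.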
/*
 * Free a reserved space range from io_tree and related qgroups.
 *
 * Should be called when a range of pages gets invalidated before reaching
 * disk, or for the error cleanup case.
 * If @reserved is given, only the reserved ranges in [@start, @start + @len)
 * will be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			   struct extent_changeset *reserved, u64 start,
			   u64 len)
{
	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
}

/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages gets written to disk and the
 * corresponding FILE_EXTENT item is inserted into the corresponding root.
 *
 * Since the new qgroup accounting framework only updates qgroup numbers at
 * commit_transaction() time, the reserved space shouldn't be freed from
 * related qgroups.
 *
 * But we should release the range from io_tree, to allow further writes to
 * be COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
}

static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return;
	if (num_bytes == 0)
		return;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		root->qgroup_meta_rsv_prealloc += num_bytes;
	else
		root->qgroup_meta_rsv_pertrans += num_bytes;
	spin_unlock(&root->qgroup_meta_rsv_lock);
}

static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type)
{
	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
		return 0;
	if (num_bytes == 0)
		return 0;

	spin_lock(&root->qgroup_meta_rsv_lock);
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
				  num_bytes);
		root->qgroup_meta_rsv_prealloc -= num_bytes;
	} else {
		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
				  num_bytes);
		root->qgroup_meta_rsv_pertrans -= num_bytes;
	}
	spin_unlock(&root->qgroup_meta_rsv_lock);
	return num_bytes;
}
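/*
 * Illustrative sketch (not kernel code): the clamp-on-release pattern used
 * by sub_root_meta_rsv() above. A release is capped at what was actually
 * recorded, so freeing more than was reserved (e.g. across a quota
 * disable/enable cycle) can never underflow the counter. Names are
 * hypothetical.
 */
#if 0
#include <assert.h>

static unsigned long long tracked_rsv;

static unsigned long long rsv_sub_clamped(unsigned long long bytes)
{
	if (bytes > tracked_rsv)	/* clamp instead of underflowing */
		bytes = tracked_rsv;
	tracked_rsv -= bytes;
	return bytes;			/* the amount actually released */
}

int main(void)
{
	tracked_rsv = 4096;
	assert(rsv_sub_clamped(16384) == 4096);	/* over-release is clamped */
	assert(tracked_rsv == 0);
	return 0;
}
#endif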
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
		return 0;

	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
	ret = qgroup_reserve(root, num_bytes, enforce, type);
	if (ret < 0)
		return ret;
	/*
	 * Record what we have reserved into root, to avoid underflow after a
	 * quota disable->enable cycle.
	 *
	 * In that case we may try to free space we haven't reserved (since
	 * quota was disabled), so record what we reserved into root and
	 * ensure a later release won't underflow this number.
	 */
	add_root_meta_rsv(root, num_bytes, type);
	return ret;
}

int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce,
				bool noflush)
{
	int ret;

	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
	if ((ret <= 0 && ret != -EDQUOT) || noflush)
		return ret;

	ret = try_flush_qgroup(root);
	if (ret < 0)
		return ret;
	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
}

void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/* TODO: Update the trace point to handle such a free */
	trace_qgroup_meta_free_all_pertrans(root);
	/* Special value -1 means to free all reserved space */
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
				  BTRFS_QGROUP_RSV_META_PERTRANS);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;

	/*
	 * Reservation for META_PREALLOC can happen before quota is enabled,
	 * which can lead to underflow.
	 * Here we ensure we will only free what we really have reserved.
	 */
	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
				  num_bytes, type);
}

static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
				int num_bytes)
{
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (num_bytes == 0)
		return;
	if (!fs_info->quota_root)
		return;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			qgroup_to_aux(qgroup), GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = unode_aux_to_qgroup(unode);

		qgroup_rsv_release(fs_info, qg, num_bytes,
				   BTRFS_QGROUP_RSV_META_PREALLOC);
		qgroup_rsv_add(fs_info, qg, num_bytes,
			       BTRFS_QGROUP_RSV_META_PERTRANS);
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					qgroup_to_aux(glist->group),
					GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
out:
	spin_unlock(&fs_info->qgroup_lock);
}

void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
	    !is_fstree(root->root_key.objectid))
		return;
	/* Same as btrfs_qgroup_free_meta_prealloc() */
	num_bytes = sub_root_meta_rsv(root, num_bytes,
				      BTRFS_QGROUP_RSV_META_PREALLOC);
	trace_qgroup_meta_convert(root, num_bytes);
	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}
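/*
 * Illustrative sketch (not kernel code): qgroup_convert_meta() above moves a
 * reservation from the PREALLOC bucket to the PERTRANS bucket for a qgroup
 * and every ancestor group, walking upwards the way the kernel walks the
 * ulist. This userspace model with hypothetical names shows the same idea
 * on a simple single-parent chain.
 */
#if 0
#include <assert.h>
#include <stddef.h>

struct group {
	unsigned long long prealloc;
	unsigned long long pertrans;
	struct group *parent;		/* single parent for simplicity */
};

static void convert_meta(struct group *g, unsigned long long bytes)
{
	/* Visit the group and all its ancestors, like the ulist walk. */
	for (; g; g = g->parent) {
		g->prealloc -= bytes;	/* release from one bucket ... */
		g->pertrans += bytes;	/* ... and add to the other */
	}
}

int main(void)
{
	struct group parent = { .prealloc = 16384 };
	struct group child = { .prealloc = 16384, .parent = &parent };

	convert_meta(&child, 4096);
	assert(child.prealloc == 12288 && child.pertrans == 4096);
	assert(parent.prealloc == 12288 && parent.pertrans == 4096);
	return 0;
}
#endif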
/*
 * Check for leaked qgroup reserved space, normally at inode destruction
 * time.
 */
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	extent_changeset_init(&changeset);
	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
				       EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
			btrfs_warn(inode->root->fs_info,
		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
				   btrfs_ino(inode), unode->val, unode->aux);
		}
		btrfs_qgroup_free_refroot(inode->root->fs_info,
				inode->root->root_key.objectid,
				changeset.bytes_changed,
				BTRFS_QGROUP_RSV_DATA);
	}
	extent_changeset_release(&changeset);
}

void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
	int i;

	spin_lock_init(&swapped_blocks->lock);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		swapped_blocks->blocks[i] = RB_ROOT;
	swapped_blocks->swapped = false;
}

/*
 * Delete all swapped block records of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Called when committing a transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{
	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
	int i;

	swapped_blocks = &root->swapped_blocks;

	spin_lock(&swapped_blocks->lock);
	if (!swapped_blocks->swapped)
		goto out;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		struct rb_root *cur_root = &swapped_blocks->blocks[i];
		struct btrfs_qgroup_swapped_block *entry;
		struct btrfs_qgroup_swapped_block *next;

		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
						     node)
			kfree(entry);
		swapped_blocks->blocks[i] = RB_ROOT;
	}
	swapped_blocks->swapped = false;
out:
	spin_unlock(&swapped_blocks->lock);
}
/*
 * Add a subtree root record into @subvol_root.
 *
 * @subvol_root:	tree root of the subvolume tree that got swapped
 * @bg:			block group under balance
 * @subvol_parent/slot:	pointer to the subtree root in the subvolume tree
 * @reloc_parent/slot:	pointer to the subtree root in the reloc tree
 *			BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:	last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	int level = btrfs_header_level(subvol_parent) - 1;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
			__func__,
			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
		return -EUCLEAN;
	}

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * @reloc_parent/slot is still before the swap, while @block is going
	 * to record the bytenr after the swap, so we do the swap here.
	 */
	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
							     reloc_slot);
	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
							    subvol_slot);
	block->last_snapshot = last_snapshot;
	block->level = level;

	/*
	 * If we have bg == NULL, we're called from btrfs_recover_relocation()
	 * and no one else can modify tree blocks, thus qgroup numbers will
	 * not change no matter the value of trace_leaf.
	 */
	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
		block->trace_leaf = true;
	else
		block->trace_leaf = false;
	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

	/* Insert @block into @blocks */
	spin_lock(&blocks->lock);
	cur = &blocks->blocks[level].rb_node;
	while (*cur) {
		struct btrfs_qgroup_swapped_block *entry;

		parent = *cur;
		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
				 node);

		if (entry->subvol_bytenr < block->subvol_bytenr) {
			cur = &(*cur)->rb_left;
		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
			cur = &(*cur)->rb_right;
		} else {
			if (entry->subvol_generation !=
					block->subvol_generation ||
			    entry->reloc_bytenr != block->reloc_bytenr ||
			    entry->reloc_generation !=
					block->reloc_generation) {
				/*
				 * Duplicate but mismatched entry found.
				 * Shouldn't happen.
				 *
				 * Marking the qgroup inconsistent should be
				 * enough for end users.
				 */
				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
				ret = -EEXIST;
			}
			kfree(block);
			goto out_unlock;
		}
	}
	rb_link_node(&block->node, parent, cur);
	rb_insert_color(&block->node, &blocks->blocks[level]);
	blocks->swapped = true;
out_unlock:
	spin_unlock(&blocks->lock);
out:
	if (ret < 0)
		qgroup_mark_inconsistent(fs_info);
	return ret;
}
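/*
 * Illustrative sketch (not kernel code): the swapped-block trees above key
 * records by subvol_bytenr with an inverted comparison (smaller keys
 * descend to the left child), which works as long as insertion and lookup
 * use the same rule, as btrfs_qgroup_add_swapped_blocks() and
 * btrfs_qgroup_trace_subtree_after_cow() do. This plain-C binary search
 * tree with hypothetical names demonstrates the invariant.
 */
#if 0
#include <assert.h>
#include <stddef.h>

struct node {
	unsigned long long key;
	struct node *left, *right;
};

/* Same comparison as the kernel code: entry key < searched key goes left. */
static struct node **descend(struct node **cur, unsigned long long key)
{
	while (*cur) {
		if ((*cur)->key < key)
			cur = &(*cur)->left;
		else if ((*cur)->key > key)
			cur = &(*cur)->right;
		else
			break;
	}
	return cur;
}

int main(void)
{
	struct node a = { .key = 10 }, b = { .key = 20 }, *root = NULL;

	*descend(&root, a.key) = &a;
	*descend(&root, b.key) = &b;
	/* Lookup uses the same descend rule, so both keys are found. */
	assert(*descend(&root, 10) == &a);
	assert(*descend(&root, 20) == &b);
	return 0;
}
#endif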
/*
 * Check if the tree block is a subtree root, and if so do the needed
 * delayed subtree tracing for qgroup.
 *
 * This is called during btrfs_cow_block().
 */
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct extent_buffer *subvol_eb)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct extent_buffer *reloc_eb = NULL;
	struct rb_node *node;
	bool found = false;
	bool swapped = false;
	int level = btrfs_header_level(subvol_eb);
	int ret = 0;
	int i;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;
	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
		return 0;

	spin_lock(&blocks->lock);
	if (!blocks->swapped) {
		spin_unlock(&blocks->lock);
		return 0;
	}
	node = blocks->blocks[level].rb_node;

	while (node) {
		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
		if (block->subvol_bytenr < subvol_eb->start) {
			node = node->rb_left;
		} else if (block->subvol_bytenr > subvol_eb->start) {
			node = node->rb_right;
		} else {
			found = true;
			break;
		}
	}
	if (!found) {
		spin_unlock(&blocks->lock);
		goto out;
	}
	/* Found one, remove it from @blocks first and update blocks->swapped */
	rb_erase(&block->node, &blocks->blocks[level]);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		/* Keep the flag set while any level still has records */
		if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
			swapped = true;
			break;
		}
	}
	blocks->swapped = swapped;
	spin_unlock(&blocks->lock);

	/* Read out the reloc subtree root */
	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, 0,
				   block->reloc_generation, block->level,
				   &block->first_key);
	if (IS_ERR(reloc_eb)) {
		ret = PTR_ERR(reloc_eb);
		reloc_eb = NULL;
		goto free_out;
	}
	if (!extent_buffer_uptodate(reloc_eb)) {
		ret = -EIO;
		goto free_out;
	}

	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
					block->last_snapshot,
					block->trace_leaf);
free_out:
	kfree(block);
	free_extent_buffer(reloc_eb);
out:
	if (ret < 0) {
		btrfs_err_rl(fs_info,
			     "failed to account subtree at bytenr %llu: %d",
			     subvol_eb->start, ret);
		qgroup_mark_inconsistent(fs_info);
	}
	return ret;
}

void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{
	struct btrfs_qgroup_extent_record *entry;
	struct btrfs_qgroup_extent_record *next;
	struct rb_root *root;

	root = &trans->delayed_refs.dirty_extent_root;
	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
		ulist_free(entry->old_roots);
		kfree(entry);
	}
}
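/*
 * Illustrative sketch (not kernel code): after removing a record,
 * btrfs_qgroup_trace_subtree_after_cow() above recomputes blocks->swapped
 * as "any level still has entries". A userspace model of that
 * recomputation, with hypothetical names:
 */
#if 0
#include <assert.h>
#include <stdbool.h>

#define MAX_LEVEL 8

static bool any_level_nonempty(const int counts[MAX_LEVEL])
{
	for (int i = 0; i < MAX_LEVEL; i++) {
		if (counts[i] != 0)	/* note: non-empty, not empty */
			return true;
	}
	return false;
}

int main(void)
{
	int counts[MAX_LEVEL] = { 0 };

	counts[2] = 1;				/* one record at level 2 */
	assert(any_level_nonempty(counts));
	counts[2] = 0;				/* last record removed */
	assert(!any_level_nonempty(counts));
	return 0;
}
#endif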