// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}
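
/*
 * For illustration of the seq-based refcnt helpers: with seq == 100, two
 * calls to btrfs_qgroup_update_old_refcnt(qg, 100, 1) leave old_refcnt at
 * 102, and btrfs_qgroup_get_old_refcnt(qg, 100) then reports 2.  Counts
 * recorded under an older sequence number are treated as zero, so values
 * from previous accounting rounds never leak into the current one.
 */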
138 static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq) 139 { 140 if (qg->new_refcnt < seq) 141 return 0; 142 return qg->new_refcnt - seq; 143 } 144 145 /* 146 * glue structure to represent the relations between qgroups. 147 */ 148 struct btrfs_qgroup_list { 149 struct list_head next_group; 150 struct list_head next_member; 151 struct btrfs_qgroup *group; 152 struct btrfs_qgroup *member; 153 }; 154 155 static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg) 156 { 157 return (u64)(uintptr_t)qg; 158 } 159 160 static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n) 161 { 162 return (struct btrfs_qgroup *)(uintptr_t)n->aux; 163 } 164 165 static int 166 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 167 int init_flags); 168 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info); 169 170 /* must be called with qgroup_ioctl_lock held */ 171 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info, 172 u64 qgroupid) 173 { 174 struct rb_node *n = fs_info->qgroup_tree.rb_node; 175 struct btrfs_qgroup *qgroup; 176 177 while (n) { 178 qgroup = rb_entry(n, struct btrfs_qgroup, node); 179 if (qgroup->qgroupid < qgroupid) 180 n = n->rb_left; 181 else if (qgroup->qgroupid > qgroupid) 182 n = n->rb_right; 183 else 184 return qgroup; 185 } 186 return NULL; 187 } 188 189 /* must be called with qgroup_lock held */ 190 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info, 191 u64 qgroupid) 192 { 193 struct rb_node **p = &fs_info->qgroup_tree.rb_node; 194 struct rb_node *parent = NULL; 195 struct btrfs_qgroup *qgroup; 196 197 while (*p) { 198 parent = *p; 199 qgroup = rb_entry(parent, struct btrfs_qgroup, node); 200 201 if (qgroup->qgroupid < qgroupid) 202 p = &(*p)->rb_left; 203 else if (qgroup->qgroupid > qgroupid) 204 p = &(*p)->rb_right; 205 else 206 return qgroup; 207 } 208 209 qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC); 210 if (!qgroup) 211 return ERR_PTR(-ENOMEM); 212 213 qgroup->qgroupid = qgroupid; 214 INIT_LIST_HEAD(&qgroup->groups); 215 INIT_LIST_HEAD(&qgroup->members); 216 INIT_LIST_HEAD(&qgroup->dirty); 217 218 rb_link_node(&qgroup->node, parent, p); 219 rb_insert_color(&qgroup->node, &fs_info->qgroup_tree); 220 221 return qgroup; 222 } 223 224 static void __del_qgroup_rb(struct btrfs_fs_info *fs_info, 225 struct btrfs_qgroup *qgroup) 226 { 227 struct btrfs_qgroup_list *list; 228 229 btrfs_sysfs_del_one_qgroup(fs_info, qgroup); 230 list_del(&qgroup->dirty); 231 while (!list_empty(&qgroup->groups)) { 232 list = list_first_entry(&qgroup->groups, 233 struct btrfs_qgroup_list, next_group); 234 list_del(&list->next_group); 235 list_del(&list->next_member); 236 kfree(list); 237 } 238 239 while (!list_empty(&qgroup->members)) { 240 list = list_first_entry(&qgroup->members, 241 struct btrfs_qgroup_list, next_member); 242 list_del(&list->next_group); 243 list_del(&list->next_member); 244 kfree(list); 245 } 246 kfree(qgroup); 247 } 248 249 /* must be called with qgroup_lock held */ 250 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid) 251 { 252 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid); 253 254 if (!qgroup) 255 return -ENOENT; 256 257 rb_erase(&qgroup->node, &fs_info->qgroup_tree); 258 __del_qgroup_rb(fs_info, qgroup); 259 return 0; 260 } 261 262 /* must be called with qgroup_lock held */ 263 static int add_relation_rb(struct btrfs_fs_info *fs_info, 264 u64 memberid, u64 parentid) 265 { 266 struct btrfs_qgroup *member; 267 struct 
btrfs_qgroup *parent; 268 struct btrfs_qgroup_list *list; 269 270 member = find_qgroup_rb(fs_info, memberid); 271 parent = find_qgroup_rb(fs_info, parentid); 272 if (!member || !parent) 273 return -ENOENT; 274 275 list = kzalloc(sizeof(*list), GFP_ATOMIC); 276 if (!list) 277 return -ENOMEM; 278 279 list->group = parent; 280 list->member = member; 281 list_add_tail(&list->next_group, &member->groups); 282 list_add_tail(&list->next_member, &parent->members); 283 284 return 0; 285 } 286 287 /* must be called with qgroup_lock held */ 288 static int del_relation_rb(struct btrfs_fs_info *fs_info, 289 u64 memberid, u64 parentid) 290 { 291 struct btrfs_qgroup *member; 292 struct btrfs_qgroup *parent; 293 struct btrfs_qgroup_list *list; 294 295 member = find_qgroup_rb(fs_info, memberid); 296 parent = find_qgroup_rb(fs_info, parentid); 297 if (!member || !parent) 298 return -ENOENT; 299 300 list_for_each_entry(list, &member->groups, next_group) { 301 if (list->group == parent) { 302 list_del(&list->next_group); 303 list_del(&list->next_member); 304 kfree(list); 305 return 0; 306 } 307 } 308 return -ENOENT; 309 } 310 311 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 312 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid, 313 u64 rfer, u64 excl) 314 { 315 struct btrfs_qgroup *qgroup; 316 317 qgroup = find_qgroup_rb(fs_info, qgroupid); 318 if (!qgroup) 319 return -EINVAL; 320 if (qgroup->rfer != rfer || qgroup->excl != excl) 321 return -EINVAL; 322 return 0; 323 } 324 #endif 325 326 /* 327 * The full config is read in one go, only called from open_ctree() 328 * It doesn't use any locking, as at this point we're still single-threaded 329 */ 330 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) 331 { 332 struct btrfs_key key; 333 struct btrfs_key found_key; 334 struct btrfs_root *quota_root = fs_info->quota_root; 335 struct btrfs_path *path = NULL; 336 struct extent_buffer *l; 337 int slot; 338 int ret = 0; 339 u64 flags = 0; 340 u64 rescan_progress = 0; 341 342 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 343 return 0; 344 345 fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL); 346 if (!fs_info->qgroup_ulist) { 347 ret = -ENOMEM; 348 goto out; 349 } 350 351 path = btrfs_alloc_path(); 352 if (!path) { 353 ret = -ENOMEM; 354 goto out; 355 } 356 357 ret = btrfs_sysfs_add_qgroups(fs_info); 358 if (ret < 0) 359 goto out; 360 /* default this to quota off, in case no status key is found */ 361 fs_info->qgroup_flags = 0; 362 363 /* 364 * pass 1: read status, all qgroup infos and limits 365 */ 366 key.objectid = 0; 367 key.type = 0; 368 key.offset = 0; 369 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1); 370 if (ret) 371 goto out; 372 373 while (1) { 374 struct btrfs_qgroup *qgroup; 375 376 slot = path->slots[0]; 377 l = path->nodes[0]; 378 btrfs_item_key_to_cpu(l, &found_key, slot); 379 380 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) { 381 struct btrfs_qgroup_status_item *ptr; 382 383 ptr = btrfs_item_ptr(l, slot, 384 struct btrfs_qgroup_status_item); 385 386 if (btrfs_qgroup_status_version(l, ptr) != 387 BTRFS_QGROUP_STATUS_VERSION) { 388 btrfs_err(fs_info, 389 "old qgroup version, quota disabled"); 390 goto out; 391 } 392 if (btrfs_qgroup_status_generation(l, ptr) != 393 fs_info->generation) { 394 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 395 btrfs_err(fs_info, 396 "qgroup generation mismatch, marked as inconsistent"); 397 } 398 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, 399 ptr); 400 rescan_progress = btrfs_qgroup_status_rescan(l, ptr); 
401 goto next1; 402 } 403 404 if (found_key.type != BTRFS_QGROUP_INFO_KEY && 405 found_key.type != BTRFS_QGROUP_LIMIT_KEY) 406 goto next1; 407 408 qgroup = find_qgroup_rb(fs_info, found_key.offset); 409 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) || 410 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) { 411 btrfs_err(fs_info, "inconsistent qgroup config"); 412 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 413 } 414 if (!qgroup) { 415 qgroup = add_qgroup_rb(fs_info, found_key.offset); 416 if (IS_ERR(qgroup)) { 417 ret = PTR_ERR(qgroup); 418 goto out; 419 } 420 } 421 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 422 if (ret < 0) 423 goto out; 424 425 switch (found_key.type) { 426 case BTRFS_QGROUP_INFO_KEY: { 427 struct btrfs_qgroup_info_item *ptr; 428 429 ptr = btrfs_item_ptr(l, slot, 430 struct btrfs_qgroup_info_item); 431 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr); 432 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr); 433 qgroup->excl = btrfs_qgroup_info_excl(l, ptr); 434 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr); 435 /* generation currently unused */ 436 break; 437 } 438 case BTRFS_QGROUP_LIMIT_KEY: { 439 struct btrfs_qgroup_limit_item *ptr; 440 441 ptr = btrfs_item_ptr(l, slot, 442 struct btrfs_qgroup_limit_item); 443 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr); 444 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr); 445 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr); 446 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr); 447 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr); 448 break; 449 } 450 } 451 next1: 452 ret = btrfs_next_item(quota_root, path); 453 if (ret < 0) 454 goto out; 455 if (ret) 456 break; 457 } 458 btrfs_release_path(path); 459 460 /* 461 * pass 2: read all qgroup relations 462 */ 463 key.objectid = 0; 464 key.type = BTRFS_QGROUP_RELATION_KEY; 465 key.offset = 0; 466 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0); 467 if (ret) 468 goto out; 469 while (1) { 470 slot = path->slots[0]; 471 l = path->nodes[0]; 472 btrfs_item_key_to_cpu(l, &found_key, slot); 473 474 if (found_key.type != BTRFS_QGROUP_RELATION_KEY) 475 goto next2; 476 477 if (found_key.objectid > found_key.offset) { 478 /* parent <- member, not needed to build config */ 479 /* FIXME should we omit the key completely? */ 480 goto next2; 481 } 482 483 ret = add_relation_rb(fs_info, found_key.objectid, 484 found_key.offset); 485 if (ret == -ENOENT) { 486 btrfs_warn(fs_info, 487 "orphan qgroup relation 0x%llx->0x%llx", 488 found_key.objectid, found_key.offset); 489 ret = 0; /* ignore the error */ 490 } 491 if (ret) 492 goto out; 493 next2: 494 ret = btrfs_next_item(quota_root, path); 495 if (ret < 0) 496 goto out; 497 if (ret) 498 break; 499 } 500 out: 501 btrfs_free_path(path); 502 fs_info->qgroup_flags |= flags; 503 if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) 504 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 505 else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN && 506 ret >= 0) 507 ret = qgroup_rescan_init(fs_info, rescan_progress, 0); 508 509 if (ret < 0) { 510 ulist_free(fs_info->qgroup_ulist); 511 fs_info->qgroup_ulist = NULL; 512 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 513 btrfs_sysfs_del_qgroups(fs_info); 514 } 515 516 return ret < 0 ? ret : 0; 517 } 518 519 /* 520 * Called in close_ctree() when quota is still enabled. This verifies we don't 521 * leak some reserved space. 522 * 523 * Return false if no reserved space is left. 
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock.  And here we don't go post-order to provide a more
	 * user-friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
					   btrfs_qgroup_level(qgroup->qgroupid),
					   btrfs_qgroup_subvolid(qgroup->qgroupid),
					   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable().
 * The first two run in single-threaded paths.  For the third one, we have
 * already set quota_root to NULL with qgroup_lock held, so it is safe to
 * clean up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path =
btrfs_alloc_path(); 652 if (!path) 653 return -ENOMEM; 654 655 key.objectid = 0; 656 key.type = BTRFS_QGROUP_INFO_KEY; 657 key.offset = qgroupid; 658 659 /* 660 * Avoid a transaction abort by catching -EEXIST here. In that 661 * case, we proceed by re-initializing the existing structure 662 * on disk. 663 */ 664 665 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 666 sizeof(*qgroup_info)); 667 if (ret && ret != -EEXIST) 668 goto out; 669 670 leaf = path->nodes[0]; 671 qgroup_info = btrfs_item_ptr(leaf, path->slots[0], 672 struct btrfs_qgroup_info_item); 673 btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid); 674 btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0); 675 btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0); 676 btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0); 677 btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0); 678 679 btrfs_mark_buffer_dirty(leaf); 680 681 btrfs_release_path(path); 682 683 key.type = BTRFS_QGROUP_LIMIT_KEY; 684 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 685 sizeof(*qgroup_limit)); 686 if (ret && ret != -EEXIST) 687 goto out; 688 689 leaf = path->nodes[0]; 690 qgroup_limit = btrfs_item_ptr(leaf, path->slots[0], 691 struct btrfs_qgroup_limit_item); 692 btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0); 693 btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0); 694 btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0); 695 btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0); 696 btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0); 697 698 btrfs_mark_buffer_dirty(leaf); 699 700 ret = 0; 701 out: 702 btrfs_free_path(path); 703 return ret; 704 } 705 706 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid) 707 { 708 int ret; 709 struct btrfs_root *quota_root = trans->fs_info->quota_root; 710 struct btrfs_path *path; 711 struct btrfs_key key; 712 713 path = btrfs_alloc_path(); 714 if (!path) 715 return -ENOMEM; 716 717 key.objectid = 0; 718 key.type = BTRFS_QGROUP_INFO_KEY; 719 key.offset = qgroupid; 720 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 721 if (ret < 0) 722 goto out; 723 724 if (ret > 0) { 725 ret = -ENOENT; 726 goto out; 727 } 728 729 ret = btrfs_del_item(trans, quota_root, path); 730 if (ret) 731 goto out; 732 733 btrfs_release_path(path); 734 735 key.type = BTRFS_QGROUP_LIMIT_KEY; 736 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 737 if (ret < 0) 738 goto out; 739 740 if (ret > 0) { 741 ret = -ENOENT; 742 goto out; 743 } 744 745 ret = btrfs_del_item(trans, quota_root, path); 746 747 out: 748 btrfs_free_path(path); 749 return ret; 750 } 751 752 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans, 753 struct btrfs_qgroup *qgroup) 754 { 755 struct btrfs_root *quota_root = trans->fs_info->quota_root; 756 struct btrfs_path *path; 757 struct btrfs_key key; 758 struct extent_buffer *l; 759 struct btrfs_qgroup_limit_item *qgroup_limit; 760 int ret; 761 int slot; 762 763 key.objectid = 0; 764 key.type = BTRFS_QGROUP_LIMIT_KEY; 765 key.offset = qgroup->qgroupid; 766 767 path = btrfs_alloc_path(); 768 if (!path) 769 return -ENOMEM; 770 771 ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); 772 if (ret > 0) 773 ret = -ENOENT; 774 775 if (ret) 776 goto out; 777 778 l = path->nodes[0]; 779 slot = path->slots[0]; 780 qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item); 781 btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags); 782 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, 
qgroup->max_rfer); 783 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl); 784 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer); 785 btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl); 786 787 btrfs_mark_buffer_dirty(l); 788 789 out: 790 btrfs_free_path(path); 791 return ret; 792 } 793 794 static int update_qgroup_info_item(struct btrfs_trans_handle *trans, 795 struct btrfs_qgroup *qgroup) 796 { 797 struct btrfs_fs_info *fs_info = trans->fs_info; 798 struct btrfs_root *quota_root = fs_info->quota_root; 799 struct btrfs_path *path; 800 struct btrfs_key key; 801 struct extent_buffer *l; 802 struct btrfs_qgroup_info_item *qgroup_info; 803 int ret; 804 int slot; 805 806 if (btrfs_is_testing(fs_info)) 807 return 0; 808 809 key.objectid = 0; 810 key.type = BTRFS_QGROUP_INFO_KEY; 811 key.offset = qgroup->qgroupid; 812 813 path = btrfs_alloc_path(); 814 if (!path) 815 return -ENOMEM; 816 817 ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); 818 if (ret > 0) 819 ret = -ENOENT; 820 821 if (ret) 822 goto out; 823 824 l = path->nodes[0]; 825 slot = path->slots[0]; 826 qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item); 827 btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid); 828 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer); 829 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr); 830 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl); 831 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr); 832 833 btrfs_mark_buffer_dirty(l); 834 835 out: 836 btrfs_free_path(path); 837 return ret; 838 } 839 840 static int update_qgroup_status_item(struct btrfs_trans_handle *trans) 841 { 842 struct btrfs_fs_info *fs_info = trans->fs_info; 843 struct btrfs_root *quota_root = fs_info->quota_root; 844 struct btrfs_path *path; 845 struct btrfs_key key; 846 struct extent_buffer *l; 847 struct btrfs_qgroup_status_item *ptr; 848 int ret; 849 int slot; 850 851 key.objectid = 0; 852 key.type = BTRFS_QGROUP_STATUS_KEY; 853 key.offset = 0; 854 855 path = btrfs_alloc_path(); 856 if (!path) 857 return -ENOMEM; 858 859 ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); 860 if (ret > 0) 861 ret = -ENOENT; 862 863 if (ret) 864 goto out; 865 866 l = path->nodes[0]; 867 slot = path->slots[0]; 868 ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item); 869 btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags); 870 btrfs_set_qgroup_status_generation(l, ptr, trans->transid); 871 btrfs_set_qgroup_status_rescan(l, ptr, 872 fs_info->qgroup_rescan_progress.objectid); 873 874 btrfs_mark_buffer_dirty(l); 875 876 out: 877 btrfs_free_path(path); 878 return ret; 879 } 880 881 /* 882 * called with qgroup_lock held 883 */ 884 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans, 885 struct btrfs_root *root) 886 { 887 struct btrfs_path *path; 888 struct btrfs_key key; 889 struct extent_buffer *leaf = NULL; 890 int ret; 891 int nr = 0; 892 893 path = btrfs_alloc_path(); 894 if (!path) 895 return -ENOMEM; 896 897 key.objectid = 0; 898 key.offset = 0; 899 key.type = 0; 900 901 while (1) { 902 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 903 if (ret < 0) 904 goto out; 905 leaf = path->nodes[0]; 906 nr = btrfs_header_nritems(leaf); 907 if (!nr) 908 break; 909 /* 910 * delete the leaf one by one 911 * since the whole tree is going 912 * to be deleted. 
913 */ 914 path->slots[0] = 0; 915 ret = btrfs_del_items(trans, root, path, 0, nr); 916 if (ret) 917 goto out; 918 919 btrfs_release_path(path); 920 } 921 ret = 0; 922 out: 923 btrfs_free_path(path); 924 return ret; 925 } 926 927 int btrfs_quota_enable(struct btrfs_fs_info *fs_info) 928 { 929 struct btrfs_root *quota_root; 930 struct btrfs_root *tree_root = fs_info->tree_root; 931 struct btrfs_path *path = NULL; 932 struct btrfs_qgroup_status_item *ptr; 933 struct extent_buffer *leaf; 934 struct btrfs_key key; 935 struct btrfs_key found_key; 936 struct btrfs_qgroup *qgroup = NULL; 937 struct btrfs_trans_handle *trans = NULL; 938 struct ulist *ulist = NULL; 939 int ret = 0; 940 int slot; 941 942 mutex_lock(&fs_info->qgroup_ioctl_lock); 943 if (fs_info->quota_root) 944 goto out; 945 946 ulist = ulist_alloc(GFP_KERNEL); 947 if (!ulist) { 948 ret = -ENOMEM; 949 goto out; 950 } 951 952 ret = btrfs_sysfs_add_qgroups(fs_info); 953 if (ret < 0) 954 goto out; 955 956 /* 957 * Unlock qgroup_ioctl_lock before starting the transaction. This is to 958 * avoid lock acquisition inversion problems (reported by lockdep) between 959 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we 960 * start a transaction. 961 * After we started the transaction lock qgroup_ioctl_lock again and 962 * check if someone else created the quota root in the meanwhile. If so, 963 * just return success and release the transaction handle. 964 * 965 * Also we don't need to worry about someone else calling 966 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because 967 * that function returns 0 (success) when the sysfs entries already exist. 968 */ 969 mutex_unlock(&fs_info->qgroup_ioctl_lock); 970 971 /* 972 * 1 for quota root item 973 * 1 for BTRFS_QGROUP_STATUS item 974 * 975 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items 976 * per subvolume. However those are not currently reserved since it 977 * would be a lot of overkill. 
978 */ 979 trans = btrfs_start_transaction(tree_root, 2); 980 981 mutex_lock(&fs_info->qgroup_ioctl_lock); 982 if (IS_ERR(trans)) { 983 ret = PTR_ERR(trans); 984 trans = NULL; 985 goto out; 986 } 987 988 if (fs_info->quota_root) 989 goto out; 990 991 fs_info->qgroup_ulist = ulist; 992 ulist = NULL; 993 994 /* 995 * initially create the quota tree 996 */ 997 quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID); 998 if (IS_ERR(quota_root)) { 999 ret = PTR_ERR(quota_root); 1000 btrfs_abort_transaction(trans, ret); 1001 goto out; 1002 } 1003 1004 path = btrfs_alloc_path(); 1005 if (!path) { 1006 ret = -ENOMEM; 1007 btrfs_abort_transaction(trans, ret); 1008 goto out_free_root; 1009 } 1010 1011 key.objectid = 0; 1012 key.type = BTRFS_QGROUP_STATUS_KEY; 1013 key.offset = 0; 1014 1015 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 1016 sizeof(*ptr)); 1017 if (ret) { 1018 btrfs_abort_transaction(trans, ret); 1019 goto out_free_path; 1020 } 1021 1022 leaf = path->nodes[0]; 1023 ptr = btrfs_item_ptr(leaf, path->slots[0], 1024 struct btrfs_qgroup_status_item); 1025 btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid); 1026 btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION); 1027 fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON | 1028 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1029 btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags); 1030 btrfs_set_qgroup_status_rescan(leaf, ptr, 0); 1031 1032 btrfs_mark_buffer_dirty(leaf); 1033 1034 key.objectid = 0; 1035 key.type = BTRFS_ROOT_REF_KEY; 1036 key.offset = 0; 1037 1038 btrfs_release_path(path); 1039 ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0); 1040 if (ret > 0) 1041 goto out_add_root; 1042 if (ret < 0) { 1043 btrfs_abort_transaction(trans, ret); 1044 goto out_free_path; 1045 } 1046 1047 while (1) { 1048 slot = path->slots[0]; 1049 leaf = path->nodes[0]; 1050 btrfs_item_key_to_cpu(leaf, &found_key, slot); 1051 1052 if (found_key.type == BTRFS_ROOT_REF_KEY) { 1053 1054 /* Release locks on tree_root before we access quota_root */ 1055 btrfs_release_path(path); 1056 1057 ret = add_qgroup_item(trans, quota_root, 1058 found_key.offset); 1059 if (ret) { 1060 btrfs_abort_transaction(trans, ret); 1061 goto out_free_path; 1062 } 1063 1064 qgroup = add_qgroup_rb(fs_info, found_key.offset); 1065 if (IS_ERR(qgroup)) { 1066 ret = PTR_ERR(qgroup); 1067 btrfs_abort_transaction(trans, ret); 1068 goto out_free_path; 1069 } 1070 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 1071 if (ret < 0) { 1072 btrfs_abort_transaction(trans, ret); 1073 goto out_free_path; 1074 } 1075 ret = btrfs_search_slot_for_read(tree_root, &found_key, 1076 path, 1, 0); 1077 if (ret < 0) { 1078 btrfs_abort_transaction(trans, ret); 1079 goto out_free_path; 1080 } 1081 if (ret > 0) { 1082 /* 1083 * Shouldn't happen, but in case it does we 1084 * don't need to do the btrfs_next_item, just 1085 * continue. 
1086 */ 1087 continue; 1088 } 1089 } 1090 ret = btrfs_next_item(tree_root, path); 1091 if (ret < 0) { 1092 btrfs_abort_transaction(trans, ret); 1093 goto out_free_path; 1094 } 1095 if (ret) 1096 break; 1097 } 1098 1099 out_add_root: 1100 btrfs_release_path(path); 1101 ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID); 1102 if (ret) { 1103 btrfs_abort_transaction(trans, ret); 1104 goto out_free_path; 1105 } 1106 1107 qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID); 1108 if (IS_ERR(qgroup)) { 1109 ret = PTR_ERR(qgroup); 1110 btrfs_abort_transaction(trans, ret); 1111 goto out_free_path; 1112 } 1113 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 1114 if (ret < 0) { 1115 btrfs_abort_transaction(trans, ret); 1116 goto out_free_path; 1117 } 1118 1119 ret = btrfs_commit_transaction(trans); 1120 trans = NULL; 1121 if (ret) 1122 goto out_free_path; 1123 1124 /* 1125 * Set quota enabled flag after committing the transaction, to avoid 1126 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot 1127 * creation. 1128 */ 1129 spin_lock(&fs_info->qgroup_lock); 1130 fs_info->quota_root = quota_root; 1131 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1132 spin_unlock(&fs_info->qgroup_lock); 1133 1134 ret = qgroup_rescan_init(fs_info, 0, 1); 1135 if (!ret) { 1136 qgroup_rescan_zero_tracking(fs_info); 1137 fs_info->qgroup_rescan_running = true; 1138 btrfs_queue_work(fs_info->qgroup_rescan_workers, 1139 &fs_info->qgroup_rescan_work); 1140 } 1141 1142 out_free_path: 1143 btrfs_free_path(path); 1144 out_free_root: 1145 if (ret) 1146 btrfs_put_root(quota_root); 1147 out: 1148 if (ret) { 1149 ulist_free(fs_info->qgroup_ulist); 1150 fs_info->qgroup_ulist = NULL; 1151 btrfs_sysfs_del_qgroups(fs_info); 1152 } 1153 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1154 if (ret && trans) 1155 btrfs_end_transaction(trans); 1156 else if (trans) 1157 ret = btrfs_end_transaction(trans); 1158 ulist_free(ulist); 1159 return ret; 1160 } 1161 1162 int btrfs_quota_disable(struct btrfs_fs_info *fs_info) 1163 { 1164 struct btrfs_root *quota_root; 1165 struct btrfs_trans_handle *trans = NULL; 1166 int ret = 0; 1167 1168 mutex_lock(&fs_info->qgroup_ioctl_lock); 1169 if (!fs_info->quota_root) 1170 goto out; 1171 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1172 1173 /* 1174 * 1 For the root item 1175 * 1176 * We should also reserve enough items for the quota tree deletion in 1177 * btrfs_clean_quota_tree but this is not done. 1178 * 1179 * Also, we must always start a transaction without holding the mutex 1180 * qgroup_ioctl_lock, see btrfs_quota_enable(). 
 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clean_tree_block(quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);

	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting: we are updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to qgroup reservation space, which must also be added
 * to/removed from the parent.  Otherwise, when the child later releases its
 * reservation, the parent would underflow its reservation (in the
 * relationship-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
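 *
 * For example, assigning a child with rfer == excl == 1 MiB to a parent
 * (sign == 1) adds 1 MiB to the parent's rfer and excl and propagates the
 * child's reservations up; removing the relation (sign == -1) subtracts the
 * same amounts again.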
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}


/*
 * Quick path for updating a qgroup that has only exclusive refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, return > 0 if a full rescan is needed
 * (the INCONSISTENT flag is set in that case).
 * Return < 0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	unsigned int nofs_flag;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	bool found = false;
	unsigned int nofs_flag;
	int ret = 0;
	int ret2;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * If the parent/member pair doesn't exist, only try to delete the
	 * stale relation items.
1455 */ 1456 if (!member || !parent) 1457 goto delete_item; 1458 1459 /* check if such qgroup relation exist firstly */ 1460 list_for_each_entry(list, &member->groups, next_group) { 1461 if (list->group == parent) { 1462 found = true; 1463 break; 1464 } 1465 } 1466 1467 delete_item: 1468 ret = del_qgroup_relation_item(trans, src, dst); 1469 if (ret < 0 && ret != -ENOENT) 1470 goto out; 1471 ret2 = del_qgroup_relation_item(trans, dst, src); 1472 if (ret2 < 0 && ret2 != -ENOENT) 1473 goto out; 1474 1475 /* At least one deletion succeeded, return 0 */ 1476 if (!ret || !ret2) 1477 ret = 0; 1478 1479 if (found) { 1480 spin_lock(&fs_info->qgroup_lock); 1481 del_relation_rb(fs_info, src, dst); 1482 ret = quick_update_accounting(fs_info, tmp, src, dst, -1); 1483 spin_unlock(&fs_info->qgroup_lock); 1484 } 1485 out: 1486 ulist_free(tmp); 1487 return ret; 1488 } 1489 1490 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, 1491 u64 dst) 1492 { 1493 struct btrfs_fs_info *fs_info = trans->fs_info; 1494 int ret = 0; 1495 1496 mutex_lock(&fs_info->qgroup_ioctl_lock); 1497 ret = __del_qgroup_relation(trans, src, dst); 1498 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1499 1500 return ret; 1501 } 1502 1503 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) 1504 { 1505 struct btrfs_fs_info *fs_info = trans->fs_info; 1506 struct btrfs_root *quota_root; 1507 struct btrfs_qgroup *qgroup; 1508 int ret = 0; 1509 1510 mutex_lock(&fs_info->qgroup_ioctl_lock); 1511 if (!fs_info->quota_root) { 1512 ret = -ENOTCONN; 1513 goto out; 1514 } 1515 quota_root = fs_info->quota_root; 1516 qgroup = find_qgroup_rb(fs_info, qgroupid); 1517 if (qgroup) { 1518 ret = -EEXIST; 1519 goto out; 1520 } 1521 1522 ret = add_qgroup_item(trans, quota_root, qgroupid); 1523 if (ret) 1524 goto out; 1525 1526 spin_lock(&fs_info->qgroup_lock); 1527 qgroup = add_qgroup_rb(fs_info, qgroupid); 1528 spin_unlock(&fs_info->qgroup_lock); 1529 1530 if (IS_ERR(qgroup)) { 1531 ret = PTR_ERR(qgroup); 1532 goto out; 1533 } 1534 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 1535 out: 1536 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1537 return ret; 1538 } 1539 1540 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) 1541 { 1542 struct btrfs_fs_info *fs_info = trans->fs_info; 1543 struct btrfs_qgroup *qgroup; 1544 struct btrfs_qgroup_list *list; 1545 int ret = 0; 1546 1547 mutex_lock(&fs_info->qgroup_ioctl_lock); 1548 if (!fs_info->quota_root) { 1549 ret = -ENOTCONN; 1550 goto out; 1551 } 1552 1553 qgroup = find_qgroup_rb(fs_info, qgroupid); 1554 if (!qgroup) { 1555 ret = -ENOENT; 1556 goto out; 1557 } 1558 1559 /* Check if there are no children of this qgroup */ 1560 if (!list_empty(&qgroup->members)) { 1561 ret = -EBUSY; 1562 goto out; 1563 } 1564 1565 ret = del_qgroup_item(trans, qgroupid); 1566 if (ret && ret != -ENOENT) 1567 goto out; 1568 1569 while (!list_empty(&qgroup->groups)) { 1570 list = list_first_entry(&qgroup->groups, 1571 struct btrfs_qgroup_list, next_group); 1572 ret = __del_qgroup_relation(trans, qgroupid, 1573 list->group->qgroupid); 1574 if (ret) 1575 goto out; 1576 } 1577 1578 spin_lock(&fs_info->qgroup_lock); 1579 del_qgroup_rb(fs_info, qgroupid); 1580 spin_unlock(&fs_info->qgroup_lock); 1581 out: 1582 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1583 return ret; 1584 } 1585 1586 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid, 1587 struct btrfs_qgroup_limit *limit) 1588 { 1589 struct btrfs_fs_info *fs_info = trans->fs_info; 1590 struct btrfs_qgroup 
*qgroup; 1591 int ret = 0; 1592 /* Sometimes we would want to clear the limit on this qgroup. 1593 * To meet this requirement, we treat the -1 as a special value 1594 * which tell kernel to clear the limit on this qgroup. 1595 */ 1596 const u64 CLEAR_VALUE = -1; 1597 1598 mutex_lock(&fs_info->qgroup_ioctl_lock); 1599 if (!fs_info->quota_root) { 1600 ret = -ENOTCONN; 1601 goto out; 1602 } 1603 1604 qgroup = find_qgroup_rb(fs_info, qgroupid); 1605 if (!qgroup) { 1606 ret = -ENOENT; 1607 goto out; 1608 } 1609 1610 spin_lock(&fs_info->qgroup_lock); 1611 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) { 1612 if (limit->max_rfer == CLEAR_VALUE) { 1613 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1614 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1615 qgroup->max_rfer = 0; 1616 } else { 1617 qgroup->max_rfer = limit->max_rfer; 1618 } 1619 } 1620 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) { 1621 if (limit->max_excl == CLEAR_VALUE) { 1622 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1623 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1624 qgroup->max_excl = 0; 1625 } else { 1626 qgroup->max_excl = limit->max_excl; 1627 } 1628 } 1629 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) { 1630 if (limit->rsv_rfer == CLEAR_VALUE) { 1631 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1632 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1633 qgroup->rsv_rfer = 0; 1634 } else { 1635 qgroup->rsv_rfer = limit->rsv_rfer; 1636 } 1637 } 1638 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) { 1639 if (limit->rsv_excl == CLEAR_VALUE) { 1640 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1641 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1642 qgroup->rsv_excl = 0; 1643 } else { 1644 qgroup->rsv_excl = limit->rsv_excl; 1645 } 1646 } 1647 qgroup->lim_flags |= limit->flags; 1648 1649 spin_unlock(&fs_info->qgroup_lock); 1650 1651 ret = update_qgroup_limit_item(trans, qgroup); 1652 if (ret) { 1653 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1654 btrfs_info(fs_info, "unable to update quota limit for %llu", 1655 qgroupid); 1656 } 1657 1658 out: 1659 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1660 return ret; 1661 } 1662 1663 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, 1664 struct btrfs_delayed_ref_root *delayed_refs, 1665 struct btrfs_qgroup_extent_record *record) 1666 { 1667 struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node; 1668 struct rb_node *parent_node = NULL; 1669 struct btrfs_qgroup_extent_record *entry; 1670 u64 bytenr = record->bytenr; 1671 1672 lockdep_assert_held(&delayed_refs->lock); 1673 trace_btrfs_qgroup_trace_extent(fs_info, record); 1674 1675 while (*p) { 1676 parent_node = *p; 1677 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, 1678 node); 1679 if (bytenr < entry->bytenr) { 1680 p = &(*p)->rb_left; 1681 } else if (bytenr > entry->bytenr) { 1682 p = &(*p)->rb_right; 1683 } else { 1684 if (record->data_rsv && !entry->data_rsv) { 1685 entry->data_rsv = record->data_rsv; 1686 entry->data_rsv_refroot = 1687 record->data_rsv_refroot; 1688 } 1689 return 1; 1690 } 1691 } 1692 1693 rb_link_node(&record->node, parent_node, p); 1694 rb_insert_color(&record->node, &delayed_refs->dirty_extent_root); 1695 return 0; 1696 } 1697 1698 int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, 1699 struct btrfs_qgroup_extent_record *qrecord) 1700 { 1701 struct ulist *old_root; 1702 u64 bytenr = qrecord->bytenr; 1703 int ret; 1704 1705 ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false); 1706 if (ret < 0) 
{
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_warn(fs_info,
"error accounting new delayed refs extent (err code: %d), quota inconsistent",
			ret);
		return 0;
	}

	/*
	 * Here we don't need to get the lock of
	 * trans->transaction->delayed_refs, since the inserted qrecord won't
	 * be deleted, only qrecord->node may be modified (new qrecord insert)
	 *
	 * So modifying qrecord->old_roots is safe here
	 */
	qrecord->old_roots = old_root;
	return 0;
}

int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
	    || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kzalloc(sizeof(*record), gfp_flag);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(fs_info, record);
}

int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
						GFP_NOFS);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root node's slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
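 *
 * For example, with root_level == 1 and the leaf at slot 0 fully processed,
 * the call frees that leaf (level 0 is always released), resets its slot,
 * and increments path->slots[1] so the caller reads the next child of the
 * root; once path->slots[1] reaches the root's item count, 1 is returned to
 * end the walk.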
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}

/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen in highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It should act as a simplified btrfs_search_slot().
 *    The key for the search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during the previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaf.
 *
 * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in reloc tree.
1903 */ 1904 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans, 1905 struct extent_buffer *src_eb, 1906 struct btrfs_path *dst_path, 1907 int dst_level, int root_level, 1908 bool trace_leaf) 1909 { 1910 struct btrfs_key key; 1911 struct btrfs_path *src_path; 1912 struct btrfs_fs_info *fs_info = trans->fs_info; 1913 u32 nodesize = fs_info->nodesize; 1914 int cur_level = root_level; 1915 int ret; 1916 1917 BUG_ON(dst_level > root_level); 1918 /* Level mismatch */ 1919 if (btrfs_header_level(src_eb) != root_level) 1920 return -EINVAL; 1921 1922 src_path = btrfs_alloc_path(); 1923 if (!src_path) { 1924 ret = -ENOMEM; 1925 goto out; 1926 } 1927 1928 if (dst_level) 1929 btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0); 1930 else 1931 btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0); 1932 1933 /* For src_path */ 1934 atomic_inc(&src_eb->refs); 1935 src_path->nodes[root_level] = src_eb; 1936 src_path->slots[root_level] = dst_path->slots[root_level]; 1937 src_path->locks[root_level] = 0; 1938 1939 /* A simplified version of btrfs_search_slot() */ 1940 while (cur_level >= dst_level) { 1941 struct btrfs_key src_key; 1942 struct btrfs_key dst_key; 1943 1944 if (src_path->nodes[cur_level] == NULL) { 1945 struct extent_buffer *eb; 1946 int parent_slot; 1947 1948 eb = src_path->nodes[cur_level + 1]; 1949 parent_slot = src_path->slots[cur_level + 1]; 1950 1951 eb = btrfs_read_node_slot(eb, parent_slot); 1952 if (IS_ERR(eb)) { 1953 ret = PTR_ERR(eb); 1954 goto out; 1955 } 1956 1957 src_path->nodes[cur_level] = eb; 1958 1959 btrfs_tree_read_lock(eb); 1960 src_path->locks[cur_level] = BTRFS_READ_LOCK; 1961 } 1962 1963 src_path->slots[cur_level] = dst_path->slots[cur_level]; 1964 if (cur_level) { 1965 btrfs_node_key_to_cpu(dst_path->nodes[cur_level], 1966 &dst_key, dst_path->slots[cur_level]); 1967 btrfs_node_key_to_cpu(src_path->nodes[cur_level], 1968 &src_key, src_path->slots[cur_level]); 1969 } else { 1970 btrfs_item_key_to_cpu(dst_path->nodes[cur_level], 1971 &dst_key, dst_path->slots[cur_level]); 1972 btrfs_item_key_to_cpu(src_path->nodes[cur_level], 1973 &src_key, src_path->slots[cur_level]); 1974 } 1975 /* Content mismatch, something went wrong */ 1976 if (btrfs_comp_cpu_keys(&dst_key, &src_key)) { 1977 ret = -ENOENT; 1978 goto out; 1979 } 1980 cur_level--; 1981 } 1982 1983 /* 1984 * Now both @dst_path and @src_path have been populated, record the tree 1985 * blocks for qgroup accounting. 1986 */ 1987 ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start, 1988 nodesize, GFP_NOFS); 1989 if (ret < 0) 1990 goto out; 1991 ret = btrfs_qgroup_trace_extent(trans, 1992 dst_path->nodes[dst_level]->start, 1993 nodesize, GFP_NOFS); 1994 if (ret < 0) 1995 goto out; 1996 1997 /* Record leaf file extents */ 1998 if (dst_level == 0 && trace_leaf) { 1999 ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]); 2000 if (ret < 0) 2001 goto out; 2002 ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]); 2003 } 2004 out: 2005 btrfs_free_path(src_path); 2006 return ret; 2007 } 2008 2009 /* 2010 * Helper function to do recursive generation-aware depth-first search, to 2011 * locate all new tree blocks in a subtree of reloc tree. 2012 * 2013 * E.g. 
(OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
 *       reloc tree
 * L2         NN (a)
 *          /     \
 * L1    OO        NN (b)
 *      /  \      /  \
 * L0  OO  OO    OO   NN
 *     (c) (d)
 * If we pass:
 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
 * @cur_level = 1
 * @root_level = 1
 *
 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
 * the above tree blocks along with their counterparts in the file tree.
 * During the search, old tree blocks such as OO(c) will be skipped, as a tree
 * block swap won't affect OO(c).
 */
static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
					   struct extent_buffer *src_eb,
					   struct btrfs_path *dst_path,
					   int cur_level, int root_level,
					   u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *eb;
	bool need_cleanup = false;
	int ret = 0;
	int i;

	/* Level sanity check */
	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
	    root_level < cur_level) {
		btrfs_err_rl(fs_info,
			"%s: bad levels, cur_level=%d root_level=%d",
			__func__, cur_level, root_level);
		return -EUCLEAN;
	}

	/* Read the tree block if needed */
	if (dst_path->nodes[cur_level] == NULL) {
		int parent_slot;
		u64 child_gen;

		/*
		 * dst_path->nodes[root_level] must be initialized before
		 * calling this function.
		 */
		if (cur_level == root_level) {
			btrfs_err_rl(fs_info,
	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
				__func__, root_level, root_level, cur_level);
			return -EUCLEAN;
		}

		/*
		 * We need to get child blockptr/gen from parent before we can
		 * read it.
2072 */ 2073 eb = dst_path->nodes[cur_level + 1]; 2074 parent_slot = dst_path->slots[cur_level + 1]; 2075 child_gen = btrfs_node_ptr_generation(eb, parent_slot); 2076 2077 /* This node is old, no need to trace */ 2078 if (child_gen < last_snapshot) 2079 goto out; 2080 2081 eb = btrfs_read_node_slot(eb, parent_slot); 2082 if (IS_ERR(eb)) { 2083 ret = PTR_ERR(eb); 2084 goto out; 2085 } 2086 2087 dst_path->nodes[cur_level] = eb; 2088 dst_path->slots[cur_level] = 0; 2089 2090 btrfs_tree_read_lock(eb); 2091 dst_path->locks[cur_level] = BTRFS_READ_LOCK; 2092 need_cleanup = true; 2093 } 2094 2095 /* Now record this tree block and its counter part for qgroups */ 2096 ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level, 2097 root_level, trace_leaf); 2098 if (ret < 0) 2099 goto cleanup; 2100 2101 eb = dst_path->nodes[cur_level]; 2102 2103 if (cur_level > 0) { 2104 /* Iterate all child tree blocks */ 2105 for (i = 0; i < btrfs_header_nritems(eb); i++) { 2106 /* Skip old tree blocks as they won't be swapped */ 2107 if (btrfs_node_ptr_generation(eb, i) < last_snapshot) 2108 continue; 2109 dst_path->slots[cur_level] = i; 2110 2111 /* Recursive call (at most 7 times) */ 2112 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, 2113 dst_path, cur_level - 1, root_level, 2114 last_snapshot, trace_leaf); 2115 if (ret < 0) 2116 goto cleanup; 2117 } 2118 } 2119 2120 cleanup: 2121 if (need_cleanup) { 2122 /* Clean up */ 2123 btrfs_tree_unlock_rw(dst_path->nodes[cur_level], 2124 dst_path->locks[cur_level]); 2125 free_extent_buffer(dst_path->nodes[cur_level]); 2126 dst_path->nodes[cur_level] = NULL; 2127 dst_path->slots[cur_level] = 0; 2128 dst_path->locks[cur_level] = 0; 2129 } 2130 out: 2131 return ret; 2132 } 2133 2134 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans, 2135 struct extent_buffer *src_eb, 2136 struct extent_buffer *dst_eb, 2137 u64 last_snapshot, bool trace_leaf) 2138 { 2139 struct btrfs_fs_info *fs_info = trans->fs_info; 2140 struct btrfs_path *dst_path = NULL; 2141 int level; 2142 int ret; 2143 2144 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2145 return 0; 2146 2147 /* Wrong parameter order */ 2148 if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) { 2149 btrfs_err_rl(fs_info, 2150 "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__, 2151 btrfs_header_generation(src_eb), 2152 btrfs_header_generation(dst_eb)); 2153 return -EUCLEAN; 2154 } 2155 2156 if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) { 2157 ret = -EIO; 2158 goto out; 2159 } 2160 2161 level = btrfs_header_level(dst_eb); 2162 dst_path = btrfs_alloc_path(); 2163 if (!dst_path) { 2164 ret = -ENOMEM; 2165 goto out; 2166 } 2167 /* For dst_path */ 2168 atomic_inc(&dst_eb->refs); 2169 dst_path->nodes[level] = dst_eb; 2170 dst_path->slots[level] = 0; 2171 dst_path->locks[level] = 0; 2172 2173 /* Do the generation aware breadth-first search */ 2174 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level, 2175 level, last_snapshot, trace_leaf); 2176 if (ret < 0) 2177 goto out; 2178 ret = 0; 2179 2180 out: 2181 btrfs_free_path(dst_path); 2182 if (ret < 0) 2183 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2184 return ret; 2185 } 2186 2187 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, 2188 struct extent_buffer *root_eb, 2189 u64 root_gen, int root_level) 2190 { 2191 struct btrfs_fs_info *fs_info = trans->fs_info; 2192 int ret = 0; 2193 int level; 2194 struct extent_buffer *eb = root_eb; 2195 
struct btrfs_path *path = NULL; 2196 2197 BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL); 2198 BUG_ON(root_eb == NULL); 2199 2200 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2201 return 0; 2202 2203 if (!extent_buffer_uptodate(root_eb)) { 2204 ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL); 2205 if (ret) 2206 goto out; 2207 } 2208 2209 if (root_level == 0) { 2210 ret = btrfs_qgroup_trace_leaf_items(trans, root_eb); 2211 goto out; 2212 } 2213 2214 path = btrfs_alloc_path(); 2215 if (!path) 2216 return -ENOMEM; 2217 2218 /* 2219 * Walk down the tree. Missing extent blocks are filled in as 2220 * we go. Metadata is accounted every time we read a new 2221 * extent block. 2222 * 2223 * When we reach a leaf, we account for file extent items in it, 2224 * walk back up the tree (adjusting slot pointers as we go) 2225 * and restart the search process. 2226 */ 2227 atomic_inc(&root_eb->refs); /* For path */ 2228 path->nodes[root_level] = root_eb; 2229 path->slots[root_level] = 0; 2230 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ 2231 walk_down: 2232 level = root_level; 2233 while (level >= 0) { 2234 if (path->nodes[level] == NULL) { 2235 int parent_slot; 2236 u64 child_bytenr; 2237 2238 /* 2239 * We need to get child blockptr from parent before we 2240 * can read it. 2241 */ 2242 eb = path->nodes[level + 1]; 2243 parent_slot = path->slots[level + 1]; 2244 child_bytenr = btrfs_node_blockptr(eb, parent_slot); 2245 2246 eb = btrfs_read_node_slot(eb, parent_slot); 2247 if (IS_ERR(eb)) { 2248 ret = PTR_ERR(eb); 2249 goto out; 2250 } 2251 2252 path->nodes[level] = eb; 2253 path->slots[level] = 0; 2254 2255 btrfs_tree_read_lock(eb); 2256 path->locks[level] = BTRFS_READ_LOCK; 2257 2258 ret = btrfs_qgroup_trace_extent(trans, child_bytenr, 2259 fs_info->nodesize, 2260 GFP_NOFS); 2261 if (ret) 2262 goto out; 2263 } 2264 2265 if (level == 0) { 2266 ret = btrfs_qgroup_trace_leaf_items(trans, 2267 path->nodes[level]); 2268 if (ret) 2269 goto out; 2270 2271 /* Nonzero return here means we completed our search */ 2272 ret = adjust_slots_upwards(path, root_level); 2273 if (ret) 2274 break; 2275 2276 /* Restart search with new slots */ 2277 goto walk_down; 2278 } 2279 2280 level--; 2281 } 2282 2283 ret = 0; 2284 out: 2285 btrfs_free_path(path); 2286 2287 return ret; 2288 } 2289 2290 #define UPDATE_NEW 0 2291 #define UPDATE_OLD 1 2292 /* 2293 * Walk all of the roots that points to the bytenr and adjust their refcnts. 
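 *
 * Refcnt updates propagate upwards through the qgroup hierarchy: as an
 * illustrative example (hypothetical ids), if the bytenr is referenced by
 * subvolume 257 and qgroup 0/257 is a member of 1/100, both 0/257 and 1/100
 * get their old (UPDATE_OLD) or new (UPDATE_NEW) refcnt for @seq bumped by one.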
2294 */ 2295 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info, 2296 struct ulist *roots, struct ulist *tmp, 2297 struct ulist *qgroups, u64 seq, int update_old) 2298 { 2299 struct ulist_node *unode; 2300 struct ulist_iterator uiter; 2301 struct ulist_node *tmp_unode; 2302 struct ulist_iterator tmp_uiter; 2303 struct btrfs_qgroup *qg; 2304 int ret = 0; 2305 2306 if (!roots) 2307 return 0; 2308 ULIST_ITER_INIT(&uiter); 2309 while ((unode = ulist_next(roots, &uiter))) { 2310 qg = find_qgroup_rb(fs_info, unode->val); 2311 if (!qg) 2312 continue; 2313 2314 ulist_reinit(tmp); 2315 ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg), 2316 GFP_ATOMIC); 2317 if (ret < 0) 2318 return ret; 2319 ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC); 2320 if (ret < 0) 2321 return ret; 2322 ULIST_ITER_INIT(&tmp_uiter); 2323 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) { 2324 struct btrfs_qgroup_list *glist; 2325 2326 qg = unode_aux_to_qgroup(tmp_unode); 2327 if (update_old) 2328 btrfs_qgroup_update_old_refcnt(qg, seq, 1); 2329 else 2330 btrfs_qgroup_update_new_refcnt(qg, seq, 1); 2331 list_for_each_entry(glist, &qg->groups, next_group) { 2332 ret = ulist_add(qgroups, glist->group->qgroupid, 2333 qgroup_to_aux(glist->group), 2334 GFP_ATOMIC); 2335 if (ret < 0) 2336 return ret; 2337 ret = ulist_add(tmp, glist->group->qgroupid, 2338 qgroup_to_aux(glist->group), 2339 GFP_ATOMIC); 2340 if (ret < 0) 2341 return ret; 2342 } 2343 } 2344 } 2345 return 0; 2346 } 2347 2348 /* 2349 * Update qgroup rfer/excl counters. 2350 * Rfer update is easy, the code can explain itself. 2351 * 2352 * Excl update is tricky, the update is split into 2 parts. 2353 * Part 1: Possible exclusive <-> sharing detect: 2354 * | A | !A | 2355 * ------------------------------------- 2356 * B | * | - | 2357 * ------------------------------------- 2358 * !B | + | ** | 2359 * ------------------------------------- 2360 * 2361 * Conditions: 2362 * A: cur_old_roots < nr_old_roots (not exclusive before) 2363 * !A: cur_old_roots == nr_old_roots (possible exclusive before) 2364 * B: cur_new_roots < nr_new_roots (not exclusive now) 2365 * !B: cur_new_roots == nr_new_roots (possible exclusive now) 2366 * 2367 * Results: 2368 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing 2369 * *: Definitely not changed. **: Possible unchanged. 2370 * 2371 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case. 2372 * 2373 * To make the logic clear, we first use conditions A and B to split the 2374 * combinations into 4 results. 2375 * 2376 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in those 2377 * cases only one variant may be 0. 2378 * 2379 * Lastly, check result **; since there are 2 variants that may be 0, split it 2380 * again (2x2). 2381 * But this time we don't need to consider other things, the code and logic 2382 * are easy to understand now.
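 *
 * Worked example (illustrative numbers only): take a 16KiB extent that was
 * referenced by subvolume 257 alone before the operation
 * (cur_old_count == nr_old_roots == 1) and by 257 and 258 afterwards
 * (cur_new_count == 1 < nr_new_roots == 2). That is the "-" case for
 * qgroup 0/257: rfer stays the same while excl drops by 16KiB
 * (exclusive -> shared).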
2383 */ 2384 static int qgroup_update_counters(struct btrfs_fs_info *fs_info, 2385 struct ulist *qgroups, 2386 u64 nr_old_roots, 2387 u64 nr_new_roots, 2388 u64 num_bytes, u64 seq) 2389 { 2390 struct ulist_node *unode; 2391 struct ulist_iterator uiter; 2392 struct btrfs_qgroup *qg; 2393 u64 cur_new_count, cur_old_count; 2394 2395 ULIST_ITER_INIT(&uiter); 2396 while ((unode = ulist_next(qgroups, &uiter))) { 2397 bool dirty = false; 2398 2399 qg = unode_aux_to_qgroup(unode); 2400 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); 2401 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); 2402 2403 trace_qgroup_update_counters(fs_info, qg, cur_old_count, 2404 cur_new_count); 2405 2406 /* Rfer update part */ 2407 if (cur_old_count == 0 && cur_new_count > 0) { 2408 qg->rfer += num_bytes; 2409 qg->rfer_cmpr += num_bytes; 2410 dirty = true; 2411 } 2412 if (cur_old_count > 0 && cur_new_count == 0) { 2413 qg->rfer -= num_bytes; 2414 qg->rfer_cmpr -= num_bytes; 2415 dirty = true; 2416 } 2417 2418 /* Excl update part */ 2419 /* Exclusive/none -> shared case */ 2420 if (cur_old_count == nr_old_roots && 2421 cur_new_count < nr_new_roots) { 2422 /* Exclusive -> shared */ 2423 if (cur_old_count != 0) { 2424 qg->excl -= num_bytes; 2425 qg->excl_cmpr -= num_bytes; 2426 dirty = true; 2427 } 2428 } 2429 2430 /* Shared -> exclusive/none case */ 2431 if (cur_old_count < nr_old_roots && 2432 cur_new_count == nr_new_roots) { 2433 /* Shared->exclusive */ 2434 if (cur_new_count != 0) { 2435 qg->excl += num_bytes; 2436 qg->excl_cmpr += num_bytes; 2437 dirty = true; 2438 } 2439 } 2440 2441 /* Exclusive/none -> exclusive/none case */ 2442 if (cur_old_count == nr_old_roots && 2443 cur_new_count == nr_new_roots) { 2444 if (cur_old_count == 0) { 2445 /* None -> exclusive/none */ 2446 2447 if (cur_new_count != 0) { 2448 /* None -> exclusive */ 2449 qg->excl += num_bytes; 2450 qg->excl_cmpr += num_bytes; 2451 dirty = true; 2452 } 2453 /* None -> none, nothing changed */ 2454 } else { 2455 /* Exclusive -> exclusive/none */ 2456 2457 if (cur_new_count == 0) { 2458 /* Exclusive -> none */ 2459 qg->excl -= num_bytes; 2460 qg->excl_cmpr -= num_bytes; 2461 dirty = true; 2462 } 2463 /* Exclusive -> exclusive, nothing changed */ 2464 } 2465 } 2466 2467 if (dirty) 2468 qgroup_dirty(fs_info, qg); 2469 } 2470 return 0; 2471 } 2472 2473 /* 2474 * Check if the @roots potentially is a list of fs tree roots 2475 * 2476 * Return 0 for definitely not a fs/subvol tree roots ulist 2477 * Return 1 for possible fs/subvol tree roots in the list (considering an empty 2478 * one as well) 2479 */ 2480 static int maybe_fs_roots(struct ulist *roots) 2481 { 2482 struct ulist_node *unode; 2483 struct ulist_iterator uiter; 2484 2485 /* Empty one, still possible for fs roots */ 2486 if (!roots || roots->nnodes == 0) 2487 return 1; 2488 2489 ULIST_ITER_INIT(&uiter); 2490 unode = ulist_next(roots, &uiter); 2491 if (!unode) 2492 return 1; 2493 2494 /* 2495 * If it contains fs tree roots, then it must belong to fs/subvol 2496 * trees. 2497 * If it contains a non-fs tree, it won't be shared with fs/subvol trees. 
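 * E.g. (illustrative) a ulist whose first entry is subvolume 257 may be a
 * list of fs/subvol tree roots, while one whose first entry is the extent
 * tree objectid cannot be, so checking the first entry is sufficient.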
2498 */ 2499 return is_fstree(unode->val); 2500 } 2501 2502 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, 2503 u64 num_bytes, struct ulist *old_roots, 2504 struct ulist *new_roots) 2505 { 2506 struct btrfs_fs_info *fs_info = trans->fs_info; 2507 struct ulist *qgroups = NULL; 2508 struct ulist *tmp = NULL; 2509 u64 seq; 2510 u64 nr_new_roots = 0; 2511 u64 nr_old_roots = 0; 2512 int ret = 0; 2513 2514 /* 2515 * If quotas get disabled meanwhile, the resouces need to be freed and 2516 * we can't just exit here. 2517 */ 2518 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2519 goto out_free; 2520 2521 if (new_roots) { 2522 if (!maybe_fs_roots(new_roots)) 2523 goto out_free; 2524 nr_new_roots = new_roots->nnodes; 2525 } 2526 if (old_roots) { 2527 if (!maybe_fs_roots(old_roots)) 2528 goto out_free; 2529 nr_old_roots = old_roots->nnodes; 2530 } 2531 2532 /* Quick exit, either not fs tree roots, or won't affect any qgroup */ 2533 if (nr_old_roots == 0 && nr_new_roots == 0) 2534 goto out_free; 2535 2536 BUG_ON(!fs_info->quota_root); 2537 2538 trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr, 2539 num_bytes, nr_old_roots, nr_new_roots); 2540 2541 qgroups = ulist_alloc(GFP_NOFS); 2542 if (!qgroups) { 2543 ret = -ENOMEM; 2544 goto out_free; 2545 } 2546 tmp = ulist_alloc(GFP_NOFS); 2547 if (!tmp) { 2548 ret = -ENOMEM; 2549 goto out_free; 2550 } 2551 2552 mutex_lock(&fs_info->qgroup_rescan_lock); 2553 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 2554 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) { 2555 mutex_unlock(&fs_info->qgroup_rescan_lock); 2556 ret = 0; 2557 goto out_free; 2558 } 2559 } 2560 mutex_unlock(&fs_info->qgroup_rescan_lock); 2561 2562 spin_lock(&fs_info->qgroup_lock); 2563 seq = fs_info->qgroup_seq; 2564 2565 /* Update old refcnts using old_roots */ 2566 ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq, 2567 UPDATE_OLD); 2568 if (ret < 0) 2569 goto out; 2570 2571 /* Update new refcnts using new_roots */ 2572 ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq, 2573 UPDATE_NEW); 2574 if (ret < 0) 2575 goto out; 2576 2577 qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots, 2578 num_bytes, seq); 2579 2580 /* 2581 * Bump qgroup_seq to avoid seq overlap 2582 */ 2583 fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1; 2584 out: 2585 spin_unlock(&fs_info->qgroup_lock); 2586 out_free: 2587 ulist_free(tmp); 2588 ulist_free(qgroups); 2589 ulist_free(old_roots); 2590 ulist_free(new_roots); 2591 return ret; 2592 } 2593 2594 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) 2595 { 2596 struct btrfs_fs_info *fs_info = trans->fs_info; 2597 struct btrfs_qgroup_extent_record *record; 2598 struct btrfs_delayed_ref_root *delayed_refs; 2599 struct ulist *new_roots = NULL; 2600 struct rb_node *node; 2601 u64 num_dirty_extents = 0; 2602 u64 qgroup_to_skip; 2603 int ret = 0; 2604 2605 delayed_refs = &trans->transaction->delayed_refs; 2606 qgroup_to_skip = delayed_refs->qgroup_to_skip; 2607 while ((node = rb_first(&delayed_refs->dirty_extent_root))) { 2608 record = rb_entry(node, struct btrfs_qgroup_extent_record, 2609 node); 2610 2611 num_dirty_extents++; 2612 trace_btrfs_qgroup_account_extents(fs_info, record); 2613 2614 if (!ret) { 2615 /* 2616 * Old roots should be searched when inserting qgroup 2617 * extent record 2618 */ 2619 if (WARN_ON(!record->old_roots)) { 2620 /* Search commit root to find old_roots */ 2621 ret = btrfs_find_all_roots(NULL, 
fs_info, 2622 record->bytenr, 0, 2623 &record->old_roots, false); 2624 if (ret < 0) 2625 goto cleanup; 2626 } 2627 2628 /* Free the reserved data space */ 2629 btrfs_qgroup_free_refroot(fs_info, 2630 record->data_rsv_refroot, 2631 record->data_rsv, 2632 BTRFS_QGROUP_RSV_DATA); 2633 /* 2634 * Use SEQ_LAST as time_seq to do special search, which 2635 * doesn't lock tree or delayed_refs and search current 2636 * root. It's safe inside commit_transaction(). 2637 */ 2638 ret = btrfs_find_all_roots(trans, fs_info, 2639 record->bytenr, SEQ_LAST, &new_roots, false); 2640 if (ret < 0) 2641 goto cleanup; 2642 if (qgroup_to_skip) { 2643 ulist_del(new_roots, qgroup_to_skip, 0); 2644 ulist_del(record->old_roots, qgroup_to_skip, 2645 0); 2646 } 2647 ret = btrfs_qgroup_account_extent(trans, record->bytenr, 2648 record->num_bytes, 2649 record->old_roots, 2650 new_roots); 2651 record->old_roots = NULL; 2652 new_roots = NULL; 2653 } 2654 cleanup: 2655 ulist_free(record->old_roots); 2656 ulist_free(new_roots); 2657 new_roots = NULL; 2658 rb_erase(node, &delayed_refs->dirty_extent_root); 2659 kfree(record); 2660 2661 } 2662 trace_qgroup_num_dirty_extents(fs_info, trans->transid, 2663 num_dirty_extents); 2664 return ret; 2665 } 2666 2667 /* 2668 * called from commit_transaction. Writes all changed qgroups to disk. 2669 */ 2670 int btrfs_run_qgroups(struct btrfs_trans_handle *trans) 2671 { 2672 struct btrfs_fs_info *fs_info = trans->fs_info; 2673 int ret = 0; 2674 2675 if (!fs_info->quota_root) 2676 return ret; 2677 2678 spin_lock(&fs_info->qgroup_lock); 2679 while (!list_empty(&fs_info->dirty_qgroups)) { 2680 struct btrfs_qgroup *qgroup; 2681 qgroup = list_first_entry(&fs_info->dirty_qgroups, 2682 struct btrfs_qgroup, dirty); 2683 list_del_init(&qgroup->dirty); 2684 spin_unlock(&fs_info->qgroup_lock); 2685 ret = update_qgroup_info_item(trans, qgroup); 2686 if (ret) 2687 fs_info->qgroup_flags |= 2688 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2689 ret = update_qgroup_limit_item(trans, qgroup); 2690 if (ret) 2691 fs_info->qgroup_flags |= 2692 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2693 spin_lock(&fs_info->qgroup_lock); 2694 } 2695 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2696 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; 2697 else 2698 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 2699 spin_unlock(&fs_info->qgroup_lock); 2700 2701 ret = update_qgroup_status_item(trans); 2702 if (ret) 2703 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2704 2705 return ret; 2706 } 2707 2708 /* 2709 * Copy the accounting information between qgroups. This is necessary 2710 * when a snapshot or a subvolume is created. Throwing an error will 2711 * cause a transaction abort so we take extra care here to only error 2712 * when a readonly fs is a reasonable outcome. 2713 */ 2714 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, 2715 u64 objectid, struct btrfs_qgroup_inherit *inherit) 2716 { 2717 int ret = 0; 2718 int i; 2719 u64 *i_qgroups; 2720 bool committing = false; 2721 struct btrfs_fs_info *fs_info = trans->fs_info; 2722 struct btrfs_root *quota_root; 2723 struct btrfs_qgroup *srcgroup; 2724 struct btrfs_qgroup *dstgroup; 2725 bool need_rescan = false; 2726 u32 level_size = 0; 2727 u64 nums; 2728 2729 /* 2730 * There are only two callers of this function. 2731 * 2732 * One in create_subvol() in the ioctl context, which needs to hold 2733 * the qgroup_ioctl_lock. 
2734 * 2735 * The other one in create_pending_snapshot() where no other qgroup 2736 * code can modify the fs as they all need to either start a new trans 2737 * or hold a trans handler, thus we don't need to hold 2738 * qgroup_ioctl_lock. 2739 * This would avoid long and complex lock chain and make lockdep happy. 2740 */ 2741 spin_lock(&fs_info->trans_lock); 2742 if (trans->transaction->state == TRANS_STATE_COMMIT_DOING) 2743 committing = true; 2744 spin_unlock(&fs_info->trans_lock); 2745 2746 if (!committing) 2747 mutex_lock(&fs_info->qgroup_ioctl_lock); 2748 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 2749 goto out; 2750 2751 quota_root = fs_info->quota_root; 2752 if (!quota_root) { 2753 ret = -EINVAL; 2754 goto out; 2755 } 2756 2757 if (inherit) { 2758 i_qgroups = (u64 *)(inherit + 1); 2759 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + 2760 2 * inherit->num_excl_copies; 2761 for (i = 0; i < nums; ++i) { 2762 srcgroup = find_qgroup_rb(fs_info, *i_qgroups); 2763 2764 /* 2765 * Zero out invalid groups so we can ignore 2766 * them later. 2767 */ 2768 if (!srcgroup || 2769 ((srcgroup->qgroupid >> 48) <= (objectid >> 48))) 2770 *i_qgroups = 0ULL; 2771 2772 ++i_qgroups; 2773 } 2774 } 2775 2776 /* 2777 * create a tracking group for the subvol itself 2778 */ 2779 ret = add_qgroup_item(trans, quota_root, objectid); 2780 if (ret) 2781 goto out; 2782 2783 /* 2784 * add qgroup to all inherited groups 2785 */ 2786 if (inherit) { 2787 i_qgroups = (u64 *)(inherit + 1); 2788 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) { 2789 if (*i_qgroups == 0) 2790 continue; 2791 ret = add_qgroup_relation_item(trans, objectid, 2792 *i_qgroups); 2793 if (ret && ret != -EEXIST) 2794 goto out; 2795 ret = add_qgroup_relation_item(trans, *i_qgroups, 2796 objectid); 2797 if (ret && ret != -EEXIST) 2798 goto out; 2799 } 2800 ret = 0; 2801 } 2802 2803 2804 spin_lock(&fs_info->qgroup_lock); 2805 2806 dstgroup = add_qgroup_rb(fs_info, objectid); 2807 if (IS_ERR(dstgroup)) { 2808 ret = PTR_ERR(dstgroup); 2809 goto unlock; 2810 } 2811 2812 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) { 2813 dstgroup->lim_flags = inherit->lim.flags; 2814 dstgroup->max_rfer = inherit->lim.max_rfer; 2815 dstgroup->max_excl = inherit->lim.max_excl; 2816 dstgroup->rsv_rfer = inherit->lim.rsv_rfer; 2817 dstgroup->rsv_excl = inherit->lim.rsv_excl; 2818 2819 ret = update_qgroup_limit_item(trans, dstgroup); 2820 if (ret) { 2821 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2822 btrfs_info(fs_info, 2823 "unable to update quota limit for %llu", 2824 dstgroup->qgroupid); 2825 goto unlock; 2826 } 2827 } 2828 2829 if (srcid) { 2830 srcgroup = find_qgroup_rb(fs_info, srcid); 2831 if (!srcgroup) 2832 goto unlock; 2833 2834 /* 2835 * We call inherit after we clone the root in order to make sure 2836 * our counts don't go crazy, so at this point the only 2837 * difference between the two roots should be the root node. 
2838 */ 2839 level_size = fs_info->nodesize; 2840 dstgroup->rfer = srcgroup->rfer; 2841 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; 2842 dstgroup->excl = level_size; 2843 dstgroup->excl_cmpr = level_size; 2844 srcgroup->excl = level_size; 2845 srcgroup->excl_cmpr = level_size; 2846 2847 /* inherit the limit info */ 2848 dstgroup->lim_flags = srcgroup->lim_flags; 2849 dstgroup->max_rfer = srcgroup->max_rfer; 2850 dstgroup->max_excl = srcgroup->max_excl; 2851 dstgroup->rsv_rfer = srcgroup->rsv_rfer; 2852 dstgroup->rsv_excl = srcgroup->rsv_excl; 2853 2854 qgroup_dirty(fs_info, dstgroup); 2855 qgroup_dirty(fs_info, srcgroup); 2856 } 2857 2858 if (!inherit) 2859 goto unlock; 2860 2861 i_qgroups = (u64 *)(inherit + 1); 2862 for (i = 0; i < inherit->num_qgroups; ++i) { 2863 if (*i_qgroups) { 2864 ret = add_relation_rb(fs_info, objectid, *i_qgroups); 2865 if (ret) 2866 goto unlock; 2867 } 2868 ++i_qgroups; 2869 2870 /* 2871 * If we're doing a snapshot, and adding the snapshot to a new 2872 * qgroup, the numbers are guaranteed to be incorrect. 2873 */ 2874 if (srcid) 2875 need_rescan = true; 2876 } 2877 2878 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { 2879 struct btrfs_qgroup *src; 2880 struct btrfs_qgroup *dst; 2881 2882 if (!i_qgroups[0] || !i_qgroups[1]) 2883 continue; 2884 2885 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2886 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2887 2888 if (!src || !dst) { 2889 ret = -EINVAL; 2890 goto unlock; 2891 } 2892 2893 dst->rfer = src->rfer - level_size; 2894 dst->rfer_cmpr = src->rfer_cmpr - level_size; 2895 2896 /* Manually tweaking numbers certainly needs a rescan */ 2897 need_rescan = true; 2898 } 2899 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { 2900 struct btrfs_qgroup *src; 2901 struct btrfs_qgroup *dst; 2902 2903 if (!i_qgroups[0] || !i_qgroups[1]) 2904 continue; 2905 2906 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2907 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2908 2909 if (!src || !dst) { 2910 ret = -EINVAL; 2911 goto unlock; 2912 } 2913 2914 dst->excl = src->excl + level_size; 2915 dst->excl_cmpr = src->excl_cmpr + level_size; 2916 need_rescan = true; 2917 } 2918 2919 unlock: 2920 spin_unlock(&fs_info->qgroup_lock); 2921 if (!ret) 2922 ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup); 2923 out: 2924 if (!committing) 2925 mutex_unlock(&fs_info->qgroup_ioctl_lock); 2926 if (need_rescan) 2927 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 2928 return ret; 2929 } 2930 2931 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) 2932 { 2933 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 2934 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) 2935 return false; 2936 2937 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && 2938 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) 2939 return false; 2940 2941 return true; 2942 } 2943 2944 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, 2945 enum btrfs_qgroup_rsv_type type) 2946 { 2947 struct btrfs_qgroup *qgroup; 2948 struct btrfs_fs_info *fs_info = root->fs_info; 2949 u64 ref_root = root->root_key.objectid; 2950 int ret = 0; 2951 struct ulist_node *unode; 2952 struct ulist_iterator uiter; 2953 2954 if (!is_fstree(ref_root)) 2955 return 0; 2956 2957 if (num_bytes == 0) 2958 return 0; 2959 2960 if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) && 2961 capable(CAP_SYS_RESOURCE)) 2962 enforce = false; 2963 2964 spin_lock(&fs_info->qgroup_lock); 2965 if 
(!fs_info->quota_root) 2966 goto out; 2967 2968 qgroup = find_qgroup_rb(fs_info, ref_root); 2969 if (!qgroup) 2970 goto out; 2971 2972 /* 2973 * in a first step, we check all affected qgroups if any limits would 2974 * be exceeded 2975 */ 2976 ulist_reinit(fs_info->qgroup_ulist); 2977 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 2978 qgroup_to_aux(qgroup), GFP_ATOMIC); 2979 if (ret < 0) 2980 goto out; 2981 ULIST_ITER_INIT(&uiter); 2982 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 2983 struct btrfs_qgroup *qg; 2984 struct btrfs_qgroup_list *glist; 2985 2986 qg = unode_aux_to_qgroup(unode); 2987 2988 if (enforce && !qgroup_check_limits(qg, num_bytes)) { 2989 ret = -EDQUOT; 2990 goto out; 2991 } 2992 2993 list_for_each_entry(glist, &qg->groups, next_group) { 2994 ret = ulist_add(fs_info->qgroup_ulist, 2995 glist->group->qgroupid, 2996 qgroup_to_aux(glist->group), GFP_ATOMIC); 2997 if (ret < 0) 2998 goto out; 2999 } 3000 } 3001 ret = 0; 3002 /* 3003 * no limits exceeded, now record the reservation into all qgroups 3004 */ 3005 ULIST_ITER_INIT(&uiter); 3006 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 3007 struct btrfs_qgroup *qg; 3008 3009 qg = unode_aux_to_qgroup(unode); 3010 3011 qgroup_rsv_add(fs_info, qg, num_bytes, type); 3012 } 3013 3014 out: 3015 spin_unlock(&fs_info->qgroup_lock); 3016 return ret; 3017 } 3018 3019 /* 3020 * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0 3021 * qgroup). 3022 * 3023 * Will handle all higher level qgroup too. 3024 * 3025 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup. 3026 * This special case is only used for META_PERTRANS type. 3027 */ 3028 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, 3029 u64 ref_root, u64 num_bytes, 3030 enum btrfs_qgroup_rsv_type type) 3031 { 3032 struct btrfs_qgroup *qgroup; 3033 struct ulist_node *unode; 3034 struct ulist_iterator uiter; 3035 int ret = 0; 3036 3037 if (!is_fstree(ref_root)) 3038 return; 3039 3040 if (num_bytes == 0) 3041 return; 3042 3043 if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) { 3044 WARN(1, "%s: Invalid type to free", __func__); 3045 return; 3046 } 3047 spin_lock(&fs_info->qgroup_lock); 3048 3049 if (!fs_info->quota_root) 3050 goto out; 3051 3052 qgroup = find_qgroup_rb(fs_info, ref_root); 3053 if (!qgroup) 3054 goto out; 3055 3056 if (num_bytes == (u64)-1) 3057 /* 3058 * We're freeing all pertrans rsv, get reserved value from 3059 * level 0 qgroup as real num_bytes to free. 3060 */ 3061 num_bytes = qgroup->rsv.values[type]; 3062 3063 ulist_reinit(fs_info->qgroup_ulist); 3064 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 3065 qgroup_to_aux(qgroup), GFP_ATOMIC); 3066 if (ret < 0) 3067 goto out; 3068 ULIST_ITER_INIT(&uiter); 3069 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 3070 struct btrfs_qgroup *qg; 3071 struct btrfs_qgroup_list *glist; 3072 3073 qg = unode_aux_to_qgroup(unode); 3074 3075 qgroup_rsv_release(fs_info, qg, num_bytes, type); 3076 3077 list_for_each_entry(glist, &qg->groups, next_group) { 3078 ret = ulist_add(fs_info->qgroup_ulist, 3079 glist->group->qgroupid, 3080 qgroup_to_aux(glist->group), GFP_ATOMIC); 3081 if (ret < 0) 3082 goto out; 3083 } 3084 } 3085 3086 out: 3087 spin_unlock(&fs_info->qgroup_lock); 3088 } 3089 3090 /* 3091 * Check if the leaf is the last leaf. Which means all node pointers 3092 * are at their last position. 
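 * E.g. (illustrative) in a two level tree whose root node holds 3 pointers,
 * the current leaf is the last one only when path->slots[1] == 2, i.e. equal
 * to btrfs_header_nritems() - 1 at that level.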
3093 */ 3094 static bool is_last_leaf(struct btrfs_path *path) 3095 { 3096 int i; 3097 3098 for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { 3099 if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) 3100 return false; 3101 } 3102 return true; 3103 } 3104 3105 /* 3106 * returns < 0 on error, 0 when more leafs are to be scanned. 3107 * returns 1 when done. 3108 */ 3109 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans, 3110 struct btrfs_path *path) 3111 { 3112 struct btrfs_fs_info *fs_info = trans->fs_info; 3113 struct btrfs_key found; 3114 struct extent_buffer *scratch_leaf = NULL; 3115 struct ulist *roots = NULL; 3116 u64 num_bytes; 3117 bool done; 3118 int slot; 3119 int ret; 3120 3121 mutex_lock(&fs_info->qgroup_rescan_lock); 3122 ret = btrfs_search_slot_for_read(fs_info->extent_root, 3123 &fs_info->qgroup_rescan_progress, 3124 path, 1, 0); 3125 3126 btrfs_debug(fs_info, 3127 "current progress key (%llu %u %llu), search_slot ret %d", 3128 fs_info->qgroup_rescan_progress.objectid, 3129 fs_info->qgroup_rescan_progress.type, 3130 fs_info->qgroup_rescan_progress.offset, ret); 3131 3132 if (ret) { 3133 /* 3134 * The rescan is about to end, we will not be scanning any 3135 * further blocks. We cannot unset the RESCAN flag here, because 3136 * we want to commit the transaction if everything went well. 3137 * To make the live accounting work in this phase, we set our 3138 * scan progress pointer such that every real extent objectid 3139 * will be smaller. 3140 */ 3141 fs_info->qgroup_rescan_progress.objectid = (u64)-1; 3142 btrfs_release_path(path); 3143 mutex_unlock(&fs_info->qgroup_rescan_lock); 3144 return ret; 3145 } 3146 done = is_last_leaf(path); 3147 3148 btrfs_item_key_to_cpu(path->nodes[0], &found, 3149 btrfs_header_nritems(path->nodes[0]) - 1); 3150 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1; 3151 3152 scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]); 3153 if (!scratch_leaf) { 3154 ret = -ENOMEM; 3155 mutex_unlock(&fs_info->qgroup_rescan_lock); 3156 goto out; 3157 } 3158 slot = path->slots[0]; 3159 btrfs_release_path(path); 3160 mutex_unlock(&fs_info->qgroup_rescan_lock); 3161 3162 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) { 3163 btrfs_item_key_to_cpu(scratch_leaf, &found, slot); 3164 if (found.type != BTRFS_EXTENT_ITEM_KEY && 3165 found.type != BTRFS_METADATA_ITEM_KEY) 3166 continue; 3167 if (found.type == BTRFS_METADATA_ITEM_KEY) 3168 num_bytes = fs_info->nodesize; 3169 else 3170 num_bytes = found.offset; 3171 3172 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0, 3173 &roots, false); 3174 if (ret < 0) 3175 goto out; 3176 /* For rescan, just pass old_roots as NULL */ 3177 ret = btrfs_qgroup_account_extent(trans, found.objectid, 3178 num_bytes, NULL, roots); 3179 if (ret < 0) 3180 goto out; 3181 } 3182 out: 3183 if (scratch_leaf) 3184 free_extent_buffer(scratch_leaf); 3185 3186 if (done && !ret) { 3187 ret = 1; 3188 fs_info->qgroup_rescan_progress.objectid = (u64)-1; 3189 } 3190 return ret; 3191 } 3192 3193 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) 3194 { 3195 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, 3196 qgroup_rescan_work); 3197 struct btrfs_path *path; 3198 struct btrfs_trans_handle *trans = NULL; 3199 int err = -ENOMEM; 3200 int ret = 0; 3201 3202 path = btrfs_alloc_path(); 3203 if (!path) 3204 goto out; 3205 /* 3206 * Rescan should only search for commit root, and any later difference 3207 * should be recorded by qgroup 3208 */ 3209 
path->search_commit_root = 1; 3210 path->skip_locking = 1; 3211 3212 err = 0; 3213 while (!err && !btrfs_fs_closing(fs_info)) { 3214 trans = btrfs_start_transaction(fs_info->fs_root, 0); 3215 if (IS_ERR(trans)) { 3216 err = PTR_ERR(trans); 3217 break; 3218 } 3219 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { 3220 err = -EINTR; 3221 } else { 3222 err = qgroup_rescan_leaf(trans, path); 3223 } 3224 if (err > 0) 3225 btrfs_commit_transaction(trans); 3226 else 3227 btrfs_end_transaction(trans); 3228 } 3229 3230 out: 3231 btrfs_free_path(path); 3232 3233 mutex_lock(&fs_info->qgroup_rescan_lock); 3234 if (err > 0 && 3235 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { 3236 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 3237 } else if (err < 0) { 3238 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 3239 } 3240 mutex_unlock(&fs_info->qgroup_rescan_lock); 3241 3242 /* 3243 * only update status, since the previous part has already updated the 3244 * qgroup info. 3245 */ 3246 trans = btrfs_start_transaction(fs_info->quota_root, 1); 3247 if (IS_ERR(trans)) { 3248 err = PTR_ERR(trans); 3249 trans = NULL; 3250 btrfs_err(fs_info, 3251 "fail to start transaction for status update: %d", 3252 err); 3253 } 3254 3255 mutex_lock(&fs_info->qgroup_rescan_lock); 3256 if (!btrfs_fs_closing(fs_info)) 3257 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3258 if (trans) { 3259 ret = update_qgroup_status_item(trans); 3260 if (ret < 0) { 3261 err = ret; 3262 btrfs_err(fs_info, "fail to update qgroup status: %d", 3263 err); 3264 } 3265 } 3266 fs_info->qgroup_rescan_running = false; 3267 complete_all(&fs_info->qgroup_rescan_completion); 3268 mutex_unlock(&fs_info->qgroup_rescan_lock); 3269 3270 if (!trans) 3271 return; 3272 3273 btrfs_end_transaction(trans); 3274 3275 if (btrfs_fs_closing(fs_info)) { 3276 btrfs_info(fs_info, "qgroup scan paused"); 3277 } else if (err >= 0) { 3278 btrfs_info(fs_info, "qgroup scan completed%s", 3279 err > 0 ? " (inconsistency flag cleared)" : ""); 3280 } else { 3281 btrfs_err(fs_info, "qgroup scan failed with %d", err); 3282 } 3283 } 3284 3285 /* 3286 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all 3287 * memory required for the rescan context. 
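 *
 * @init_flags == 0 means we are resuming an interrupted rescan at mount time;
 * a non-zero value means a fresh rescan is being requested (e.g. from
 * btrfs_qgroup_rescan()) and the RESCAN status flag gets set here.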
3288 */ 3289 static int 3290 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 3291 int init_flags) 3292 { 3293 int ret = 0; 3294 3295 if (!init_flags) { 3296 /* we're resuming qgroup rescan at mount time */ 3297 if (!(fs_info->qgroup_flags & 3298 BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { 3299 btrfs_warn(fs_info, 3300 "qgroup rescan init failed, qgroup rescan is not queued"); 3301 ret = -EINVAL; 3302 } else if (!(fs_info->qgroup_flags & 3303 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3304 btrfs_warn(fs_info, 3305 "qgroup rescan init failed, qgroup is not enabled"); 3306 ret = -EINVAL; 3307 } 3308 3309 if (ret) 3310 return ret; 3311 } 3312 3313 mutex_lock(&fs_info->qgroup_rescan_lock); 3314 3315 if (init_flags) { 3316 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 3317 btrfs_warn(fs_info, 3318 "qgroup rescan is already in progress"); 3319 ret = -EINPROGRESS; 3320 } else if (!(fs_info->qgroup_flags & 3321 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3322 btrfs_warn(fs_info, 3323 "qgroup rescan init failed, qgroup is not enabled"); 3324 ret = -EINVAL; 3325 } 3326 3327 if (ret) { 3328 mutex_unlock(&fs_info->qgroup_rescan_lock); 3329 return ret; 3330 } 3331 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3332 } 3333 3334 memset(&fs_info->qgroup_rescan_progress, 0, 3335 sizeof(fs_info->qgroup_rescan_progress)); 3336 fs_info->qgroup_rescan_progress.objectid = progress_objectid; 3337 init_completion(&fs_info->qgroup_rescan_completion); 3338 mutex_unlock(&fs_info->qgroup_rescan_lock); 3339 3340 btrfs_init_work(&fs_info->qgroup_rescan_work, 3341 btrfs_qgroup_rescan_worker, NULL, NULL); 3342 return 0; 3343 } 3344 3345 static void 3346 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) 3347 { 3348 struct rb_node *n; 3349 struct btrfs_qgroup *qgroup; 3350 3351 spin_lock(&fs_info->qgroup_lock); 3352 /* clear all current qgroup tracking information */ 3353 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { 3354 qgroup = rb_entry(n, struct btrfs_qgroup, node); 3355 qgroup->rfer = 0; 3356 qgroup->rfer_cmpr = 0; 3357 qgroup->excl = 0; 3358 qgroup->excl_cmpr = 0; 3359 qgroup_dirty(fs_info, qgroup); 3360 } 3361 spin_unlock(&fs_info->qgroup_lock); 3362 } 3363 3364 int 3365 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) 3366 { 3367 int ret = 0; 3368 struct btrfs_trans_handle *trans; 3369 3370 ret = qgroup_rescan_init(fs_info, 0, 1); 3371 if (ret) 3372 return ret; 3373 3374 /* 3375 * We have set the rescan_progress to 0, which means no more 3376 * delayed refs will be accounted by btrfs_qgroup_account_ref. 3377 * However, btrfs_qgroup_account_ref may be right after its call 3378 * to btrfs_find_all_roots, in which case it would still do the 3379 * accounting. 3380 * To solve this, we're committing the transaction, which will 3381 * ensure we run all delayed refs and only after that, we are 3382 * going to clear all tracking information for a clean start. 
3383 */ 3384 3385 trans = btrfs_join_transaction(fs_info->fs_root); 3386 if (IS_ERR(trans)) { 3387 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3388 return PTR_ERR(trans); 3389 } 3390 ret = btrfs_commit_transaction(trans); 3391 if (ret) { 3392 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3393 return ret; 3394 } 3395 3396 qgroup_rescan_zero_tracking(fs_info); 3397 3398 mutex_lock(&fs_info->qgroup_rescan_lock); 3399 fs_info->qgroup_rescan_running = true; 3400 btrfs_queue_work(fs_info->qgroup_rescan_workers, 3401 &fs_info->qgroup_rescan_work); 3402 mutex_unlock(&fs_info->qgroup_rescan_lock); 3403 3404 return 0; 3405 } 3406 3407 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, 3408 bool interruptible) 3409 { 3410 int running; 3411 int ret = 0; 3412 3413 mutex_lock(&fs_info->qgroup_rescan_lock); 3414 running = fs_info->qgroup_rescan_running; 3415 mutex_unlock(&fs_info->qgroup_rescan_lock); 3416 3417 if (!running) 3418 return 0; 3419 3420 if (interruptible) 3421 ret = wait_for_completion_interruptible( 3422 &fs_info->qgroup_rescan_completion); 3423 else 3424 wait_for_completion(&fs_info->qgroup_rescan_completion); 3425 3426 return ret; 3427 } 3428 3429 /* 3430 * this is only called from open_ctree where we're still single threaded, thus 3431 * locking is omitted here. 3432 */ 3433 void 3434 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) 3435 { 3436 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 3437 mutex_lock(&fs_info->qgroup_rescan_lock); 3438 fs_info->qgroup_rescan_running = true; 3439 btrfs_queue_work(fs_info->qgroup_rescan_workers, 3440 &fs_info->qgroup_rescan_work); 3441 mutex_unlock(&fs_info->qgroup_rescan_lock); 3442 } 3443 } 3444 3445 #define rbtree_iterate_from_safe(node, next, start) \ 3446 for (node = start; node && ({ next = rb_next(node); 1;}); node = next) 3447 3448 static int qgroup_unreserve_range(struct btrfs_inode *inode, 3449 struct extent_changeset *reserved, u64 start, 3450 u64 len) 3451 { 3452 struct rb_node *node; 3453 struct rb_node *next; 3454 struct ulist_node *entry; 3455 int ret = 0; 3456 3457 node = reserved->range_changed.root.rb_node; 3458 if (!node) 3459 return 0; 3460 while (node) { 3461 entry = rb_entry(node, struct ulist_node, rb_node); 3462 if (entry->val < start) 3463 node = node->rb_right; 3464 else 3465 node = node->rb_left; 3466 } 3467 3468 if (entry->val > start && rb_prev(&entry->rb_node)) 3469 entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node, 3470 rb_node); 3471 3472 rbtree_iterate_from_safe(node, next, &entry->rb_node) { 3473 u64 entry_start; 3474 u64 entry_end; 3475 u64 entry_len; 3476 int clear_ret; 3477 3478 entry = rb_entry(node, struct ulist_node, rb_node); 3479 entry_start = entry->val; 3480 entry_end = entry->aux; 3481 entry_len = entry_end - entry_start + 1; 3482 3483 if (entry_start >= start + len) 3484 break; 3485 if (entry_start + entry_len <= start) 3486 continue; 3487 /* 3488 * Now the entry is in [start, start + len), revert the 3489 * EXTENT_QGROUP_RESERVED bit. 
3490 */ 3491 clear_ret = clear_extent_bits(&inode->io_tree, entry_start, 3492 entry_end, EXTENT_QGROUP_RESERVED); 3493 if (!ret && clear_ret < 0) 3494 ret = clear_ret; 3495 3496 ulist_del(&reserved->range_changed, entry->val, entry->aux); 3497 if (likely(reserved->bytes_changed >= entry_len)) { 3498 reserved->bytes_changed -= entry_len; 3499 } else { 3500 WARN_ON(1); 3501 reserved->bytes_changed = 0; 3502 } 3503 } 3504 3505 return ret; 3506 } 3507 3508 /* 3509 * Try to free some space for qgroup. 3510 * 3511 * For qgroup, there are only 3 ways to free qgroup space: 3512 * - Flush nodatacow write 3513 * Any nodatacow write will free its reserved data space at run_delalloc_range(). 3514 * In theory, we should only flush nodatacow inodes, but it's not yet 3515 * possible, so we need to flush the whole root. 3516 * 3517 * - Wait for ordered extents 3518 * When ordered extents are finished, their reserved metadata is finally 3519 * converted to per_trans status, which can be freed by later commit 3520 * transaction. 3521 * 3522 * - Commit transaction 3523 * This would free the meta_per_trans space. 3524 * In theory this shouldn't provide much space, but any extra qgroup space 3525 * is better than nothing. 3526 */ 3527 static int try_flush_qgroup(struct btrfs_root *root) 3528 { 3529 struct btrfs_trans_handle *trans; 3530 int ret; 3531 bool can_commit = true; 3532 3533 /* 3534 * We don't want to run flush again and again, so if there is a running 3535 * one, we won't start a new flush but wait for it to finish and return. 3536 */ 3537 if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) { 3538 wait_event(root->qgroup_flush_wait, 3539 !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)); 3540 return 0; 3541 } 3542 3543 /* 3544 * If current process holds a transaction, we shouldn't flush, as we 3545 * assume all space reservation happens before a transaction handle is 3546 * held. 3547 * 3548 * But there are cases like btrfs_delayed_item_reserve_metadata() where 3549 * we try to reserve space with one transaction handle already held. 3550 * In that case we can't commit transaction, but at least try to end it 3551 * and hope the started data writes can free some space.
3552 */ 3553 if (current->journal_info && 3554 current->journal_info != BTRFS_SEND_TRANS_STUB) 3555 can_commit = false; 3556 3557 ret = btrfs_start_delalloc_snapshot(root); 3558 if (ret < 0) 3559 goto out; 3560 btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); 3561 3562 trans = btrfs_join_transaction(root); 3563 if (IS_ERR(trans)) { 3564 ret = PTR_ERR(trans); 3565 goto out; 3566 } 3567 3568 if (can_commit) 3569 ret = btrfs_commit_transaction(trans); 3570 else 3571 ret = btrfs_end_transaction(trans); 3572 out: 3573 clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state); 3574 wake_up(&root->qgroup_flush_wait); 3575 return ret; 3576 } 3577 3578 static int qgroup_reserve_data(struct btrfs_inode *inode, 3579 struct extent_changeset **reserved_ret, u64 start, 3580 u64 len) 3581 { 3582 struct btrfs_root *root = inode->root; 3583 struct extent_changeset *reserved; 3584 bool new_reserved = false; 3585 u64 orig_reserved; 3586 u64 to_reserve; 3587 int ret; 3588 3589 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) || 3590 !is_fstree(root->root_key.objectid) || len == 0) 3591 return 0; 3592 3593 /* @reserved parameter is mandatory for qgroup */ 3594 if (WARN_ON(!reserved_ret)) 3595 return -EINVAL; 3596 if (!*reserved_ret) { 3597 new_reserved = true; 3598 *reserved_ret = extent_changeset_alloc(); 3599 if (!*reserved_ret) 3600 return -ENOMEM; 3601 } 3602 reserved = *reserved_ret; 3603 /* Record already reserved space */ 3604 orig_reserved = reserved->bytes_changed; 3605 ret = set_record_extent_bits(&inode->io_tree, start, 3606 start + len - 1, EXTENT_QGROUP_RESERVED, reserved); 3607 3608 /* Newly reserved space */ 3609 to_reserve = reserved->bytes_changed - orig_reserved; 3610 trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len, 3611 to_reserve, QGROUP_RESERVE); 3612 if (ret < 0) 3613 goto out; 3614 ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA); 3615 if (ret < 0) 3616 goto cleanup; 3617 3618 return ret; 3619 3620 cleanup: 3621 qgroup_unreserve_range(inode, reserved, start, len); 3622 out: 3623 if (new_reserved) { 3624 extent_changeset_release(reserved); 3625 kfree(reserved); 3626 *reserved_ret = NULL; 3627 } 3628 return ret; 3629 } 3630 3631 /* 3632 * Reserve qgroup space for range [start, start + len). 3633 * 3634 * This function will either reserve space from related qgroups or do nothing 3635 * if the range is already reserved. 3636 * 3637 * Return 0 for successful reservation 3638 * Return <0 for error (including -EDQUOT) 3639 * 3640 * NOTE: This function may sleep for memory allocation, dirty page flushing and 3641 * commit transaction. So caller should not hold any dirty page locked.
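 *
 * Illustrative usage sketch (not tied to any particular caller):
 *
 *	struct extent_changeset *data_reserved = NULL;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, pos, count);
 *	if (ret < 0)
 *		return ret;
 *	// ... dirty the range ...
 *	// on error:   btrfs_qgroup_free_data(inode, data_reserved, pos, count);
 *	// on success: btrfs_qgroup_release_data(inode, pos, count) once the
 *	//             file extent item has been written out.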
3642 */ 3643 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode, 3644 struct extent_changeset **reserved_ret, u64 start, 3645 u64 len) 3646 { 3647 int ret; 3648 3649 ret = qgroup_reserve_data(inode, reserved_ret, start, len); 3650 if (ret <= 0 && ret != -EDQUOT) 3651 return ret; 3652 3653 ret = try_flush_qgroup(inode->root); 3654 if (ret < 0) 3655 return ret; 3656 return qgroup_reserve_data(inode, reserved_ret, start, len); 3657 } 3658 3659 /* Free ranges specified by @reserved, normally in error path */ 3660 static int qgroup_free_reserved_data(struct btrfs_inode *inode, 3661 struct extent_changeset *reserved, u64 start, u64 len) 3662 { 3663 struct btrfs_root *root = inode->root; 3664 struct ulist_node *unode; 3665 struct ulist_iterator uiter; 3666 struct extent_changeset changeset; 3667 int freed = 0; 3668 int ret; 3669 3670 extent_changeset_init(&changeset); 3671 len = round_up(start + len, root->fs_info->sectorsize); 3672 start = round_down(start, root->fs_info->sectorsize); 3673 3674 ULIST_ITER_INIT(&uiter); 3675 while ((unode = ulist_next(&reserved->range_changed, &uiter))) { 3676 u64 range_start = unode->val; 3677 /* unode->aux is the inclusive end */ 3678 u64 range_len = unode->aux - range_start + 1; 3679 u64 free_start; 3680 u64 free_len; 3681 3682 extent_changeset_release(&changeset); 3683 3684 /* Only free range in range [start, start + len) */ 3685 if (range_start >= start + len || 3686 range_start + range_len <= start) 3687 continue; 3688 free_start = max(range_start, start); 3689 free_len = min(start + len, range_start + range_len) - 3690 free_start; 3691 /* 3692 * TODO: To also modify reserved->ranges_reserved to reflect 3693 * the modification. 3694 * 3695 * However as long as we free qgroup reserved according to 3696 * EXTENT_QGROUP_RESERVED, we won't double free. 3697 * So not need to rush. 
3698 */ 3699 ret = clear_record_extent_bits(&inode->io_tree, free_start, 3700 free_start + free_len - 1, 3701 EXTENT_QGROUP_RESERVED, &changeset); 3702 if (ret < 0) 3703 goto out; 3704 freed += changeset.bytes_changed; 3705 } 3706 btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed, 3707 BTRFS_QGROUP_RSV_DATA); 3708 ret = freed; 3709 out: 3710 extent_changeset_release(&changeset); 3711 return ret; 3712 } 3713 3714 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, 3715 struct extent_changeset *reserved, u64 start, u64 len, 3716 int free) 3717 { 3718 struct extent_changeset changeset; 3719 int trace_op = QGROUP_RELEASE; 3720 int ret; 3721 3722 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags)) 3723 return 0; 3724 3725 /* In release case, we shouldn't have @reserved */ 3726 WARN_ON(!free && reserved); 3727 if (free && reserved) 3728 return qgroup_free_reserved_data(inode, reserved, start, len); 3729 extent_changeset_init(&changeset); 3730 ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1, 3731 EXTENT_QGROUP_RESERVED, &changeset); 3732 if (ret < 0) 3733 goto out; 3734 3735 if (free) 3736 trace_op = QGROUP_FREE; 3737 trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len, 3738 changeset.bytes_changed, trace_op); 3739 if (free) 3740 btrfs_qgroup_free_refroot(inode->root->fs_info, 3741 inode->root->root_key.objectid, 3742 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); 3743 ret = changeset.bytes_changed; 3744 out: 3745 extent_changeset_release(&changeset); 3746 return ret; 3747 } 3748 3749 /* 3750 * Free a reserved space range from io_tree and related qgroups 3751 * 3752 * Should be called when a range of pages get invalidated before reaching disk. 3753 * Or for error cleanup case. 3754 * if @reserved is given, only reserved range in [@start, @start + @len) will 3755 * be freed. 3756 * 3757 * For data written to disk, use btrfs_qgroup_release_data(). 3758 * 3759 * NOTE: This function may sleep for memory allocation. 3760 */ 3761 int btrfs_qgroup_free_data(struct btrfs_inode *inode, 3762 struct extent_changeset *reserved, u64 start, u64 len) 3763 { 3764 return __btrfs_qgroup_release_data(inode, reserved, start, len, 1); 3765 } 3766 3767 /* 3768 * Release a reserved space range from io_tree only. 3769 * 3770 * Should be called when a range of pages get written to disk and corresponding 3771 * FILE_EXTENT is inserted into corresponding root. 3772 * 3773 * Since new qgroup accounting framework will only update qgroup numbers at 3774 * commit_transaction() time, its reserved space shouldn't be freed from 3775 * related qgroups. 3776 * 3777 * But we should release the range from io_tree, to allow further write to be 3778 * COWed. 3779 * 3780 * NOTE: This function may sleep for memory allocation. 
3781 */ 3782 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len) 3783 { 3784 return __btrfs_qgroup_release_data(inode, NULL, start, len, 0); 3785 } 3786 3787 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes, 3788 enum btrfs_qgroup_rsv_type type) 3789 { 3790 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 3791 type != BTRFS_QGROUP_RSV_META_PERTRANS) 3792 return; 3793 if (num_bytes == 0) 3794 return; 3795 3796 spin_lock(&root->qgroup_meta_rsv_lock); 3797 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) 3798 root->qgroup_meta_rsv_prealloc += num_bytes; 3799 else 3800 root->qgroup_meta_rsv_pertrans += num_bytes; 3801 spin_unlock(&root->qgroup_meta_rsv_lock); 3802 } 3803 3804 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes, 3805 enum btrfs_qgroup_rsv_type type) 3806 { 3807 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 3808 type != BTRFS_QGROUP_RSV_META_PERTRANS) 3809 return 0; 3810 if (num_bytes == 0) 3811 return 0; 3812 3813 spin_lock(&root->qgroup_meta_rsv_lock); 3814 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) { 3815 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc, 3816 num_bytes); 3817 root->qgroup_meta_rsv_prealloc -= num_bytes; 3818 } else { 3819 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans, 3820 num_bytes); 3821 root->qgroup_meta_rsv_pertrans -= num_bytes; 3822 } 3823 spin_unlock(&root->qgroup_meta_rsv_lock); 3824 return num_bytes; 3825 } 3826 3827 static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, 3828 enum btrfs_qgroup_rsv_type type, bool enforce) 3829 { 3830 struct btrfs_fs_info *fs_info = root->fs_info; 3831 int ret; 3832 3833 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3834 !is_fstree(root->root_key.objectid) || num_bytes == 0) 3835 return 0; 3836 3837 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 3838 trace_qgroup_meta_reserve(root, (s64)num_bytes, type); 3839 ret = qgroup_reserve(root, num_bytes, enforce, type); 3840 if (ret < 0) 3841 return ret; 3842 /* 3843 * Record what we have reserved into root. 3844 * 3845 * To avoid quota disabled->enabled underflow. 3846 * In that case, we may try to free space we haven't reserved 3847 * (since quota was disabled), so record what we reserved into root. 3848 * And ensure later release won't underflow this number. 
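 * As an illustrative example: if 16KiB of META_PREALLOC was requested while
 * quotas were disabled (so nothing was recorded), a later free of those 16KiB
 * is capped by sub_root_meta_rsv() to what was actually recorded, so the
 * qgroup reservation counter cannot underflow.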
3849 */ 3850 add_root_meta_rsv(root, num_bytes, type); 3851 return ret; 3852 } 3853 3854 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, 3855 enum btrfs_qgroup_rsv_type type, bool enforce) 3856 { 3857 int ret; 3858 3859 ret = qgroup_reserve_meta(root, num_bytes, type, enforce); 3860 if (ret <= 0 && ret != -EDQUOT) 3861 return ret; 3862 3863 ret = try_flush_qgroup(root); 3864 if (ret < 0) 3865 return ret; 3866 return qgroup_reserve_meta(root, num_bytes, type, enforce); 3867 } 3868 3869 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) 3870 { 3871 struct btrfs_fs_info *fs_info = root->fs_info; 3872 3873 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3874 !is_fstree(root->root_key.objectid)) 3875 return; 3876 3877 /* TODO: Update trace point to handle such free */ 3878 trace_qgroup_meta_free_all_pertrans(root); 3879 /* Special value -1 means to free all reserved space */ 3880 btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1, 3881 BTRFS_QGROUP_RSV_META_PERTRANS); 3882 } 3883 3884 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, 3885 enum btrfs_qgroup_rsv_type type) 3886 { 3887 struct btrfs_fs_info *fs_info = root->fs_info; 3888 3889 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3890 !is_fstree(root->root_key.objectid)) 3891 return; 3892 3893 /* 3894 * reservation for META_PREALLOC can happen before quota is enabled, 3895 * which can lead to underflow. 3896 * Here ensure we will only free what we really have reserved. 3897 */ 3898 num_bytes = sub_root_meta_rsv(root, num_bytes, type); 3899 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 3900 trace_qgroup_meta_reserve(root, -(s64)num_bytes, type); 3901 btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, 3902 num_bytes, type); 3903 } 3904 3905 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, 3906 int num_bytes) 3907 { 3908 struct btrfs_qgroup *qgroup; 3909 struct ulist_node *unode; 3910 struct ulist_iterator uiter; 3911 int ret = 0; 3912 3913 if (num_bytes == 0) 3914 return; 3915 if (!fs_info->quota_root) 3916 return; 3917 3918 spin_lock(&fs_info->qgroup_lock); 3919 qgroup = find_qgroup_rb(fs_info, ref_root); 3920 if (!qgroup) 3921 goto out; 3922 ulist_reinit(fs_info->qgroup_ulist); 3923 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 3924 qgroup_to_aux(qgroup), GFP_ATOMIC); 3925 if (ret < 0) 3926 goto out; 3927 ULIST_ITER_INIT(&uiter); 3928 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 3929 struct btrfs_qgroup *qg; 3930 struct btrfs_qgroup_list *glist; 3931 3932 qg = unode_aux_to_qgroup(unode); 3933 3934 qgroup_rsv_release(fs_info, qg, num_bytes, 3935 BTRFS_QGROUP_RSV_META_PREALLOC); 3936 qgroup_rsv_add(fs_info, qg, num_bytes, 3937 BTRFS_QGROUP_RSV_META_PERTRANS); 3938 list_for_each_entry(glist, &qg->groups, next_group) { 3939 ret = ulist_add(fs_info->qgroup_ulist, 3940 glist->group->qgroupid, 3941 qgroup_to_aux(glist->group), GFP_ATOMIC); 3942 if (ret < 0) 3943 goto out; 3944 } 3945 } 3946 out: 3947 spin_unlock(&fs_info->qgroup_lock); 3948 } 3949 3950 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) 3951 { 3952 struct btrfs_fs_info *fs_info = root->fs_info; 3953 3954 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 3955 !is_fstree(root->root_key.objectid)) 3956 return; 3957 /* Same as btrfs_qgroup_free_meta_prealloc() */ 3958 num_bytes = sub_root_meta_rsv(root, num_bytes, 3959 BTRFS_QGROUP_RSV_META_PREALLOC); 3960 
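	/*
	 * The capped amount is now moved from META_PREALLOC to META_PERTRANS
	 * for every qgroup in the hierarchy, e.g. (illustrative) converting
	 * 16KiB for subvolume 257 that is a member of 1/100 lowers the
	 * PREALLOC rsv and raises the PERTRANS rsv of both qgroups by 16KiB.
	 */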
trace_qgroup_meta_convert(root, num_bytes); 3961 qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes); 3962 } 3963 3964 /* 3965 * Check qgroup reserved space leaking, normally at destroy inode 3966 * time 3967 */ 3968 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode) 3969 { 3970 struct extent_changeset changeset; 3971 struct ulist_node *unode; 3972 struct ulist_iterator iter; 3973 int ret; 3974 3975 extent_changeset_init(&changeset); 3976 ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, 3977 EXTENT_QGROUP_RESERVED, &changeset); 3978 3979 WARN_ON(ret < 0); 3980 if (WARN_ON(changeset.bytes_changed)) { 3981 ULIST_ITER_INIT(&iter); 3982 while ((unode = ulist_next(&changeset.range_changed, &iter))) { 3983 btrfs_warn(inode->root->fs_info, 3984 "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu", 3985 btrfs_ino(inode), unode->val, unode->aux); 3986 } 3987 btrfs_qgroup_free_refroot(inode->root->fs_info, 3988 inode->root->root_key.objectid, 3989 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); 3990 3991 } 3992 extent_changeset_release(&changeset); 3993 } 3994 3995 void btrfs_qgroup_init_swapped_blocks( 3996 struct btrfs_qgroup_swapped_blocks *swapped_blocks) 3997 { 3998 int i; 3999 4000 spin_lock_init(&swapped_blocks->lock); 4001 for (i = 0; i < BTRFS_MAX_LEVEL; i++) 4002 swapped_blocks->blocks[i] = RB_ROOT; 4003 swapped_blocks->swapped = false; 4004 } 4005 4006 /* 4007 * Delete all swapped blocks record of @root. 4008 * Every record here means we skipped a full subtree scan for qgroup. 4009 * 4010 * Gets called when committing one transaction. 4011 */ 4012 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root) 4013 { 4014 struct btrfs_qgroup_swapped_blocks *swapped_blocks; 4015 int i; 4016 4017 swapped_blocks = &root->swapped_blocks; 4018 4019 spin_lock(&swapped_blocks->lock); 4020 if (!swapped_blocks->swapped) 4021 goto out; 4022 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 4023 struct rb_root *cur_root = &swapped_blocks->blocks[i]; 4024 struct btrfs_qgroup_swapped_block *entry; 4025 struct btrfs_qgroup_swapped_block *next; 4026 4027 rbtree_postorder_for_each_entry_safe(entry, next, cur_root, 4028 node) 4029 kfree(entry); 4030 swapped_blocks->blocks[i] = RB_ROOT; 4031 } 4032 swapped_blocks->swapped = false; 4033 out: 4034 spin_unlock(&swapped_blocks->lock); 4035 } 4036 4037 /* 4038 * Add subtree roots record into @subvol_root. 
/*
 * Add subtree root records into @subvol_root.
 *
 * @subvol_root:	tree root of the subvolume tree getting swapped
 * @bg:			block group under balance
 * @subvol_parent/slot:	pointer to the subtree root in the subvolume tree
 * @reloc_parent/slot:	pointer to the subtree root in the reloc tree
 *			BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:	last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot)
{
	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	int level = btrfs_header_level(subvol_parent) - 1;
	int ret = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
			__func__,
			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
		return -EUCLEAN;
	}

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * @reloc_parent/slot is still before swap, while @block is going to
	 * record the bytenr after swap, so we do the swap here.
	 */
	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
							     reloc_slot);
	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
							    subvol_slot);
	block->last_snapshot = last_snapshot;
	block->level = level;

	/*
	 * If we have bg == NULL, we're called from btrfs_recover_relocation();
	 * no one else can modify tree blocks, thus qgroup numbers will not
	 * change regardless of the value of trace_leaf.
	 */
	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
		block->trace_leaf = true;
	else
		block->trace_leaf = false;
	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

	/* Insert @block into @blocks */
	spin_lock(&blocks->lock);
	cur = &blocks->blocks[level].rb_node;
	while (*cur) {
		struct btrfs_qgroup_swapped_block *entry;

		parent = *cur;
		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
				 node);

		if (entry->subvol_bytenr < block->subvol_bytenr) {
			cur = &(*cur)->rb_left;
		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
			cur = &(*cur)->rb_right;
		} else {
			if (entry->subvol_generation !=
					block->subvol_generation ||
			    entry->reloc_bytenr != block->reloc_bytenr ||
			    entry->reloc_generation !=
					block->reloc_generation) {
				/*
				 * Duplicated but mismatched entry found.
				 * Shouldn't happen.
				 *
				 * Marking qgroup inconsistent should be enough
				 * for end users.
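				 * (The flag is set via the "out" label below;
				 * a later qgroup rescan will rebuild
				 * consistent numbers.)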
				 */
				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
				ret = -EEXIST;
			}
			kfree(block);
			goto out_unlock;
		}
	}
	rb_link_node(&block->node, parent, cur);
	rb_insert_color(&block->node, &blocks->blocks[level]);
	blocks->swapped = true;
out_unlock:
	spin_unlock(&blocks->lock);
out:
	if (ret < 0)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

/*
 * Check if the tree block is a subtree root, and if so do the needed
 * delayed subtree trace for qgroup.
 *
 * This is called during btrfs_cow_block().
 */
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct extent_buffer *subvol_eb)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
	struct btrfs_qgroup_swapped_block *block;
	struct extent_buffer *reloc_eb = NULL;
	struct rb_node *node;
	bool found = false;
	bool swapped = false;
	int level = btrfs_header_level(subvol_eb);
	int ret = 0;
	int i;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;
	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
		return 0;

	spin_lock(&blocks->lock);
	if (!blocks->swapped) {
		spin_unlock(&blocks->lock);
		return 0;
	}
	node = blocks->blocks[level].rb_node;

	while (node) {
		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
		if (block->subvol_bytenr < subvol_eb->start) {
			node = node->rb_left;
		} else if (block->subvol_bytenr > subvol_eb->start) {
			node = node->rb_right;
		} else {
			found = true;
			break;
		}
	}
	if (!found) {
		spin_unlock(&blocks->lock);
		goto out;
	}
	/* Found one, remove it from @blocks first and update blocks->swapped */
	rb_erase(&block->node, &blocks->blocks[level]);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
			swapped = true;
			break;
		}
	}
	blocks->swapped = swapped;
	spin_unlock(&blocks->lock);

	/* Read out reloc subtree root */
	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, 0,
				   block->reloc_generation, block->level,
				   &block->first_key);
	if (IS_ERR(reloc_eb)) {
		ret = PTR_ERR(reloc_eb);
		reloc_eb = NULL;
		goto free_out;
	}
	if (!extent_buffer_uptodate(reloc_eb)) {
		ret = -EIO;
		goto free_out;
	}

	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
					block->last_snapshot,
					block->trace_leaf);
free_out:
	kfree(block);
	free_extent_buffer(reloc_eb);
out:
	if (ret < 0) {
		btrfs_err_rl(fs_info,
			     "failed to account subtree at bytenr %llu: %d",
			     subvol_eb->start, ret);
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	return ret;
}

/*
 * Free the extent records queued for qgroup accounting on this transaction
 * without processing them.
 */
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{
	struct btrfs_qgroup_extent_record *entry;
	struct btrfs_qgroup_extent_record *next;
	struct rb_root *root;

	root = &trans->delayed_refs.dirty_extent_root;
	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
		ulist_free(entry->old_roots);
		kfree(entry);
	}
}