/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"


/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 * Refer to qgroup_update_refcnt() and qgroup_update_counters()
	 * for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;
};

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

/*
 * glue structure to represent the relations between qgroups.
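 *
 * Each btrfs_qgroup_list entry is threaded onto two lists at once:
 * through next_group it hangs off the member qgroup's ->groups list,
 * and through next_member it hangs off the parent qgroup's ->members
 * list, so a relation can be walked from either end (see
 * add_relation_rb() and del_relation_rb() below).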
126 */ 127 struct btrfs_qgroup_list { 128 struct list_head next_group; 129 struct list_head next_member; 130 struct btrfs_qgroup *group; 131 struct btrfs_qgroup *member; 132 }; 133 134 #define ptr_to_u64(x) ((u64)(uintptr_t)x) 135 #define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x) 136 137 static int 138 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 139 int init_flags); 140 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info); 141 142 /* must be called with qgroup_ioctl_lock held */ 143 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info, 144 u64 qgroupid) 145 { 146 struct rb_node *n = fs_info->qgroup_tree.rb_node; 147 struct btrfs_qgroup *qgroup; 148 149 while (n) { 150 qgroup = rb_entry(n, struct btrfs_qgroup, node); 151 if (qgroup->qgroupid < qgroupid) 152 n = n->rb_left; 153 else if (qgroup->qgroupid > qgroupid) 154 n = n->rb_right; 155 else 156 return qgroup; 157 } 158 return NULL; 159 } 160 161 /* must be called with qgroup_lock held */ 162 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info, 163 u64 qgroupid) 164 { 165 struct rb_node **p = &fs_info->qgroup_tree.rb_node; 166 struct rb_node *parent = NULL; 167 struct btrfs_qgroup *qgroup; 168 169 while (*p) { 170 parent = *p; 171 qgroup = rb_entry(parent, struct btrfs_qgroup, node); 172 173 if (qgroup->qgroupid < qgroupid) 174 p = &(*p)->rb_left; 175 else if (qgroup->qgroupid > qgroupid) 176 p = &(*p)->rb_right; 177 else 178 return qgroup; 179 } 180 181 qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC); 182 if (!qgroup) 183 return ERR_PTR(-ENOMEM); 184 185 qgroup->qgroupid = qgroupid; 186 INIT_LIST_HEAD(&qgroup->groups); 187 INIT_LIST_HEAD(&qgroup->members); 188 INIT_LIST_HEAD(&qgroup->dirty); 189 190 rb_link_node(&qgroup->node, parent, p); 191 rb_insert_color(&qgroup->node, &fs_info->qgroup_tree); 192 193 return qgroup; 194 } 195 196 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup) 197 { 198 struct btrfs_qgroup_list *list; 199 200 list_del(&qgroup->dirty); 201 while (!list_empty(&qgroup->groups)) { 202 list = list_first_entry(&qgroup->groups, 203 struct btrfs_qgroup_list, next_group); 204 list_del(&list->next_group); 205 list_del(&list->next_member); 206 kfree(list); 207 } 208 209 while (!list_empty(&qgroup->members)) { 210 list = list_first_entry(&qgroup->members, 211 struct btrfs_qgroup_list, next_member); 212 list_del(&list->next_group); 213 list_del(&list->next_member); 214 kfree(list); 215 } 216 kfree(qgroup); 217 } 218 219 /* must be called with qgroup_lock held */ 220 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid) 221 { 222 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid); 223 224 if (!qgroup) 225 return -ENOENT; 226 227 rb_erase(&qgroup->node, &fs_info->qgroup_tree); 228 __del_qgroup_rb(qgroup); 229 return 0; 230 } 231 232 /* must be called with qgroup_lock held */ 233 static int add_relation_rb(struct btrfs_fs_info *fs_info, 234 u64 memberid, u64 parentid) 235 { 236 struct btrfs_qgroup *member; 237 struct btrfs_qgroup *parent; 238 struct btrfs_qgroup_list *list; 239 240 member = find_qgroup_rb(fs_info, memberid); 241 parent = find_qgroup_rb(fs_info, parentid); 242 if (!member || !parent) 243 return -ENOENT; 244 245 list = kzalloc(sizeof(*list), GFP_ATOMIC); 246 if (!list) 247 return -ENOMEM; 248 249 list->group = parent; 250 list->member = member; 251 list_add_tail(&list->next_group, &member->groups); 252 list_add_tail(&list->next_member, &parent->members); 253 254 return 0; 255 } 256 257 /* 
must be called with qgroup_lock held */ 258 static int del_relation_rb(struct btrfs_fs_info *fs_info, 259 u64 memberid, u64 parentid) 260 { 261 struct btrfs_qgroup *member; 262 struct btrfs_qgroup *parent; 263 struct btrfs_qgroup_list *list; 264 265 member = find_qgroup_rb(fs_info, memberid); 266 parent = find_qgroup_rb(fs_info, parentid); 267 if (!member || !parent) 268 return -ENOENT; 269 270 list_for_each_entry(list, &member->groups, next_group) { 271 if (list->group == parent) { 272 list_del(&list->next_group); 273 list_del(&list->next_member); 274 kfree(list); 275 return 0; 276 } 277 } 278 return -ENOENT; 279 } 280 281 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 282 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid, 283 u64 rfer, u64 excl) 284 { 285 struct btrfs_qgroup *qgroup; 286 287 qgroup = find_qgroup_rb(fs_info, qgroupid); 288 if (!qgroup) 289 return -EINVAL; 290 if (qgroup->rfer != rfer || qgroup->excl != excl) 291 return -EINVAL; 292 return 0; 293 } 294 #endif 295 296 /* 297 * The full config is read in one go, only called from open_ctree() 298 * It doesn't use any locking, as at this point we're still single-threaded 299 */ 300 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) 301 { 302 struct btrfs_key key; 303 struct btrfs_key found_key; 304 struct btrfs_root *quota_root = fs_info->quota_root; 305 struct btrfs_path *path = NULL; 306 struct extent_buffer *l; 307 int slot; 308 int ret = 0; 309 u64 flags = 0; 310 u64 rescan_progress = 0; 311 312 if (!fs_info->quota_enabled) 313 return 0; 314 315 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS); 316 if (!fs_info->qgroup_ulist) { 317 ret = -ENOMEM; 318 goto out; 319 } 320 321 path = btrfs_alloc_path(); 322 if (!path) { 323 ret = -ENOMEM; 324 goto out; 325 } 326 327 /* default this to quota off, in case no status key is found */ 328 fs_info->qgroup_flags = 0; 329 330 /* 331 * pass 1: read status, all qgroup infos and limits 332 */ 333 key.objectid = 0; 334 key.type = 0; 335 key.offset = 0; 336 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1); 337 if (ret) 338 goto out; 339 340 while (1) { 341 struct btrfs_qgroup *qgroup; 342 343 slot = path->slots[0]; 344 l = path->nodes[0]; 345 btrfs_item_key_to_cpu(l, &found_key, slot); 346 347 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) { 348 struct btrfs_qgroup_status_item *ptr; 349 350 ptr = btrfs_item_ptr(l, slot, 351 struct btrfs_qgroup_status_item); 352 353 if (btrfs_qgroup_status_version(l, ptr) != 354 BTRFS_QGROUP_STATUS_VERSION) { 355 btrfs_err(fs_info, 356 "old qgroup version, quota disabled"); 357 goto out; 358 } 359 if (btrfs_qgroup_status_generation(l, ptr) != 360 fs_info->generation) { 361 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 362 btrfs_err(fs_info, 363 "qgroup generation mismatch, " 364 "marked as inconsistent"); 365 } 366 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, 367 ptr); 368 rescan_progress = btrfs_qgroup_status_rescan(l, ptr); 369 goto next1; 370 } 371 372 if (found_key.type != BTRFS_QGROUP_INFO_KEY && 373 found_key.type != BTRFS_QGROUP_LIMIT_KEY) 374 goto next1; 375 376 qgroup = find_qgroup_rb(fs_info, found_key.offset); 377 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) || 378 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) { 379 btrfs_err(fs_info, "inconsistent qgroup config"); 380 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 381 } 382 if (!qgroup) { 383 qgroup = add_qgroup_rb(fs_info, found_key.offset); 384 if (IS_ERR(qgroup)) { 385 ret = PTR_ERR(qgroup); 386 goto out; 387 } 388 } 389 
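		/*
		 * Fill the in-memory qgroup from whichever item type we hit:
		 * INFO items carry the usage counters, LIMIT items carry the
		 * configured limits. Both key types store the qgroupid in the
		 * key offset, matched against found_key.offset above.
		 */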
switch (found_key.type) { 390 case BTRFS_QGROUP_INFO_KEY: { 391 struct btrfs_qgroup_info_item *ptr; 392 393 ptr = btrfs_item_ptr(l, slot, 394 struct btrfs_qgroup_info_item); 395 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr); 396 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr); 397 qgroup->excl = btrfs_qgroup_info_excl(l, ptr); 398 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr); 399 /* generation currently unused */ 400 break; 401 } 402 case BTRFS_QGROUP_LIMIT_KEY: { 403 struct btrfs_qgroup_limit_item *ptr; 404 405 ptr = btrfs_item_ptr(l, slot, 406 struct btrfs_qgroup_limit_item); 407 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr); 408 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr); 409 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr); 410 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr); 411 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr); 412 break; 413 } 414 } 415 next1: 416 ret = btrfs_next_item(quota_root, path); 417 if (ret < 0) 418 goto out; 419 if (ret) 420 break; 421 } 422 btrfs_release_path(path); 423 424 /* 425 * pass 2: read all qgroup relations 426 */ 427 key.objectid = 0; 428 key.type = BTRFS_QGROUP_RELATION_KEY; 429 key.offset = 0; 430 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0); 431 if (ret) 432 goto out; 433 while (1) { 434 slot = path->slots[0]; 435 l = path->nodes[0]; 436 btrfs_item_key_to_cpu(l, &found_key, slot); 437 438 if (found_key.type != BTRFS_QGROUP_RELATION_KEY) 439 goto next2; 440 441 if (found_key.objectid > found_key.offset) { 442 /* parent <- member, not needed to build config */ 443 /* FIXME should we omit the key completely? */ 444 goto next2; 445 } 446 447 ret = add_relation_rb(fs_info, found_key.objectid, 448 found_key.offset); 449 if (ret == -ENOENT) { 450 btrfs_warn(fs_info, 451 "orphan qgroup relation 0x%llx->0x%llx", 452 found_key.objectid, found_key.offset); 453 ret = 0; /* ignore the error */ 454 } 455 if (ret) 456 goto out; 457 next2: 458 ret = btrfs_next_item(quota_root, path); 459 if (ret < 0) 460 goto out; 461 if (ret) 462 break; 463 } 464 out: 465 fs_info->qgroup_flags |= flags; 466 if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) { 467 fs_info->quota_enabled = 0; 468 fs_info->pending_quota_state = 0; 469 } else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN && 470 ret >= 0) { 471 ret = qgroup_rescan_init(fs_info, rescan_progress, 0); 472 } 473 btrfs_free_path(path); 474 475 if (ret < 0) { 476 ulist_free(fs_info->qgroup_ulist); 477 fs_info->qgroup_ulist = NULL; 478 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 479 } 480 481 return ret < 0 ? ret : 0; 482 } 483 484 /* 485 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(), 486 * first two are in single-threaded paths.And for the third one, we have set 487 * quota_root to be null with qgroup_lock held before, so it is safe to clean 488 * up the in-memory structures without qgroup_lock held. 489 */ 490 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) 491 { 492 struct rb_node *n; 493 struct btrfs_qgroup *qgroup; 494 495 while ((n = rb_first(&fs_info->qgroup_tree))) { 496 qgroup = rb_entry(n, struct btrfs_qgroup, node); 497 rb_erase(n, &fs_info->qgroup_tree); 498 __del_qgroup_rb(qgroup); 499 } 500 /* 501 * we call btrfs_free_qgroup_config() when umounting 502 * filesystem and disabling quota, so we set qgroup_ulit 503 * to be null here to avoid double free. 
504 */ 505 ulist_free(fs_info->qgroup_ulist); 506 fs_info->qgroup_ulist = NULL; 507 } 508 509 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, 510 struct btrfs_root *quota_root, 511 u64 src, u64 dst) 512 { 513 int ret; 514 struct btrfs_path *path; 515 struct btrfs_key key; 516 517 path = btrfs_alloc_path(); 518 if (!path) 519 return -ENOMEM; 520 521 key.objectid = src; 522 key.type = BTRFS_QGROUP_RELATION_KEY; 523 key.offset = dst; 524 525 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0); 526 527 btrfs_mark_buffer_dirty(path->nodes[0]); 528 529 btrfs_free_path(path); 530 return ret; 531 } 532 533 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, 534 struct btrfs_root *quota_root, 535 u64 src, u64 dst) 536 { 537 int ret; 538 struct btrfs_path *path; 539 struct btrfs_key key; 540 541 path = btrfs_alloc_path(); 542 if (!path) 543 return -ENOMEM; 544 545 key.objectid = src; 546 key.type = BTRFS_QGROUP_RELATION_KEY; 547 key.offset = dst; 548 549 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 550 if (ret < 0) 551 goto out; 552 553 if (ret > 0) { 554 ret = -ENOENT; 555 goto out; 556 } 557 558 ret = btrfs_del_item(trans, quota_root, path); 559 out: 560 btrfs_free_path(path); 561 return ret; 562 } 563 564 static int add_qgroup_item(struct btrfs_trans_handle *trans, 565 struct btrfs_root *quota_root, u64 qgroupid) 566 { 567 int ret; 568 struct btrfs_path *path; 569 struct btrfs_qgroup_info_item *qgroup_info; 570 struct btrfs_qgroup_limit_item *qgroup_limit; 571 struct extent_buffer *leaf; 572 struct btrfs_key key; 573 574 if (btrfs_test_is_dummy_root(quota_root)) 575 return 0; 576 577 path = btrfs_alloc_path(); 578 if (!path) 579 return -ENOMEM; 580 581 key.objectid = 0; 582 key.type = BTRFS_QGROUP_INFO_KEY; 583 key.offset = qgroupid; 584 585 /* 586 * Avoid a transaction abort by catching -EEXIST here. In that 587 * case, we proceed by re-initializing the existing structure 588 * on disk. 
589 */ 590 591 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 592 sizeof(*qgroup_info)); 593 if (ret && ret != -EEXIST) 594 goto out; 595 596 leaf = path->nodes[0]; 597 qgroup_info = btrfs_item_ptr(leaf, path->slots[0], 598 struct btrfs_qgroup_info_item); 599 btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid); 600 btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0); 601 btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0); 602 btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0); 603 btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0); 604 605 btrfs_mark_buffer_dirty(leaf); 606 607 btrfs_release_path(path); 608 609 key.type = BTRFS_QGROUP_LIMIT_KEY; 610 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 611 sizeof(*qgroup_limit)); 612 if (ret && ret != -EEXIST) 613 goto out; 614 615 leaf = path->nodes[0]; 616 qgroup_limit = btrfs_item_ptr(leaf, path->slots[0], 617 struct btrfs_qgroup_limit_item); 618 btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0); 619 btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0); 620 btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0); 621 btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0); 622 btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0); 623 624 btrfs_mark_buffer_dirty(leaf); 625 626 ret = 0; 627 out: 628 btrfs_free_path(path); 629 return ret; 630 } 631 632 static int del_qgroup_item(struct btrfs_trans_handle *trans, 633 struct btrfs_root *quota_root, u64 qgroupid) 634 { 635 int ret; 636 struct btrfs_path *path; 637 struct btrfs_key key; 638 639 path = btrfs_alloc_path(); 640 if (!path) 641 return -ENOMEM; 642 643 key.objectid = 0; 644 key.type = BTRFS_QGROUP_INFO_KEY; 645 key.offset = qgroupid; 646 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 647 if (ret < 0) 648 goto out; 649 650 if (ret > 0) { 651 ret = -ENOENT; 652 goto out; 653 } 654 655 ret = btrfs_del_item(trans, quota_root, path); 656 if (ret) 657 goto out; 658 659 btrfs_release_path(path); 660 661 key.type = BTRFS_QGROUP_LIMIT_KEY; 662 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 663 if (ret < 0) 664 goto out; 665 666 if (ret > 0) { 667 ret = -ENOENT; 668 goto out; 669 } 670 671 ret = btrfs_del_item(trans, quota_root, path); 672 673 out: 674 btrfs_free_path(path); 675 return ret; 676 } 677 678 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans, 679 struct btrfs_root *root, 680 struct btrfs_qgroup *qgroup) 681 { 682 struct btrfs_path *path; 683 struct btrfs_key key; 684 struct extent_buffer *l; 685 struct btrfs_qgroup_limit_item *qgroup_limit; 686 int ret; 687 int slot; 688 689 key.objectid = 0; 690 key.type = BTRFS_QGROUP_LIMIT_KEY; 691 key.offset = qgroup->qgroupid; 692 693 path = btrfs_alloc_path(); 694 if (!path) 695 return -ENOMEM; 696 697 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 698 if (ret > 0) 699 ret = -ENOENT; 700 701 if (ret) 702 goto out; 703 704 l = path->nodes[0]; 705 slot = path->slots[0]; 706 qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item); 707 btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags); 708 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer); 709 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl); 710 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer); 711 btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl); 712 713 btrfs_mark_buffer_dirty(l); 714 715 out: 716 btrfs_free_path(path); 717 return ret; 718 } 719 720 static int update_qgroup_info_item(struct 
btrfs_trans_handle *trans, 721 struct btrfs_root *root, 722 struct btrfs_qgroup *qgroup) 723 { 724 struct btrfs_path *path; 725 struct btrfs_key key; 726 struct extent_buffer *l; 727 struct btrfs_qgroup_info_item *qgroup_info; 728 int ret; 729 int slot; 730 731 if (btrfs_test_is_dummy_root(root)) 732 return 0; 733 734 key.objectid = 0; 735 key.type = BTRFS_QGROUP_INFO_KEY; 736 key.offset = qgroup->qgroupid; 737 738 path = btrfs_alloc_path(); 739 if (!path) 740 return -ENOMEM; 741 742 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 743 if (ret > 0) 744 ret = -ENOENT; 745 746 if (ret) 747 goto out; 748 749 l = path->nodes[0]; 750 slot = path->slots[0]; 751 qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item); 752 btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid); 753 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer); 754 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr); 755 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl); 756 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr); 757 758 btrfs_mark_buffer_dirty(l); 759 760 out: 761 btrfs_free_path(path); 762 return ret; 763 } 764 765 static int update_qgroup_status_item(struct btrfs_trans_handle *trans, 766 struct btrfs_fs_info *fs_info, 767 struct btrfs_root *root) 768 { 769 struct btrfs_path *path; 770 struct btrfs_key key; 771 struct extent_buffer *l; 772 struct btrfs_qgroup_status_item *ptr; 773 int ret; 774 int slot; 775 776 key.objectid = 0; 777 key.type = BTRFS_QGROUP_STATUS_KEY; 778 key.offset = 0; 779 780 path = btrfs_alloc_path(); 781 if (!path) 782 return -ENOMEM; 783 784 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 785 if (ret > 0) 786 ret = -ENOENT; 787 788 if (ret) 789 goto out; 790 791 l = path->nodes[0]; 792 slot = path->slots[0]; 793 ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item); 794 btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags); 795 btrfs_set_qgroup_status_generation(l, ptr, trans->transid); 796 btrfs_set_qgroup_status_rescan(l, ptr, 797 fs_info->qgroup_rescan_progress.objectid); 798 799 btrfs_mark_buffer_dirty(l); 800 801 out: 802 btrfs_free_path(path); 803 return ret; 804 } 805 806 /* 807 * called with qgroup_lock held 808 */ 809 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans, 810 struct btrfs_root *root) 811 { 812 struct btrfs_path *path; 813 struct btrfs_key key; 814 struct extent_buffer *leaf = NULL; 815 int ret; 816 int nr = 0; 817 818 path = btrfs_alloc_path(); 819 if (!path) 820 return -ENOMEM; 821 822 path->leave_spinning = 1; 823 824 key.objectid = 0; 825 key.offset = 0; 826 key.type = 0; 827 828 while (1) { 829 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 830 if (ret < 0) 831 goto out; 832 leaf = path->nodes[0]; 833 nr = btrfs_header_nritems(leaf); 834 if (!nr) 835 break; 836 /* 837 * delete the leaf one by one 838 * since the whole tree is going 839 * to be deleted. 
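 *
 * Each pass re-searches from key (0, 0, 0), lands on the leftmost leaf
 * and removes all of its items in one go with btrfs_del_items(), so the
 * loop ends once the tree has shrunk to a single empty leaf
 * (nritems == 0).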
840 */ 841 path->slots[0] = 0; 842 ret = btrfs_del_items(trans, root, path, 0, nr); 843 if (ret) 844 goto out; 845 846 btrfs_release_path(path); 847 } 848 ret = 0; 849 out: 850 root->fs_info->pending_quota_state = 0; 851 btrfs_free_path(path); 852 return ret; 853 } 854 855 int btrfs_quota_enable(struct btrfs_trans_handle *trans, 856 struct btrfs_fs_info *fs_info) 857 { 858 struct btrfs_root *quota_root; 859 struct btrfs_root *tree_root = fs_info->tree_root; 860 struct btrfs_path *path = NULL; 861 struct btrfs_qgroup_status_item *ptr; 862 struct extent_buffer *leaf; 863 struct btrfs_key key; 864 struct btrfs_key found_key; 865 struct btrfs_qgroup *qgroup = NULL; 866 int ret = 0; 867 int slot; 868 869 mutex_lock(&fs_info->qgroup_ioctl_lock); 870 if (fs_info->quota_root) { 871 fs_info->pending_quota_state = 1; 872 goto out; 873 } 874 875 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS); 876 if (!fs_info->qgroup_ulist) { 877 ret = -ENOMEM; 878 goto out; 879 } 880 881 /* 882 * initially create the quota tree 883 */ 884 quota_root = btrfs_create_tree(trans, fs_info, 885 BTRFS_QUOTA_TREE_OBJECTID); 886 if (IS_ERR(quota_root)) { 887 ret = PTR_ERR(quota_root); 888 goto out; 889 } 890 891 path = btrfs_alloc_path(); 892 if (!path) { 893 ret = -ENOMEM; 894 goto out_free_root; 895 } 896 897 key.objectid = 0; 898 key.type = BTRFS_QGROUP_STATUS_KEY; 899 key.offset = 0; 900 901 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 902 sizeof(*ptr)); 903 if (ret) 904 goto out_free_path; 905 906 leaf = path->nodes[0]; 907 ptr = btrfs_item_ptr(leaf, path->slots[0], 908 struct btrfs_qgroup_status_item); 909 btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid); 910 btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION); 911 fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON | 912 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 913 btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags); 914 btrfs_set_qgroup_status_rescan(leaf, ptr, 0); 915 916 btrfs_mark_buffer_dirty(leaf); 917 918 key.objectid = 0; 919 key.type = BTRFS_ROOT_REF_KEY; 920 key.offset = 0; 921 922 btrfs_release_path(path); 923 ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0); 924 if (ret > 0) 925 goto out_add_root; 926 if (ret < 0) 927 goto out_free_path; 928 929 930 while (1) { 931 slot = path->slots[0]; 932 leaf = path->nodes[0]; 933 btrfs_item_key_to_cpu(leaf, &found_key, slot); 934 935 if (found_key.type == BTRFS_ROOT_REF_KEY) { 936 ret = add_qgroup_item(trans, quota_root, 937 found_key.offset); 938 if (ret) 939 goto out_free_path; 940 941 qgroup = add_qgroup_rb(fs_info, found_key.offset); 942 if (IS_ERR(qgroup)) { 943 ret = PTR_ERR(qgroup); 944 goto out_free_path; 945 } 946 } 947 ret = btrfs_next_item(tree_root, path); 948 if (ret < 0) 949 goto out_free_path; 950 if (ret) 951 break; 952 } 953 954 out_add_root: 955 btrfs_release_path(path); 956 ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID); 957 if (ret) 958 goto out_free_path; 959 960 qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID); 961 if (IS_ERR(qgroup)) { 962 ret = PTR_ERR(qgroup); 963 goto out_free_path; 964 } 965 spin_lock(&fs_info->qgroup_lock); 966 fs_info->quota_root = quota_root; 967 fs_info->pending_quota_state = 1; 968 spin_unlock(&fs_info->qgroup_lock); 969 out_free_path: 970 btrfs_free_path(path); 971 out_free_root: 972 if (ret) { 973 free_extent_buffer(quota_root->node); 974 free_extent_buffer(quota_root->commit_root); 975 kfree(quota_root); 976 } 977 out: 978 if (ret) { 979 ulist_free(fs_info->qgroup_ulist); 
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting case: if we are adding/removing the only ref for an
 * extent, then this qgroup and all of the parent qgroups get their reference
 * and exclusive counts adjusted.
 *
 * Caller should hold fs_info->qgroup_lock.
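 *
 * An illustrative call (the values here are made up for the example):
 * freeing the last reference that subvolume 257 holds on a 16KiB extent
 * would be
 *
 *	__qgroup_excl_accounting(fs_info, tmp, 257, 16384, -1);
 *
 * which subtracts 16KiB from both rfer and excl of qgroup 0/257 and of
 * every parent qgroup above it, walking upwards via the tmp ulist.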
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    u64 num_bytes, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;
	if (sign > 0)
		qgroup->reserved -= num_bytes;

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = u64_to_ptr(unode->aux);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup->reserved -= num_bytes;
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group),
					GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}


/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parent qgroups is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, return >0 if a full rescan is needed
 * (the INCONSISTENT flag is set in that case).
 * Return < 0 for other errors.
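 *
 * The quick path only applies when the source qgroup's excl equals its
 * rfer, i.e. everything it references is exclusively its own; in that
 * case adding or removing the relationship simply adds or subtracts that
 * amount from the destination qgroup and all of its parents via
 * __qgroup_excl_accounting().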
1113 */ 1114 static int quick_update_accounting(struct btrfs_fs_info *fs_info, 1115 struct ulist *tmp, u64 src, u64 dst, 1116 int sign) 1117 { 1118 struct btrfs_qgroup *qgroup; 1119 int ret = 1; 1120 int err = 0; 1121 1122 qgroup = find_qgroup_rb(fs_info, src); 1123 if (!qgroup) 1124 goto out; 1125 if (qgroup->excl == qgroup->rfer) { 1126 ret = 0; 1127 err = __qgroup_excl_accounting(fs_info, tmp, dst, 1128 qgroup->excl, sign); 1129 if (err < 0) { 1130 ret = err; 1131 goto out; 1132 } 1133 } 1134 out: 1135 if (ret) 1136 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1137 return ret; 1138 } 1139 1140 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, 1141 struct btrfs_fs_info *fs_info, u64 src, u64 dst) 1142 { 1143 struct btrfs_root *quota_root; 1144 struct btrfs_qgroup *parent; 1145 struct btrfs_qgroup *member; 1146 struct btrfs_qgroup_list *list; 1147 struct ulist *tmp; 1148 int ret = 0; 1149 1150 /* Check the level of src and dst first */ 1151 if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) 1152 return -EINVAL; 1153 1154 tmp = ulist_alloc(GFP_NOFS); 1155 if (!tmp) 1156 return -ENOMEM; 1157 1158 mutex_lock(&fs_info->qgroup_ioctl_lock); 1159 quota_root = fs_info->quota_root; 1160 if (!quota_root) { 1161 ret = -EINVAL; 1162 goto out; 1163 } 1164 member = find_qgroup_rb(fs_info, src); 1165 parent = find_qgroup_rb(fs_info, dst); 1166 if (!member || !parent) { 1167 ret = -EINVAL; 1168 goto out; 1169 } 1170 1171 /* check if such qgroup relation exist firstly */ 1172 list_for_each_entry(list, &member->groups, next_group) { 1173 if (list->group == parent) { 1174 ret = -EEXIST; 1175 goto out; 1176 } 1177 } 1178 1179 ret = add_qgroup_relation_item(trans, quota_root, src, dst); 1180 if (ret) 1181 goto out; 1182 1183 ret = add_qgroup_relation_item(trans, quota_root, dst, src); 1184 if (ret) { 1185 del_qgroup_relation_item(trans, quota_root, src, dst); 1186 goto out; 1187 } 1188 1189 spin_lock(&fs_info->qgroup_lock); 1190 ret = add_relation_rb(quota_root->fs_info, src, dst); 1191 if (ret < 0) { 1192 spin_unlock(&fs_info->qgroup_lock); 1193 goto out; 1194 } 1195 ret = quick_update_accounting(fs_info, tmp, src, dst, 1); 1196 spin_unlock(&fs_info->qgroup_lock); 1197 out: 1198 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1199 ulist_free(tmp); 1200 return ret; 1201 } 1202 1203 int __del_qgroup_relation(struct btrfs_trans_handle *trans, 1204 struct btrfs_fs_info *fs_info, u64 src, u64 dst) 1205 { 1206 struct btrfs_root *quota_root; 1207 struct btrfs_qgroup *parent; 1208 struct btrfs_qgroup *member; 1209 struct btrfs_qgroup_list *list; 1210 struct ulist *tmp; 1211 int ret = 0; 1212 int err; 1213 1214 tmp = ulist_alloc(GFP_NOFS); 1215 if (!tmp) 1216 return -ENOMEM; 1217 1218 quota_root = fs_info->quota_root; 1219 if (!quota_root) { 1220 ret = -EINVAL; 1221 goto out; 1222 } 1223 1224 member = find_qgroup_rb(fs_info, src); 1225 parent = find_qgroup_rb(fs_info, dst); 1226 if (!member || !parent) { 1227 ret = -EINVAL; 1228 goto out; 1229 } 1230 1231 /* check if such qgroup relation exist firstly */ 1232 list_for_each_entry(list, &member->groups, next_group) { 1233 if (list->group == parent) 1234 goto exist; 1235 } 1236 ret = -ENOENT; 1237 goto out; 1238 exist: 1239 ret = del_qgroup_relation_item(trans, quota_root, src, dst); 1240 err = del_qgroup_relation_item(trans, quota_root, dst, src); 1241 if (err && !ret) 1242 ret = err; 1243 1244 spin_lock(&fs_info->qgroup_lock); 1245 del_relation_rb(fs_info, src, dst); 1246 ret = quick_update_accounting(fs_info, tmp, src, dst, -1); 1247 
spin_unlock(&fs_info->qgroup_lock); 1248 out: 1249 ulist_free(tmp); 1250 return ret; 1251 } 1252 1253 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, 1254 struct btrfs_fs_info *fs_info, u64 src, u64 dst) 1255 { 1256 int ret = 0; 1257 1258 mutex_lock(&fs_info->qgroup_ioctl_lock); 1259 ret = __del_qgroup_relation(trans, fs_info, src, dst); 1260 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1261 1262 return ret; 1263 } 1264 1265 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, 1266 struct btrfs_fs_info *fs_info, u64 qgroupid) 1267 { 1268 struct btrfs_root *quota_root; 1269 struct btrfs_qgroup *qgroup; 1270 int ret = 0; 1271 1272 mutex_lock(&fs_info->qgroup_ioctl_lock); 1273 quota_root = fs_info->quota_root; 1274 if (!quota_root) { 1275 ret = -EINVAL; 1276 goto out; 1277 } 1278 qgroup = find_qgroup_rb(fs_info, qgroupid); 1279 if (qgroup) { 1280 ret = -EEXIST; 1281 goto out; 1282 } 1283 1284 ret = add_qgroup_item(trans, quota_root, qgroupid); 1285 if (ret) 1286 goto out; 1287 1288 spin_lock(&fs_info->qgroup_lock); 1289 qgroup = add_qgroup_rb(fs_info, qgroupid); 1290 spin_unlock(&fs_info->qgroup_lock); 1291 1292 if (IS_ERR(qgroup)) 1293 ret = PTR_ERR(qgroup); 1294 out: 1295 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1296 return ret; 1297 } 1298 1299 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, 1300 struct btrfs_fs_info *fs_info, u64 qgroupid) 1301 { 1302 struct btrfs_root *quota_root; 1303 struct btrfs_qgroup *qgroup; 1304 struct btrfs_qgroup_list *list; 1305 int ret = 0; 1306 1307 mutex_lock(&fs_info->qgroup_ioctl_lock); 1308 quota_root = fs_info->quota_root; 1309 if (!quota_root) { 1310 ret = -EINVAL; 1311 goto out; 1312 } 1313 1314 qgroup = find_qgroup_rb(fs_info, qgroupid); 1315 if (!qgroup) { 1316 ret = -ENOENT; 1317 goto out; 1318 } else { 1319 /* check if there are no children of this qgroup */ 1320 if (!list_empty(&qgroup->members)) { 1321 ret = -EBUSY; 1322 goto out; 1323 } 1324 } 1325 ret = del_qgroup_item(trans, quota_root, qgroupid); 1326 1327 while (!list_empty(&qgroup->groups)) { 1328 list = list_first_entry(&qgroup->groups, 1329 struct btrfs_qgroup_list, next_group); 1330 ret = __del_qgroup_relation(trans, fs_info, 1331 qgroupid, 1332 list->group->qgroupid); 1333 if (ret) 1334 goto out; 1335 } 1336 1337 spin_lock(&fs_info->qgroup_lock); 1338 del_qgroup_rb(quota_root->fs_info, qgroupid); 1339 spin_unlock(&fs_info->qgroup_lock); 1340 out: 1341 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1342 return ret; 1343 } 1344 1345 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, 1346 struct btrfs_fs_info *fs_info, u64 qgroupid, 1347 struct btrfs_qgroup_limit *limit) 1348 { 1349 struct btrfs_root *quota_root; 1350 struct btrfs_qgroup *qgroup; 1351 int ret = 0; 1352 /* Sometimes we would want to clear the limit on this qgroup. 1353 * To meet this requirement, we treat the -1 as a special value 1354 * which tell kernel to clear the limit on this qgroup. 
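 *
 * For instance (illustrative values), a caller such as the qgroup limit
 * ioctl passing max_rfer == (u64)-1 clears BTRFS_QGROUP_LIMIT_MAX_RFER
 * from both the passed-in flags and qgroup->lim_flags and resets
 * max_rfer to 0, instead of installing a limit of 2^64 - 1 bytes.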
1355 */ 1356 const u64 CLEAR_VALUE = -1; 1357 1358 mutex_lock(&fs_info->qgroup_ioctl_lock); 1359 quota_root = fs_info->quota_root; 1360 if (!quota_root) { 1361 ret = -EINVAL; 1362 goto out; 1363 } 1364 1365 qgroup = find_qgroup_rb(fs_info, qgroupid); 1366 if (!qgroup) { 1367 ret = -ENOENT; 1368 goto out; 1369 } 1370 1371 spin_lock(&fs_info->qgroup_lock); 1372 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) { 1373 if (limit->max_rfer == CLEAR_VALUE) { 1374 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1375 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1376 qgroup->max_rfer = 0; 1377 } else { 1378 qgroup->max_rfer = limit->max_rfer; 1379 } 1380 } 1381 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) { 1382 if (limit->max_excl == CLEAR_VALUE) { 1383 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1384 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1385 qgroup->max_excl = 0; 1386 } else { 1387 qgroup->max_excl = limit->max_excl; 1388 } 1389 } 1390 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) { 1391 if (limit->rsv_rfer == CLEAR_VALUE) { 1392 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1393 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1394 qgroup->rsv_rfer = 0; 1395 } else { 1396 qgroup->rsv_rfer = limit->rsv_rfer; 1397 } 1398 } 1399 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) { 1400 if (limit->rsv_excl == CLEAR_VALUE) { 1401 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1402 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1403 qgroup->rsv_excl = 0; 1404 } else { 1405 qgroup->rsv_excl = limit->rsv_excl; 1406 } 1407 } 1408 qgroup->lim_flags |= limit->flags; 1409 1410 spin_unlock(&fs_info->qgroup_lock); 1411 1412 ret = update_qgroup_limit_item(trans, quota_root, qgroup); 1413 if (ret) { 1414 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1415 btrfs_info(fs_info, "unable to update quota limit for %llu", 1416 qgroupid); 1417 } 1418 1419 out: 1420 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1421 return ret; 1422 } 1423 1424 int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans, 1425 struct btrfs_fs_info *fs_info) 1426 { 1427 struct btrfs_qgroup_extent_record *record; 1428 struct btrfs_delayed_ref_root *delayed_refs; 1429 struct rb_node *node; 1430 u64 qgroup_to_skip; 1431 int ret = 0; 1432 1433 delayed_refs = &trans->transaction->delayed_refs; 1434 qgroup_to_skip = delayed_refs->qgroup_to_skip; 1435 1436 /* 1437 * No need to do lock, since this function will only be called in 1438 * btrfs_commmit_transaction(). 
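 *
 * The loop below walks every record queued in
 * delayed_refs->dirty_extent_root and fills in record->old_roots, the
 * set of roots that referenced the extent in the last committed state
 * (btrfs_find_all_roots() with a NULL trans handle searches the commit
 * roots), for btrfs_qgroup_account_extents() to consume later.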
1439 */ 1440 node = rb_first(&delayed_refs->dirty_extent_root); 1441 while (node) { 1442 record = rb_entry(node, struct btrfs_qgroup_extent_record, 1443 node); 1444 ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0, 1445 &record->old_roots); 1446 if (ret < 0) 1447 break; 1448 if (qgroup_to_skip) 1449 ulist_del(record->old_roots, qgroup_to_skip, 0); 1450 node = rb_next(node); 1451 } 1452 return ret; 1453 } 1454 1455 struct btrfs_qgroup_extent_record 1456 *btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs, 1457 struct btrfs_qgroup_extent_record *record) 1458 { 1459 struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node; 1460 struct rb_node *parent_node = NULL; 1461 struct btrfs_qgroup_extent_record *entry; 1462 u64 bytenr = record->bytenr; 1463 1464 while (*p) { 1465 parent_node = *p; 1466 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, 1467 node); 1468 if (bytenr < entry->bytenr) 1469 p = &(*p)->rb_left; 1470 else if (bytenr > entry->bytenr) 1471 p = &(*p)->rb_right; 1472 else 1473 return entry; 1474 } 1475 1476 rb_link_node(&record->node, parent_node, p); 1477 rb_insert_color(&record->node, &delayed_refs->dirty_extent_root); 1478 return NULL; 1479 } 1480 1481 #define UPDATE_NEW 0 1482 #define UPDATE_OLD 1 1483 /* 1484 * Walk all of the roots that points to the bytenr and adjust their refcnts. 1485 */ 1486 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info, 1487 struct ulist *roots, struct ulist *tmp, 1488 struct ulist *qgroups, u64 seq, int update_old) 1489 { 1490 struct ulist_node *unode; 1491 struct ulist_iterator uiter; 1492 struct ulist_node *tmp_unode; 1493 struct ulist_iterator tmp_uiter; 1494 struct btrfs_qgroup *qg; 1495 int ret = 0; 1496 1497 if (!roots) 1498 return 0; 1499 ULIST_ITER_INIT(&uiter); 1500 while ((unode = ulist_next(roots, &uiter))) { 1501 qg = find_qgroup_rb(fs_info, unode->val); 1502 if (!qg) 1503 continue; 1504 1505 ulist_reinit(tmp); 1506 ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg), 1507 GFP_ATOMIC); 1508 if (ret < 0) 1509 return ret; 1510 ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC); 1511 if (ret < 0) 1512 return ret; 1513 ULIST_ITER_INIT(&tmp_uiter); 1514 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) { 1515 struct btrfs_qgroup_list *glist; 1516 1517 qg = u64_to_ptr(tmp_unode->aux); 1518 if (update_old) 1519 btrfs_qgroup_update_old_refcnt(qg, seq, 1); 1520 else 1521 btrfs_qgroup_update_new_refcnt(qg, seq, 1); 1522 list_for_each_entry(glist, &qg->groups, next_group) { 1523 ret = ulist_add(qgroups, glist->group->qgroupid, 1524 ptr_to_u64(glist->group), 1525 GFP_ATOMIC); 1526 if (ret < 0) 1527 return ret; 1528 ret = ulist_add(tmp, glist->group->qgroupid, 1529 ptr_to_u64(glist->group), 1530 GFP_ATOMIC); 1531 if (ret < 0) 1532 return ret; 1533 } 1534 } 1535 } 1536 return 0; 1537 } 1538 1539 /* 1540 * Update qgroup rfer/excl counters. 1541 * Rfer update is easy, codes can explain themselves. 1542 * 1543 * Excl update is tricky, the update is split into 2 part. 
 * Part 1: Possible exclusive <-> sharing detect:
 *         |    A    |   !A    |
 *  -------------------------------------
 *   B     |    *    |    -    |
 *  -------------------------------------
 *  !B     |    +    |    **   |
 *  -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possibly unchanged.
 *
 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
 * case.
 *
 * To make the logic clear, we first use conditions A and B to split the
 * combinations into 4 results.
 *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
 * those cases only one of the two root counts can be 0.
 *
 * Lastly, check result **; since there both root counts may be 0, split it
 * again (2x2).
 * This time we don't need to consider anything else; the code and logic
 * are easy to understand now.
 */
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
				  struct ulist *qgroups,
				  u64 nr_old_roots,
				  u64 nr_new_roots,
				  u64 num_bytes, u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = u64_to_ptr(unode->aux);
		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		/* Rfer update part */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared->exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */

				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */

				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}

		/* For exclusive extent, free its reserved bytes too */
		if (nr_old_roots == 0 && nr_new_roots == 1 &&
		    cur_new_count == nr_new_roots)
			qg->reserved -=
num_bytes; 1659 if (dirty) 1660 qgroup_dirty(fs_info, qg); 1661 } 1662 return 0; 1663 } 1664 1665 int 1666 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, 1667 struct btrfs_fs_info *fs_info, 1668 u64 bytenr, u64 num_bytes, 1669 struct ulist *old_roots, struct ulist *new_roots) 1670 { 1671 struct ulist *qgroups = NULL; 1672 struct ulist *tmp = NULL; 1673 u64 seq; 1674 u64 nr_new_roots = 0; 1675 u64 nr_old_roots = 0; 1676 int ret = 0; 1677 1678 if (new_roots) 1679 nr_new_roots = new_roots->nnodes; 1680 if (old_roots) 1681 nr_old_roots = old_roots->nnodes; 1682 1683 if (!fs_info->quota_enabled) 1684 goto out_free; 1685 BUG_ON(!fs_info->quota_root); 1686 1687 qgroups = ulist_alloc(GFP_NOFS); 1688 if (!qgroups) { 1689 ret = -ENOMEM; 1690 goto out_free; 1691 } 1692 tmp = ulist_alloc(GFP_NOFS); 1693 if (!tmp) { 1694 ret = -ENOMEM; 1695 goto out_free; 1696 } 1697 1698 mutex_lock(&fs_info->qgroup_rescan_lock); 1699 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 1700 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) { 1701 mutex_unlock(&fs_info->qgroup_rescan_lock); 1702 ret = 0; 1703 goto out_free; 1704 } 1705 } 1706 mutex_unlock(&fs_info->qgroup_rescan_lock); 1707 1708 spin_lock(&fs_info->qgroup_lock); 1709 seq = fs_info->qgroup_seq; 1710 1711 /* Update old refcnts using old_roots */ 1712 ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq, 1713 UPDATE_OLD); 1714 if (ret < 0) 1715 goto out; 1716 1717 /* Update new refcnts using new_roots */ 1718 ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq, 1719 UPDATE_NEW); 1720 if (ret < 0) 1721 goto out; 1722 1723 qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots, 1724 num_bytes, seq); 1725 1726 /* 1727 * Bump qgroup_seq to avoid seq overlap 1728 */ 1729 fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1; 1730 out: 1731 spin_unlock(&fs_info->qgroup_lock); 1732 out_free: 1733 ulist_free(tmp); 1734 ulist_free(qgroups); 1735 ulist_free(old_roots); 1736 ulist_free(new_roots); 1737 return ret; 1738 } 1739 1740 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans, 1741 struct btrfs_fs_info *fs_info) 1742 { 1743 struct btrfs_qgroup_extent_record *record; 1744 struct btrfs_delayed_ref_root *delayed_refs; 1745 struct ulist *new_roots = NULL; 1746 struct rb_node *node; 1747 u64 qgroup_to_skip; 1748 int ret = 0; 1749 1750 delayed_refs = &trans->transaction->delayed_refs; 1751 qgroup_to_skip = delayed_refs->qgroup_to_skip; 1752 while ((node = rb_first(&delayed_refs->dirty_extent_root))) { 1753 record = rb_entry(node, struct btrfs_qgroup_extent_record, 1754 node); 1755 1756 if (!ret) { 1757 /* 1758 * Use (u64)-1 as time_seq to do special search, which 1759 * doesn't lock tree or delayed_refs and search current 1760 * root. It's safe inside commit_transaction(). 1761 */ 1762 ret = btrfs_find_all_roots(trans, fs_info, 1763 record->bytenr, (u64)-1, &new_roots); 1764 if (ret < 0) 1765 goto cleanup; 1766 if (qgroup_to_skip) 1767 ulist_del(new_roots, qgroup_to_skip, 0); 1768 ret = btrfs_qgroup_account_extent(trans, fs_info, 1769 record->bytenr, record->num_bytes, 1770 record->old_roots, new_roots); 1771 record->old_roots = NULL; 1772 new_roots = NULL; 1773 } 1774 cleanup: 1775 ulist_free(record->old_roots); 1776 ulist_free(new_roots); 1777 new_roots = NULL; 1778 rb_erase(node, &delayed_refs->dirty_extent_root); 1779 kfree(record); 1780 1781 } 1782 return ret; 1783 } 1784 1785 /* 1786 * called from commit_transaction. Writes all changed qgroups to disk. 
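 *
 * Note the locking pattern below: qgroup_lock is dropped around each
 * update_qgroup_info_item()/update_qgroup_limit_item() call, since those
 * perform btree item updates that may sleep, and is re-taken before
 * picking the next entry off the dirty_qgroups list.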
1787 */ 1788 int btrfs_run_qgroups(struct btrfs_trans_handle *trans, 1789 struct btrfs_fs_info *fs_info) 1790 { 1791 struct btrfs_root *quota_root = fs_info->quota_root; 1792 int ret = 0; 1793 int start_rescan_worker = 0; 1794 1795 if (!quota_root) 1796 goto out; 1797 1798 if (!fs_info->quota_enabled && fs_info->pending_quota_state) 1799 start_rescan_worker = 1; 1800 1801 fs_info->quota_enabled = fs_info->pending_quota_state; 1802 1803 spin_lock(&fs_info->qgroup_lock); 1804 while (!list_empty(&fs_info->dirty_qgroups)) { 1805 struct btrfs_qgroup *qgroup; 1806 qgroup = list_first_entry(&fs_info->dirty_qgroups, 1807 struct btrfs_qgroup, dirty); 1808 list_del_init(&qgroup->dirty); 1809 spin_unlock(&fs_info->qgroup_lock); 1810 ret = update_qgroup_info_item(trans, quota_root, qgroup); 1811 if (ret) 1812 fs_info->qgroup_flags |= 1813 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1814 ret = update_qgroup_limit_item(trans, quota_root, qgroup); 1815 if (ret) 1816 fs_info->qgroup_flags |= 1817 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1818 spin_lock(&fs_info->qgroup_lock); 1819 } 1820 if (fs_info->quota_enabled) 1821 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; 1822 else 1823 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 1824 spin_unlock(&fs_info->qgroup_lock); 1825 1826 ret = update_qgroup_status_item(trans, fs_info, quota_root); 1827 if (ret) 1828 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1829 1830 if (!ret && start_rescan_worker) { 1831 ret = qgroup_rescan_init(fs_info, 0, 1); 1832 if (!ret) { 1833 qgroup_rescan_zero_tracking(fs_info); 1834 btrfs_queue_work(fs_info->qgroup_rescan_workers, 1835 &fs_info->qgroup_rescan_work); 1836 } 1837 ret = 0; 1838 } 1839 1840 out: 1841 1842 return ret; 1843 } 1844 1845 /* 1846 * copy the acounting information between qgroups. 
This is necessary when a 1847 * snapshot or a subvolume is created 1848 */ 1849 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, 1850 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid, 1851 struct btrfs_qgroup_inherit *inherit) 1852 { 1853 int ret = 0; 1854 int i; 1855 u64 *i_qgroups; 1856 struct btrfs_root *quota_root = fs_info->quota_root; 1857 struct btrfs_qgroup *srcgroup; 1858 struct btrfs_qgroup *dstgroup; 1859 u32 level_size = 0; 1860 u64 nums; 1861 1862 mutex_lock(&fs_info->qgroup_ioctl_lock); 1863 if (!fs_info->quota_enabled) 1864 goto out; 1865 1866 if (!quota_root) { 1867 ret = -EINVAL; 1868 goto out; 1869 } 1870 1871 if (inherit) { 1872 i_qgroups = (u64 *)(inherit + 1); 1873 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + 1874 2 * inherit->num_excl_copies; 1875 for (i = 0; i < nums; ++i) { 1876 srcgroup = find_qgroup_rb(fs_info, *i_qgroups); 1877 if (!srcgroup) { 1878 ret = -EINVAL; 1879 goto out; 1880 } 1881 1882 if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) { 1883 ret = -EINVAL; 1884 goto out; 1885 } 1886 ++i_qgroups; 1887 } 1888 } 1889 1890 /* 1891 * create a tracking group for the subvol itself 1892 */ 1893 ret = add_qgroup_item(trans, quota_root, objectid); 1894 if (ret) 1895 goto out; 1896 1897 if (srcid) { 1898 struct btrfs_root *srcroot; 1899 struct btrfs_key srckey; 1900 1901 srckey.objectid = srcid; 1902 srckey.type = BTRFS_ROOT_ITEM_KEY; 1903 srckey.offset = (u64)-1; 1904 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey); 1905 if (IS_ERR(srcroot)) { 1906 ret = PTR_ERR(srcroot); 1907 goto out; 1908 } 1909 1910 rcu_read_lock(); 1911 level_size = srcroot->nodesize; 1912 rcu_read_unlock(); 1913 } 1914 1915 /* 1916 * add qgroup to all inherited groups 1917 */ 1918 if (inherit) { 1919 i_qgroups = (u64 *)(inherit + 1); 1920 for (i = 0; i < inherit->num_qgroups; ++i) { 1921 ret = add_qgroup_relation_item(trans, quota_root, 1922 objectid, *i_qgroups); 1923 if (ret) 1924 goto out; 1925 ret = add_qgroup_relation_item(trans, quota_root, 1926 *i_qgroups, objectid); 1927 if (ret) 1928 goto out; 1929 ++i_qgroups; 1930 } 1931 } 1932 1933 1934 spin_lock(&fs_info->qgroup_lock); 1935 1936 dstgroup = add_qgroup_rb(fs_info, objectid); 1937 if (IS_ERR(dstgroup)) { 1938 ret = PTR_ERR(dstgroup); 1939 goto unlock; 1940 } 1941 1942 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) { 1943 dstgroup->lim_flags = inherit->lim.flags; 1944 dstgroup->max_rfer = inherit->lim.max_rfer; 1945 dstgroup->max_excl = inherit->lim.max_excl; 1946 dstgroup->rsv_rfer = inherit->lim.rsv_rfer; 1947 dstgroup->rsv_excl = inherit->lim.rsv_excl; 1948 1949 ret = update_qgroup_limit_item(trans, quota_root, dstgroup); 1950 if (ret) { 1951 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1952 btrfs_info(fs_info, "unable to update quota limit for %llu", 1953 dstgroup->qgroupid); 1954 goto unlock; 1955 } 1956 } 1957 1958 if (srcid) { 1959 srcgroup = find_qgroup_rb(fs_info, srcid); 1960 if (!srcgroup) 1961 goto unlock; 1962 1963 /* 1964 * We call inherit after we clone the root in order to make sure 1965 * our counts don't go crazy, so at this point the only 1966 * difference between the two roots should be the root node. 
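 *
 * That is why the snapshot's rfer is copied verbatim from the source
 * below, while excl of both source and destination is set to level_size
 * (the size of one tree node): right after the clone, the only block
 * either side owns exclusively is its own root node.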
1967 */ 1968 dstgroup->rfer = srcgroup->rfer; 1969 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; 1970 dstgroup->excl = level_size; 1971 dstgroup->excl_cmpr = level_size; 1972 srcgroup->excl = level_size; 1973 srcgroup->excl_cmpr = level_size; 1974 1975 /* inherit the limit info */ 1976 dstgroup->lim_flags = srcgroup->lim_flags; 1977 dstgroup->max_rfer = srcgroup->max_rfer; 1978 dstgroup->max_excl = srcgroup->max_excl; 1979 dstgroup->rsv_rfer = srcgroup->rsv_rfer; 1980 dstgroup->rsv_excl = srcgroup->rsv_excl; 1981 1982 qgroup_dirty(fs_info, dstgroup); 1983 qgroup_dirty(fs_info, srcgroup); 1984 } 1985 1986 if (!inherit) 1987 goto unlock; 1988 1989 i_qgroups = (u64 *)(inherit + 1); 1990 for (i = 0; i < inherit->num_qgroups; ++i) { 1991 ret = add_relation_rb(quota_root->fs_info, objectid, 1992 *i_qgroups); 1993 if (ret) 1994 goto unlock; 1995 ++i_qgroups; 1996 } 1997 1998 for (i = 0; i < inherit->num_ref_copies; ++i) { 1999 struct btrfs_qgroup *src; 2000 struct btrfs_qgroup *dst; 2001 2002 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2003 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2004 2005 if (!src || !dst) { 2006 ret = -EINVAL; 2007 goto unlock; 2008 } 2009 2010 dst->rfer = src->rfer - level_size; 2011 dst->rfer_cmpr = src->rfer_cmpr - level_size; 2012 i_qgroups += 2; 2013 } 2014 for (i = 0; i < inherit->num_excl_copies; ++i) { 2015 struct btrfs_qgroup *src; 2016 struct btrfs_qgroup *dst; 2017 2018 src = find_qgroup_rb(fs_info, i_qgroups[0]); 2019 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 2020 2021 if (!src || !dst) { 2022 ret = -EINVAL; 2023 goto unlock; 2024 } 2025 2026 dst->excl = src->excl + level_size; 2027 dst->excl_cmpr = src->excl_cmpr + level_size; 2028 i_qgroups += 2; 2029 } 2030 2031 unlock: 2032 spin_unlock(&fs_info->qgroup_lock); 2033 out: 2034 mutex_unlock(&fs_info->qgroup_ioctl_lock); 2035 return ret; 2036 } 2037 2038 int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes) 2039 { 2040 struct btrfs_root *quota_root; 2041 struct btrfs_qgroup *qgroup; 2042 struct btrfs_fs_info *fs_info = root->fs_info; 2043 u64 ref_root = root->root_key.objectid; 2044 int ret = 0; 2045 struct ulist_node *unode; 2046 struct ulist_iterator uiter; 2047 2048 if (!is_fstree(ref_root)) 2049 return 0; 2050 2051 if (num_bytes == 0) 2052 return 0; 2053 2054 spin_lock(&fs_info->qgroup_lock); 2055 quota_root = fs_info->quota_root; 2056 if (!quota_root) 2057 goto out; 2058 2059 qgroup = find_qgroup_rb(fs_info, ref_root); 2060 if (!qgroup) 2061 goto out; 2062 2063 /* 2064 * in a first step, we check all affected qgroups if any limits would 2065 * be exceeded 2066 */ 2067 ulist_reinit(fs_info->qgroup_ulist); 2068 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, 2069 (uintptr_t)qgroup, GFP_ATOMIC); 2070 if (ret < 0) 2071 goto out; 2072 ULIST_ITER_INIT(&uiter); 2073 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { 2074 struct btrfs_qgroup *qg; 2075 struct btrfs_qgroup_list *glist; 2076 2077 qg = u64_to_ptr(unode->aux); 2078 2079 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 2080 qg->reserved + (s64)qg->rfer + num_bytes > 2081 qg->max_rfer) { 2082 ret = -EDQUOT; 2083 goto out; 2084 } 2085 2086 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && 2087 qg->reserved + (s64)qg->excl + num_bytes > 2088 qg->max_excl) { 2089 ret = -EDQUOT; 2090 goto out; 2091 } 2092 2093 list_for_each_entry(glist, &qg->groups, next_group) { 2094 ret = ulist_add(fs_info->qgroup_ulist, 2095 glist->group->qgroupid, 2096 (uintptr_t)glist->group, GFP_ATOMIC); 2097 if (ret < 0) 2098 goto out; 2099 } 2100 

int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * In a first step, check all affected qgroups to see whether any of
	 * their limits would be exceeded by this reservation.
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		    qg->max_rfer) {
			ret = -EDQUOT;
			goto out;
		}

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		    qg->max_excl) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * No limits exceeded, now record the reservation in all affected
	 * qgroups.
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = u64_to_ptr(unode->aux);

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}

void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
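
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * pair btrfs_qgroup_reserve() with btrfs_qgroup_free().  The reservation is
 * taken before the space is consumed and must be handed back if the
 * operation it covers fails; "op_ret" merely stands in for the result of
 * that (hypothetical) operation.
 */
static int __maybe_unused qgroup_reserve_usage_sketch(struct btrfs_root *root,
						      u64 num_bytes, int op_ret)
{
	int ret;

	/* checks limits and bumps qg->reserved of this qgroup and parents */
	ret = btrfs_qgroup_reserve(root, num_bytes);
	if (ret)		/* typically -EDQUOT */
		return ret;

	if (op_ret) {
		/* the covered operation failed, return the reservation */
		btrfs_qgroup_free(root, num_bytes);
		return op_ret;
	}

	return 0;
}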

void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	btrfs_err(trans->root->fs_info,
		"qgroups not uptodate in trans handle %p: list is%s empty, "
		"seq is %#x.%x",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
	BUG();
}

/*
 * Returns < 0 on error, 0 when more leaves are to be scanned.
 * Returns 1 when done.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans,
		   struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	u64 num_bytes;
	int slot;
	int ret;

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->extent_root->nodesize;
		else
			num_bytes = found.offset;

		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, fs_info,
				found.objectid, num_bytes, NULL, roots);
		if (ret < 0)
			goto out;
	}
out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}

static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * Only update the status item here, since the previous part has
	 * already updated the qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d\n",
			  err);
		goto done;
	}
	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
	if (ret < 0) {
		err = ret;
		btrfs_err(fs_info, "fail to update qgroup status: %d\n", err);
	}
	btrfs_end_transaction(trans, fs_info->quota_root);

	if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			   err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

done:
	complete_all(&fs_info->qgroup_rescan_completion);
}
" (inconsistency flag cleared)" : ""); 2336 } else { 2337 btrfs_err(fs_info, "qgroup scan failed with %d", err); 2338 } 2339 2340 done: 2341 complete_all(&fs_info->qgroup_rescan_completion); 2342 } 2343 2344 /* 2345 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all 2346 * memory required for the rescan context. 2347 */ 2348 static int 2349 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 2350 int init_flags) 2351 { 2352 int ret = 0; 2353 2354 if (!init_flags && 2355 (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) || 2356 !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) { 2357 ret = -EINVAL; 2358 goto err; 2359 } 2360 2361 mutex_lock(&fs_info->qgroup_rescan_lock); 2362 spin_lock(&fs_info->qgroup_lock); 2363 2364 if (init_flags) { 2365 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) 2366 ret = -EINPROGRESS; 2367 else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) 2368 ret = -EINVAL; 2369 2370 if (ret) { 2371 spin_unlock(&fs_info->qgroup_lock); 2372 mutex_unlock(&fs_info->qgroup_rescan_lock); 2373 goto err; 2374 } 2375 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; 2376 } 2377 2378 memset(&fs_info->qgroup_rescan_progress, 0, 2379 sizeof(fs_info->qgroup_rescan_progress)); 2380 fs_info->qgroup_rescan_progress.objectid = progress_objectid; 2381 2382 spin_unlock(&fs_info->qgroup_lock); 2383 mutex_unlock(&fs_info->qgroup_rescan_lock); 2384 2385 init_completion(&fs_info->qgroup_rescan_completion); 2386 2387 memset(&fs_info->qgroup_rescan_work, 0, 2388 sizeof(fs_info->qgroup_rescan_work)); 2389 btrfs_init_work(&fs_info->qgroup_rescan_work, 2390 btrfs_qgroup_rescan_helper, 2391 btrfs_qgroup_rescan_worker, NULL, NULL); 2392 2393 if (ret) { 2394 err: 2395 btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret); 2396 return ret; 2397 } 2398 2399 return 0; 2400 } 2401 2402 static void 2403 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) 2404 { 2405 struct rb_node *n; 2406 struct btrfs_qgroup *qgroup; 2407 2408 spin_lock(&fs_info->qgroup_lock); 2409 /* clear all current qgroup tracking information */ 2410 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { 2411 qgroup = rb_entry(n, struct btrfs_qgroup, node); 2412 qgroup->rfer = 0; 2413 qgroup->rfer_cmpr = 0; 2414 qgroup->excl = 0; 2415 qgroup->excl_cmpr = 0; 2416 } 2417 spin_unlock(&fs_info->qgroup_lock); 2418 } 2419 2420 int 2421 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) 2422 { 2423 int ret = 0; 2424 struct btrfs_trans_handle *trans; 2425 2426 ret = qgroup_rescan_init(fs_info, 0, 1); 2427 if (ret) 2428 return ret; 2429 2430 /* 2431 * We have set the rescan_progress to 0, which means no more 2432 * delayed refs will be accounted by btrfs_qgroup_account_ref. 2433 * However, btrfs_qgroup_account_ref may be right after its call 2434 * to btrfs_find_all_roots, in which case it would still do the 2435 * accounting. 2436 * To solve this, we're committing the transaction, which will 2437 * ensure we run all delayed refs and only after that, we are 2438 * going to clear all tracking information for a clean start. 

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may already be past its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we commit the transaction, which ensures all
	 * delayed refs are run; only after that do we clear all tracking
	 * information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}

int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);

	return ret;
}

/*
 * This is only called from open_ctree() where we're still single threaded,
 * thus locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}
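
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * (e.g. an ioctl handler) that starts a rescan and optionally blocks until
 * it finishes.  It only combines the two helpers defined above; the real
 * ioctl entry points live elsewhere.
 */
static int __maybe_unused qgroup_rescan_and_wait_sketch(
					struct btrfs_fs_info *fs_info,
					bool wait)
{
	int ret;

	ret = btrfs_qgroup_rescan(fs_info);	/* queues the rescan worker */
	if (ret)		/* e.g. -EINPROGRESS if one is already running */
		return ret;

	if (wait)
		ret = btrfs_qgroup_wait_for_completion(fs_info);

	return ret;
}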