1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2011 STRATO. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/pagemap.h> 8 #include <linux/writeback.h> 9 #include <linux/blkdev.h> 10 #include <linux/rbtree.h> 11 #include <linux/slab.h> 12 #include <linux/workqueue.h> 13 #include <linux/btrfs.h> 14 #include <linux/sched/mm.h> 15 16 #include "ctree.h" 17 #include "transaction.h" 18 #include "disk-io.h" 19 #include "locking.h" 20 #include "ulist.h" 21 #include "backref.h" 22 #include "extent_io.h" 23 #include "qgroup.h" 24 #include "block-group.h" 25 #include "sysfs.h" 26 #include "tree-mod-log.h" 27 #include "fs.h" 28 #include "accessors.h" 29 #include "extent-tree.h" 30 #include "root-tree.h" 31 #include "tree-checker.h" 32 33 enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info) 34 { 35 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 36 return BTRFS_QGROUP_MODE_DISABLED; 37 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) 38 return BTRFS_QGROUP_MODE_SIMPLE; 39 return BTRFS_QGROUP_MODE_FULL; 40 } 41 42 bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info) 43 { 44 return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED; 45 } 46 47 bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info) 48 { 49 return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL; 50 } 51 52 /* 53 * Helpers to access qgroup reservation 54 * 55 * Callers should ensure the lock context and type are valid 56 */ 57 58 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup) 59 { 60 u64 ret = 0; 61 int i; 62 63 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) 64 ret += qgroup->rsv.values[i]; 65 66 return ret; 67 } 68 69 #ifdef CONFIG_BTRFS_DEBUG 70 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type) 71 { 72 if (type == BTRFS_QGROUP_RSV_DATA) 73 return "data"; 74 if (type == BTRFS_QGROUP_RSV_META_PERTRANS) 75 return "meta_pertrans"; 76 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) 77 return "meta_prealloc"; 78 return NULL; 79 } 80 #endif 81 82 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info, 83 struct btrfs_qgroup *qgroup, u64 num_bytes, 84 enum btrfs_qgroup_rsv_type type) 85 { 86 trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type); 87 qgroup->rsv.values[type] += num_bytes; 88 } 89 90 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info, 91 struct btrfs_qgroup *qgroup, u64 num_bytes, 92 enum btrfs_qgroup_rsv_type type) 93 { 94 trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type); 95 if (qgroup->rsv.values[type] >= num_bytes) { 96 qgroup->rsv.values[type] -= num_bytes; 97 return; 98 } 99 #ifdef CONFIG_BTRFS_DEBUG 100 WARN_RATELIMIT(1, 101 "qgroup %llu %s reserved space underflow, have %llu to free %llu", 102 qgroup->qgroupid, qgroup_rsv_type_str(type), 103 qgroup->rsv.values[type], num_bytes); 104 #endif 105 qgroup->rsv.values[type] = 0; 106 } 107 108 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info, 109 struct btrfs_qgroup *dest, 110 const struct btrfs_qgroup *src) 111 { 112 int i; 113 114 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) 115 qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i); 116 } 117 118 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info, 119 struct btrfs_qgroup *dest, 120 const struct btrfs_qgroup *src) 121 { 122 int i; 123 124 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) 125 qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i); 126 } 127 128 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq, 129 int mod) 130 { 131 if (qg->old_refcnt < seq) 132 qg->old_refcnt = seq; 133 qg->old_refcnt += mod; 134 } 135 136 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq, 137 int mod) 138 { 139 if (qg->new_refcnt < seq) 140 qg->new_refcnt = seq; 141 qg->new_refcnt += mod; 142 } 143 144 static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq) 145 { 146 if (qg->old_refcnt < seq) 147 return 0; 148 return qg->old_refcnt - seq; 149 } 150 151 static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq) 152 { 153 if (qg->new_refcnt < seq) 154 return 0; 155 return qg->new_refcnt - seq; 156 } 157 158 static int 159 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 160 int init_flags); 161 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info); 162 163 /* must be called with qgroup_ioctl_lock held */ 164 static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info, 165 u64 qgroupid) 166 { 167 struct rb_node *n = fs_info->qgroup_tree.rb_node; 168 struct btrfs_qgroup *qgroup; 169 170 while (n) { 171 qgroup = rb_entry(n, struct btrfs_qgroup, node); 172 if (qgroup->qgroupid < qgroupid) 173 n = n->rb_left; 174 else if (qgroup->qgroupid > qgroupid) 175 n = n->rb_right; 176 else 177 return qgroup; 178 } 179 return NULL; 180 } 181 182 /* 183 * Add qgroup to the filesystem's qgroup tree. 184 * 185 * Must be called with qgroup_lock held and @prealloc preallocated. 186 * 187 * The control on the lifespan of @prealloc would be transferred to this 188 * function, thus caller should no longer touch @prealloc. 189 */ 190 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info, 191 struct btrfs_qgroup *prealloc, 192 u64 qgroupid) 193 { 194 struct rb_node **p = &fs_info->qgroup_tree.rb_node; 195 struct rb_node *parent = NULL; 196 struct btrfs_qgroup *qgroup; 197 198 /* Caller must have pre-allocated @prealloc. */ 199 ASSERT(prealloc); 200 201 while (*p) { 202 parent = *p; 203 qgroup = rb_entry(parent, struct btrfs_qgroup, node); 204 205 if (qgroup->qgroupid < qgroupid) { 206 p = &(*p)->rb_left; 207 } else if (qgroup->qgroupid > qgroupid) { 208 p = &(*p)->rb_right; 209 } else { 210 kfree(prealloc); 211 return qgroup; 212 } 213 } 214 215 qgroup = prealloc; 216 qgroup->qgroupid = qgroupid; 217 INIT_LIST_HEAD(&qgroup->groups); 218 INIT_LIST_HEAD(&qgroup->members); 219 INIT_LIST_HEAD(&qgroup->dirty); 220 INIT_LIST_HEAD(&qgroup->iterator); 221 INIT_LIST_HEAD(&qgroup->nested_iterator); 222 223 rb_link_node(&qgroup->node, parent, p); 224 rb_insert_color(&qgroup->node, &fs_info->qgroup_tree); 225 226 return qgroup; 227 } 228 229 static void __del_qgroup_rb(struct btrfs_fs_info *fs_info, 230 struct btrfs_qgroup *qgroup) 231 { 232 struct btrfs_qgroup_list *list; 233 234 list_del(&qgroup->dirty); 235 while (!list_empty(&qgroup->groups)) { 236 list = list_first_entry(&qgroup->groups, 237 struct btrfs_qgroup_list, next_group); 238 list_del(&list->next_group); 239 list_del(&list->next_member); 240 kfree(list); 241 } 242 243 while (!list_empty(&qgroup->members)) { 244 list = list_first_entry(&qgroup->members, 245 struct btrfs_qgroup_list, next_member); 246 list_del(&list->next_group); 247 list_del(&list->next_member); 248 kfree(list); 249 } 250 } 251 252 /* must be called with qgroup_lock held */ 253 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid) 254 { 255 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid); 256 257 if (!qgroup) 258 return -ENOENT; 259 260 rb_erase(&qgroup->node, &fs_info->qgroup_tree); 261 __del_qgroup_rb(fs_info, qgroup); 262 return 0; 263 } 264 265 /* 266 * Add relation specified by two qgroups. 267 * 268 * Must be called with qgroup_lock held, the ownership of @prealloc is 269 * transferred to this function and caller should not touch it anymore. 270 * 271 * Return: 0 on success 272 * -ENOENT if one of the qgroups is NULL 273 * <0 other errors 274 */ 275 static int __add_relation_rb(struct btrfs_qgroup_list *prealloc, 276 struct btrfs_qgroup *member, 277 struct btrfs_qgroup *parent) 278 { 279 if (!member || !parent) { 280 kfree(prealloc); 281 return -ENOENT; 282 } 283 284 prealloc->group = parent; 285 prealloc->member = member; 286 list_add_tail(&prealloc->next_group, &member->groups); 287 list_add_tail(&prealloc->next_member, &parent->members); 288 289 return 0; 290 } 291 292 /* 293 * Add relation specified by two qgroup ids. 294 * 295 * Must be called with qgroup_lock held. 296 * 297 * Return: 0 on success 298 * -ENOENT if one of the ids does not exist 299 * <0 other errors 300 */ 301 static int add_relation_rb(struct btrfs_fs_info *fs_info, 302 struct btrfs_qgroup_list *prealloc, 303 u64 memberid, u64 parentid) 304 { 305 struct btrfs_qgroup *member; 306 struct btrfs_qgroup *parent; 307 308 member = find_qgroup_rb(fs_info, memberid); 309 parent = find_qgroup_rb(fs_info, parentid); 310 311 return __add_relation_rb(prealloc, member, parent); 312 } 313 314 /* Must be called with qgroup_lock held */ 315 static int del_relation_rb(struct btrfs_fs_info *fs_info, 316 u64 memberid, u64 parentid) 317 { 318 struct btrfs_qgroup *member; 319 struct btrfs_qgroup *parent; 320 struct btrfs_qgroup_list *list; 321 322 member = find_qgroup_rb(fs_info, memberid); 323 parent = find_qgroup_rb(fs_info, parentid); 324 if (!member || !parent) 325 return -ENOENT; 326 327 list_for_each_entry(list, &member->groups, next_group) { 328 if (list->group == parent) { 329 list_del(&list->next_group); 330 list_del(&list->next_member); 331 kfree(list); 332 return 0; 333 } 334 } 335 return -ENOENT; 336 } 337 338 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 339 int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid, 340 u64 rfer, u64 excl) 341 { 342 struct btrfs_qgroup *qgroup; 343 344 qgroup = find_qgroup_rb(fs_info, qgroupid); 345 if (!qgroup) 346 return -EINVAL; 347 if (qgroup->rfer != rfer || qgroup->excl != excl) 348 return -EINVAL; 349 return 0; 350 } 351 #endif 352 353 static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info) 354 { 355 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) 356 return; 357 fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | 358 BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN | 359 BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING); 360 } 361 362 static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info, 363 struct extent_buffer *leaf, int slot, 364 struct btrfs_qgroup_status_item *ptr) 365 { 366 ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)); 367 ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr)); 368 fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr); 369 } 370 371 /* 372 * The full config is read in one go, only called from open_ctree() 373 * It doesn't use any locking, as at this point we're still single-threaded 374 */ 375 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) 376 { 377 struct btrfs_key key; 378 struct btrfs_key found_key; 379 struct btrfs_root *quota_root = fs_info->quota_root; 380 struct btrfs_path *path = NULL; 381 struct extent_buffer *l; 382 int slot; 383 int ret = 0; 384 u64 flags = 0; 385 u64 rescan_progress = 0; 386 387 if (!fs_info->quota_root) 388 return 0; 389 390 fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL); 391 if (!fs_info->qgroup_ulist) { 392 ret = -ENOMEM; 393 goto out; 394 } 395 396 path = btrfs_alloc_path(); 397 if (!path) { 398 ret = -ENOMEM; 399 goto out; 400 } 401 402 ret = btrfs_sysfs_add_qgroups(fs_info); 403 if (ret < 0) 404 goto out; 405 /* default this to quota off, in case no status key is found */ 406 fs_info->qgroup_flags = 0; 407 408 /* 409 * pass 1: read status, all qgroup infos and limits 410 */ 411 key.objectid = 0; 412 key.type = 0; 413 key.offset = 0; 414 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1); 415 if (ret) 416 goto out; 417 418 while (1) { 419 struct btrfs_qgroup *qgroup; 420 421 slot = path->slots[0]; 422 l = path->nodes[0]; 423 btrfs_item_key_to_cpu(l, &found_key, slot); 424 425 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) { 426 struct btrfs_qgroup_status_item *ptr; 427 428 ptr = btrfs_item_ptr(l, slot, 429 struct btrfs_qgroup_status_item); 430 431 if (btrfs_qgroup_status_version(l, ptr) != 432 BTRFS_QGROUP_STATUS_VERSION) { 433 btrfs_err(fs_info, 434 "old qgroup version, quota disabled"); 435 goto out; 436 } 437 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr); 438 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) { 439 qgroup_read_enable_gen(fs_info, l, slot, ptr); 440 } else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) { 441 qgroup_mark_inconsistent(fs_info); 442 btrfs_err(fs_info, 443 "qgroup generation mismatch, marked as inconsistent"); 444 } 445 rescan_progress = btrfs_qgroup_status_rescan(l, ptr); 446 goto next1; 447 } 448 449 if (found_key.type != BTRFS_QGROUP_INFO_KEY && 450 found_key.type != BTRFS_QGROUP_LIMIT_KEY) 451 goto next1; 452 453 qgroup = find_qgroup_rb(fs_info, found_key.offset); 454 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) || 455 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) { 456 btrfs_err(fs_info, "inconsistent qgroup config"); 457 qgroup_mark_inconsistent(fs_info); 458 } 459 if (!qgroup) { 460 struct btrfs_qgroup *prealloc; 461 struct btrfs_root *tree_root = fs_info->tree_root; 462 463 prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); 464 if (!prealloc) { 465 ret = -ENOMEM; 466 goto out; 467 } 468 qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset); 469 /* 470 * If a qgroup exists for a subvolume ID, it is possible 471 * that subvolume has been deleted, in which case 472 * re-using that ID would lead to incorrect accounting. 473 * 474 * Ensure that we skip any such subvol ids. 475 * 476 * We don't need to lock because this is only called 477 * during mount before we start doing things like creating 478 * subvolumes. 479 */ 480 if (is_fstree(qgroup->qgroupid) && 481 qgroup->qgroupid > tree_root->free_objectid) 482 /* 483 * Don't need to check against BTRFS_LAST_FREE_OBJECTID, 484 * as it will get checked on the next call to 485 * btrfs_get_free_objectid. 486 */ 487 tree_root->free_objectid = qgroup->qgroupid + 1; 488 } 489 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 490 if (ret < 0) 491 goto out; 492 493 switch (found_key.type) { 494 case BTRFS_QGROUP_INFO_KEY: { 495 struct btrfs_qgroup_info_item *ptr; 496 497 ptr = btrfs_item_ptr(l, slot, 498 struct btrfs_qgroup_info_item); 499 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr); 500 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr); 501 qgroup->excl = btrfs_qgroup_info_excl(l, ptr); 502 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr); 503 /* generation currently unused */ 504 break; 505 } 506 case BTRFS_QGROUP_LIMIT_KEY: { 507 struct btrfs_qgroup_limit_item *ptr; 508 509 ptr = btrfs_item_ptr(l, slot, 510 struct btrfs_qgroup_limit_item); 511 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr); 512 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr); 513 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr); 514 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr); 515 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr); 516 break; 517 } 518 } 519 next1: 520 ret = btrfs_next_item(quota_root, path); 521 if (ret < 0) 522 goto out; 523 if (ret) 524 break; 525 } 526 btrfs_release_path(path); 527 528 /* 529 * pass 2: read all qgroup relations 530 */ 531 key.objectid = 0; 532 key.type = BTRFS_QGROUP_RELATION_KEY; 533 key.offset = 0; 534 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0); 535 if (ret) 536 goto out; 537 while (1) { 538 struct btrfs_qgroup_list *list = NULL; 539 540 slot = path->slots[0]; 541 l = path->nodes[0]; 542 btrfs_item_key_to_cpu(l, &found_key, slot); 543 544 if (found_key.type != BTRFS_QGROUP_RELATION_KEY) 545 goto next2; 546 547 if (found_key.objectid > found_key.offset) { 548 /* parent <- member, not needed to build config */ 549 /* FIXME should we omit the key completely? */ 550 goto next2; 551 } 552 553 list = kzalloc(sizeof(*list), GFP_KERNEL); 554 if (!list) { 555 ret = -ENOMEM; 556 goto out; 557 } 558 ret = add_relation_rb(fs_info, list, found_key.objectid, 559 found_key.offset); 560 list = NULL; 561 if (ret == -ENOENT) { 562 btrfs_warn(fs_info, 563 "orphan qgroup relation 0x%llx->0x%llx", 564 found_key.objectid, found_key.offset); 565 ret = 0; /* ignore the error */ 566 } 567 if (ret) 568 goto out; 569 next2: 570 ret = btrfs_next_item(quota_root, path); 571 if (ret < 0) 572 goto out; 573 if (ret) 574 break; 575 } 576 out: 577 btrfs_free_path(path); 578 fs_info->qgroup_flags |= flags; 579 if (ret >= 0) { 580 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON) 581 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 582 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) 583 ret = qgroup_rescan_init(fs_info, rescan_progress, 0); 584 } else { 585 ulist_free(fs_info->qgroup_ulist); 586 fs_info->qgroup_ulist = NULL; 587 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 588 btrfs_sysfs_del_qgroups(fs_info); 589 } 590 591 return ret < 0 ? ret : 0; 592 } 593 594 /* 595 * Called in close_ctree() when quota is still enabled. This verifies we don't 596 * leak some reserved space. 597 * 598 * Return false if no reserved space is left. 599 * Return true if some reserved space is leaked. 600 */ 601 bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info) 602 { 603 struct rb_node *node; 604 bool ret = false; 605 606 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) 607 return ret; 608 /* 609 * Since we're unmounting, there is no race and no need to grab qgroup 610 * lock. And here we don't go post-order to provide a more user 611 * friendly sorted result. 612 */ 613 for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) { 614 struct btrfs_qgroup *qgroup; 615 int i; 616 617 qgroup = rb_entry(node, struct btrfs_qgroup, node); 618 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) { 619 if (qgroup->rsv.values[i]) { 620 ret = true; 621 btrfs_warn(fs_info, 622 "qgroup %hu/%llu has unreleased space, type %d rsv %llu", 623 btrfs_qgroup_level(qgroup->qgroupid), 624 btrfs_qgroup_subvolid(qgroup->qgroupid), 625 i, qgroup->rsv.values[i]); 626 } 627 } 628 } 629 return ret; 630 } 631 632 /* 633 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(), 634 * first two are in single-threaded paths.And for the third one, we have set 635 * quota_root to be null with qgroup_lock held before, so it is safe to clean 636 * up the in-memory structures without qgroup_lock held. 637 */ 638 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) 639 { 640 struct rb_node *n; 641 struct btrfs_qgroup *qgroup; 642 643 while ((n = rb_first(&fs_info->qgroup_tree))) { 644 qgroup = rb_entry(n, struct btrfs_qgroup, node); 645 rb_erase(n, &fs_info->qgroup_tree); 646 __del_qgroup_rb(fs_info, qgroup); 647 btrfs_sysfs_del_one_qgroup(fs_info, qgroup); 648 kfree(qgroup); 649 } 650 /* 651 * We call btrfs_free_qgroup_config() when unmounting 652 * filesystem and disabling quota, so we set qgroup_ulist 653 * to be null here to avoid double free. 654 */ 655 ulist_free(fs_info->qgroup_ulist); 656 fs_info->qgroup_ulist = NULL; 657 btrfs_sysfs_del_qgroups(fs_info); 658 } 659 660 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src, 661 u64 dst) 662 { 663 int ret; 664 struct btrfs_root *quota_root = trans->fs_info->quota_root; 665 struct btrfs_path *path; 666 struct btrfs_key key; 667 668 path = btrfs_alloc_path(); 669 if (!path) 670 return -ENOMEM; 671 672 key.objectid = src; 673 key.type = BTRFS_QGROUP_RELATION_KEY; 674 key.offset = dst; 675 676 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0); 677 678 btrfs_mark_buffer_dirty(trans, path->nodes[0]); 679 680 btrfs_free_path(path); 681 return ret; 682 } 683 684 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src, 685 u64 dst) 686 { 687 int ret; 688 struct btrfs_root *quota_root = trans->fs_info->quota_root; 689 struct btrfs_path *path; 690 struct btrfs_key key; 691 692 path = btrfs_alloc_path(); 693 if (!path) 694 return -ENOMEM; 695 696 key.objectid = src; 697 key.type = BTRFS_QGROUP_RELATION_KEY; 698 key.offset = dst; 699 700 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 701 if (ret < 0) 702 goto out; 703 704 if (ret > 0) { 705 ret = -ENOENT; 706 goto out; 707 } 708 709 ret = btrfs_del_item(trans, quota_root, path); 710 out: 711 btrfs_free_path(path); 712 return ret; 713 } 714 715 static int add_qgroup_item(struct btrfs_trans_handle *trans, 716 struct btrfs_root *quota_root, u64 qgroupid) 717 { 718 int ret; 719 struct btrfs_path *path; 720 struct btrfs_qgroup_info_item *qgroup_info; 721 struct btrfs_qgroup_limit_item *qgroup_limit; 722 struct extent_buffer *leaf; 723 struct btrfs_key key; 724 725 if (btrfs_is_testing(quota_root->fs_info)) 726 return 0; 727 728 path = btrfs_alloc_path(); 729 if (!path) 730 return -ENOMEM; 731 732 key.objectid = 0; 733 key.type = BTRFS_QGROUP_INFO_KEY; 734 key.offset = qgroupid; 735 736 /* 737 * Avoid a transaction abort by catching -EEXIST here. In that 738 * case, we proceed by re-initializing the existing structure 739 * on disk. 740 */ 741 742 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 743 sizeof(*qgroup_info)); 744 if (ret && ret != -EEXIST) 745 goto out; 746 747 leaf = path->nodes[0]; 748 qgroup_info = btrfs_item_ptr(leaf, path->slots[0], 749 struct btrfs_qgroup_info_item); 750 btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid); 751 btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0); 752 btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0); 753 btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0); 754 btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0); 755 756 btrfs_mark_buffer_dirty(trans, leaf); 757 758 btrfs_release_path(path); 759 760 key.type = BTRFS_QGROUP_LIMIT_KEY; 761 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 762 sizeof(*qgroup_limit)); 763 if (ret && ret != -EEXIST) 764 goto out; 765 766 leaf = path->nodes[0]; 767 qgroup_limit = btrfs_item_ptr(leaf, path->slots[0], 768 struct btrfs_qgroup_limit_item); 769 btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0); 770 btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0); 771 btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0); 772 btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0); 773 btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0); 774 775 btrfs_mark_buffer_dirty(trans, leaf); 776 777 ret = 0; 778 out: 779 btrfs_free_path(path); 780 return ret; 781 } 782 783 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid) 784 { 785 int ret; 786 struct btrfs_root *quota_root = trans->fs_info->quota_root; 787 struct btrfs_path *path; 788 struct btrfs_key key; 789 790 path = btrfs_alloc_path(); 791 if (!path) 792 return -ENOMEM; 793 794 key.objectid = 0; 795 key.type = BTRFS_QGROUP_INFO_KEY; 796 key.offset = qgroupid; 797 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 798 if (ret < 0) 799 goto out; 800 801 if (ret > 0) { 802 ret = -ENOENT; 803 goto out; 804 } 805 806 ret = btrfs_del_item(trans, quota_root, path); 807 if (ret) 808 goto out; 809 810 btrfs_release_path(path); 811 812 key.type = BTRFS_QGROUP_LIMIT_KEY; 813 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 814 if (ret < 0) 815 goto out; 816 817 if (ret > 0) { 818 ret = -ENOENT; 819 goto out; 820 } 821 822 ret = btrfs_del_item(trans, quota_root, path); 823 824 out: 825 btrfs_free_path(path); 826 return ret; 827 } 828 829 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans, 830 struct btrfs_qgroup *qgroup) 831 { 832 struct btrfs_root *quota_root = trans->fs_info->quota_root; 833 struct btrfs_path *path; 834 struct btrfs_key key; 835 struct extent_buffer *l; 836 struct btrfs_qgroup_limit_item *qgroup_limit; 837 int ret; 838 int slot; 839 840 key.objectid = 0; 841 key.type = BTRFS_QGROUP_LIMIT_KEY; 842 key.offset = qgroup->qgroupid; 843 844 path = btrfs_alloc_path(); 845 if (!path) 846 return -ENOMEM; 847 848 ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); 849 if (ret > 0) 850 ret = -ENOENT; 851 852 if (ret) 853 goto out; 854 855 l = path->nodes[0]; 856 slot = path->slots[0]; 857 qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item); 858 btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags); 859 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer); 860 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl); 861 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer); 862 btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl); 863 864 btrfs_mark_buffer_dirty(trans, l); 865 866 out: 867 btrfs_free_path(path); 868 return ret; 869 } 870 871 static int update_qgroup_info_item(struct btrfs_trans_handle *trans, 872 struct btrfs_qgroup *qgroup) 873 { 874 struct btrfs_fs_info *fs_info = trans->fs_info; 875 struct btrfs_root *quota_root = fs_info->quota_root; 876 struct btrfs_path *path; 877 struct btrfs_key key; 878 struct extent_buffer *l; 879 struct btrfs_qgroup_info_item *qgroup_info; 880 int ret; 881 int slot; 882 883 if (btrfs_is_testing(fs_info)) 884 return 0; 885 886 key.objectid = 0; 887 key.type = BTRFS_QGROUP_INFO_KEY; 888 key.offset = qgroup->qgroupid; 889 890 path = btrfs_alloc_path(); 891 if (!path) 892 return -ENOMEM; 893 894 ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); 895 if (ret > 0) 896 ret = -ENOENT; 897 898 if (ret) 899 goto out; 900 901 l = path->nodes[0]; 902 slot = path->slots[0]; 903 qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item); 904 btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid); 905 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer); 906 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr); 907 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl); 908 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr); 909 910 btrfs_mark_buffer_dirty(trans, l); 911 912 out: 913 btrfs_free_path(path); 914 return ret; 915 } 916 917 static int update_qgroup_status_item(struct btrfs_trans_handle *trans) 918 { 919 struct btrfs_fs_info *fs_info = trans->fs_info; 920 struct btrfs_root *quota_root = fs_info->quota_root; 921 struct btrfs_path *path; 922 struct btrfs_key key; 923 struct extent_buffer *l; 924 struct btrfs_qgroup_status_item *ptr; 925 int ret; 926 int slot; 927 928 key.objectid = 0; 929 key.type = BTRFS_QGROUP_STATUS_KEY; 930 key.offset = 0; 931 932 path = btrfs_alloc_path(); 933 if (!path) 934 return -ENOMEM; 935 936 ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); 937 if (ret > 0) 938 ret = -ENOENT; 939 940 if (ret) 941 goto out; 942 943 l = path->nodes[0]; 944 slot = path->slots[0]; 945 ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item); 946 btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags & 947 BTRFS_QGROUP_STATUS_FLAGS_MASK); 948 btrfs_set_qgroup_status_generation(l, ptr, trans->transid); 949 btrfs_set_qgroup_status_rescan(l, ptr, 950 fs_info->qgroup_rescan_progress.objectid); 951 952 btrfs_mark_buffer_dirty(trans, l); 953 954 out: 955 btrfs_free_path(path); 956 return ret; 957 } 958 959 /* 960 * called with qgroup_lock held 961 */ 962 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans, 963 struct btrfs_root *root) 964 { 965 struct btrfs_path *path; 966 struct btrfs_key key; 967 struct extent_buffer *leaf = NULL; 968 int ret; 969 int nr = 0; 970 971 path = btrfs_alloc_path(); 972 if (!path) 973 return -ENOMEM; 974 975 key.objectid = 0; 976 key.offset = 0; 977 key.type = 0; 978 979 while (1) { 980 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 981 if (ret < 0) 982 goto out; 983 leaf = path->nodes[0]; 984 nr = btrfs_header_nritems(leaf); 985 if (!nr) 986 break; 987 /* 988 * delete the leaf one by one 989 * since the whole tree is going 990 * to be deleted. 991 */ 992 path->slots[0] = 0; 993 ret = btrfs_del_items(trans, root, path, 0, nr); 994 if (ret) 995 goto out; 996 997 btrfs_release_path(path); 998 } 999 ret = 0; 1000 out: 1001 btrfs_free_path(path); 1002 return ret; 1003 } 1004 1005 int btrfs_quota_enable(struct btrfs_fs_info *fs_info, 1006 struct btrfs_ioctl_quota_ctl_args *quota_ctl_args) 1007 { 1008 struct btrfs_root *quota_root; 1009 struct btrfs_root *tree_root = fs_info->tree_root; 1010 struct btrfs_path *path = NULL; 1011 struct btrfs_qgroup_status_item *ptr; 1012 struct extent_buffer *leaf; 1013 struct btrfs_key key; 1014 struct btrfs_key found_key; 1015 struct btrfs_qgroup *qgroup = NULL; 1016 struct btrfs_qgroup *prealloc = NULL; 1017 struct btrfs_trans_handle *trans = NULL; 1018 struct ulist *ulist = NULL; 1019 const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA); 1020 int ret = 0; 1021 int slot; 1022 1023 /* 1024 * We need to have subvol_sem write locked, to prevent races between 1025 * concurrent tasks trying to enable quotas, because we will unlock 1026 * and relock qgroup_ioctl_lock before setting fs_info->quota_root 1027 * and before setting BTRFS_FS_QUOTA_ENABLED. 1028 */ 1029 lockdep_assert_held_write(&fs_info->subvol_sem); 1030 1031 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 1032 btrfs_err(fs_info, 1033 "qgroups are currently unsupported in extent tree v2"); 1034 return -EINVAL; 1035 } 1036 1037 mutex_lock(&fs_info->qgroup_ioctl_lock); 1038 if (fs_info->quota_root) 1039 goto out; 1040 1041 ulist = ulist_alloc(GFP_KERNEL); 1042 if (!ulist) { 1043 ret = -ENOMEM; 1044 goto out; 1045 } 1046 1047 ret = btrfs_sysfs_add_qgroups(fs_info); 1048 if (ret < 0) 1049 goto out; 1050 1051 /* 1052 * Unlock qgroup_ioctl_lock before starting the transaction. This is to 1053 * avoid lock acquisition inversion problems (reported by lockdep) between 1054 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we 1055 * start a transaction. 1056 * After we started the transaction lock qgroup_ioctl_lock again and 1057 * check if someone else created the quota root in the meanwhile. If so, 1058 * just return success and release the transaction handle. 1059 * 1060 * Also we don't need to worry about someone else calling 1061 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because 1062 * that function returns 0 (success) when the sysfs entries already exist. 1063 */ 1064 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1065 1066 /* 1067 * 1 for quota root item 1068 * 1 for BTRFS_QGROUP_STATUS item 1069 * 1070 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items 1071 * per subvolume. However those are not currently reserved since it 1072 * would be a lot of overkill. 1073 */ 1074 trans = btrfs_start_transaction(tree_root, 2); 1075 1076 mutex_lock(&fs_info->qgroup_ioctl_lock); 1077 if (IS_ERR(trans)) { 1078 ret = PTR_ERR(trans); 1079 trans = NULL; 1080 goto out; 1081 } 1082 1083 if (fs_info->quota_root) 1084 goto out; 1085 1086 fs_info->qgroup_ulist = ulist; 1087 ulist = NULL; 1088 1089 /* 1090 * initially create the quota tree 1091 */ 1092 quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID); 1093 if (IS_ERR(quota_root)) { 1094 ret = PTR_ERR(quota_root); 1095 btrfs_abort_transaction(trans, ret); 1096 goto out; 1097 } 1098 1099 path = btrfs_alloc_path(); 1100 if (!path) { 1101 ret = -ENOMEM; 1102 btrfs_abort_transaction(trans, ret); 1103 goto out_free_root; 1104 } 1105 1106 key.objectid = 0; 1107 key.type = BTRFS_QGROUP_STATUS_KEY; 1108 key.offset = 0; 1109 1110 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 1111 sizeof(*ptr)); 1112 if (ret) { 1113 btrfs_abort_transaction(trans, ret); 1114 goto out_free_path; 1115 } 1116 1117 leaf = path->nodes[0]; 1118 ptr = btrfs_item_ptr(leaf, path->slots[0], 1119 struct btrfs_qgroup_status_item); 1120 btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid); 1121 btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION); 1122 fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON; 1123 if (simple) { 1124 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE; 1125 btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid); 1126 } else { 1127 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1128 } 1129 btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags & 1130 BTRFS_QGROUP_STATUS_FLAGS_MASK); 1131 btrfs_set_qgroup_status_rescan(leaf, ptr, 0); 1132 1133 btrfs_mark_buffer_dirty(trans, leaf); 1134 1135 key.objectid = 0; 1136 key.type = BTRFS_ROOT_REF_KEY; 1137 key.offset = 0; 1138 1139 btrfs_release_path(path); 1140 ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0); 1141 if (ret > 0) 1142 goto out_add_root; 1143 if (ret < 0) { 1144 btrfs_abort_transaction(trans, ret); 1145 goto out_free_path; 1146 } 1147 1148 while (1) { 1149 slot = path->slots[0]; 1150 leaf = path->nodes[0]; 1151 btrfs_item_key_to_cpu(leaf, &found_key, slot); 1152 1153 if (found_key.type == BTRFS_ROOT_REF_KEY) { 1154 1155 /* Release locks on tree_root before we access quota_root */ 1156 btrfs_release_path(path); 1157 1158 /* We should not have a stray @prealloc pointer. */ 1159 ASSERT(prealloc == NULL); 1160 prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); 1161 if (!prealloc) { 1162 ret = -ENOMEM; 1163 btrfs_abort_transaction(trans, ret); 1164 goto out_free_path; 1165 } 1166 1167 ret = add_qgroup_item(trans, quota_root, 1168 found_key.offset); 1169 if (ret) { 1170 btrfs_abort_transaction(trans, ret); 1171 goto out_free_path; 1172 } 1173 1174 qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset); 1175 prealloc = NULL; 1176 if (IS_ERR(qgroup)) { 1177 ret = PTR_ERR(qgroup); 1178 btrfs_abort_transaction(trans, ret); 1179 goto out_free_path; 1180 } 1181 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 1182 if (ret < 0) { 1183 btrfs_abort_transaction(trans, ret); 1184 goto out_free_path; 1185 } 1186 ret = btrfs_search_slot_for_read(tree_root, &found_key, 1187 path, 1, 0); 1188 if (ret < 0) { 1189 btrfs_abort_transaction(trans, ret); 1190 goto out_free_path; 1191 } 1192 if (ret > 0) { 1193 /* 1194 * Shouldn't happen, but in case it does we 1195 * don't need to do the btrfs_next_item, just 1196 * continue. 1197 */ 1198 continue; 1199 } 1200 } 1201 ret = btrfs_next_item(tree_root, path); 1202 if (ret < 0) { 1203 btrfs_abort_transaction(trans, ret); 1204 goto out_free_path; 1205 } 1206 if (ret) 1207 break; 1208 } 1209 1210 out_add_root: 1211 btrfs_release_path(path); 1212 ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID); 1213 if (ret) { 1214 btrfs_abort_transaction(trans, ret); 1215 goto out_free_path; 1216 } 1217 1218 ASSERT(prealloc == NULL); 1219 prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); 1220 if (!prealloc) { 1221 ret = -ENOMEM; 1222 goto out_free_path; 1223 } 1224 qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID); 1225 prealloc = NULL; 1226 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 1227 if (ret < 0) { 1228 btrfs_abort_transaction(trans, ret); 1229 goto out_free_path; 1230 } 1231 1232 fs_info->qgroup_enable_gen = trans->transid; 1233 1234 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1235 /* 1236 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid 1237 * a deadlock with tasks concurrently doing other qgroup operations, such 1238 * adding/removing qgroups or adding/deleting qgroup relations for example, 1239 * because all qgroup operations first start or join a transaction and then 1240 * lock the qgroup_ioctl_lock mutex. 1241 * We are safe from a concurrent task trying to enable quotas, by calling 1242 * this function, since we are serialized by fs_info->subvol_sem. 1243 */ 1244 ret = btrfs_commit_transaction(trans); 1245 trans = NULL; 1246 mutex_lock(&fs_info->qgroup_ioctl_lock); 1247 if (ret) 1248 goto out_free_path; 1249 1250 /* 1251 * Set quota enabled flag after committing the transaction, to avoid 1252 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot 1253 * creation. 1254 */ 1255 spin_lock(&fs_info->qgroup_lock); 1256 fs_info->quota_root = quota_root; 1257 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1258 if (simple) 1259 btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA); 1260 spin_unlock(&fs_info->qgroup_lock); 1261 1262 /* Skip rescan for simple qgroups. */ 1263 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) 1264 goto out_free_path; 1265 1266 ret = qgroup_rescan_init(fs_info, 0, 1); 1267 if (!ret) { 1268 qgroup_rescan_zero_tracking(fs_info); 1269 fs_info->qgroup_rescan_running = true; 1270 btrfs_queue_work(fs_info->qgroup_rescan_workers, 1271 &fs_info->qgroup_rescan_work); 1272 } else { 1273 /* 1274 * We have set both BTRFS_FS_QUOTA_ENABLED and 1275 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with 1276 * -EINPROGRESS. That can happen because someone started the 1277 * rescan worker by calling quota rescan ioctl before we 1278 * attempted to initialize the rescan worker. Failure due to 1279 * quotas disabled in the meanwhile is not possible, because 1280 * we are holding a write lock on fs_info->subvol_sem, which 1281 * is also acquired when disabling quotas. 1282 * Ignore such error, and any other error would need to undo 1283 * everything we did in the transaction we just committed. 1284 */ 1285 ASSERT(ret == -EINPROGRESS); 1286 ret = 0; 1287 } 1288 1289 out_free_path: 1290 btrfs_free_path(path); 1291 out_free_root: 1292 if (ret) 1293 btrfs_put_root(quota_root); 1294 out: 1295 if (ret) { 1296 ulist_free(fs_info->qgroup_ulist); 1297 fs_info->qgroup_ulist = NULL; 1298 btrfs_sysfs_del_qgroups(fs_info); 1299 } 1300 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1301 if (ret && trans) 1302 btrfs_end_transaction(trans); 1303 else if (trans) 1304 ret = btrfs_end_transaction(trans); 1305 ulist_free(ulist); 1306 kfree(prealloc); 1307 return ret; 1308 } 1309 1310 /* 1311 * It is possible to have outstanding ordered extents which reserved bytes 1312 * before we disabled. We need to fully flush delalloc, ordered extents, and a 1313 * commit to ensure that we don't leak such reservations, only to have them 1314 * come back if we re-enable. 1315 * 1316 * - enable simple quotas 1317 * - reserve space 1318 * - release it, store rsv_bytes in OE 1319 * - disable quotas 1320 * - enable simple quotas (qgroup rsv are all 0) 1321 * - OE finishes 1322 * - run delayed refs 1323 * - free rsv_bytes, resulting in miscounting or even underflow 1324 */ 1325 static int flush_reservations(struct btrfs_fs_info *fs_info) 1326 { 1327 int ret; 1328 1329 ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false); 1330 if (ret) 1331 return ret; 1332 btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL); 1333 1334 return btrfs_commit_current_transaction(fs_info->tree_root); 1335 } 1336 1337 int btrfs_quota_disable(struct btrfs_fs_info *fs_info) 1338 { 1339 struct btrfs_root *quota_root = NULL; 1340 struct btrfs_trans_handle *trans = NULL; 1341 int ret = 0; 1342 1343 /* 1344 * We need to have subvol_sem write locked to prevent races with 1345 * snapshot creation. 1346 */ 1347 lockdep_assert_held_write(&fs_info->subvol_sem); 1348 1349 /* 1350 * Relocation will mess with backrefs, so make sure we have the 1351 * cleaner_mutex held to protect us from relocate. 1352 */ 1353 lockdep_assert_held(&fs_info->cleaner_mutex); 1354 1355 mutex_lock(&fs_info->qgroup_ioctl_lock); 1356 if (!fs_info->quota_root) 1357 goto out; 1358 1359 /* 1360 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to 1361 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs 1362 * to lock that mutex while holding a transaction handle and the rescan 1363 * worker needs to commit a transaction. 1364 */ 1365 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1366 1367 /* 1368 * Request qgroup rescan worker to complete and wait for it. This wait 1369 * must be done before transaction start for quota disable since it may 1370 * deadlock with transaction by the qgroup rescan worker. 1371 */ 1372 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1373 btrfs_qgroup_wait_for_completion(fs_info, false); 1374 1375 /* 1376 * We have nothing held here and no trans handle, just return the error 1377 * if there is one. 1378 */ 1379 ret = flush_reservations(fs_info); 1380 if (ret) 1381 return ret; 1382 1383 /* 1384 * 1 For the root item 1385 * 1386 * We should also reserve enough items for the quota tree deletion in 1387 * btrfs_clean_quota_tree but this is not done. 1388 * 1389 * Also, we must always start a transaction without holding the mutex 1390 * qgroup_ioctl_lock, see btrfs_quota_enable(). 1391 */ 1392 trans = btrfs_start_transaction(fs_info->tree_root, 1); 1393 1394 mutex_lock(&fs_info->qgroup_ioctl_lock); 1395 if (IS_ERR(trans)) { 1396 ret = PTR_ERR(trans); 1397 trans = NULL; 1398 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1399 goto out; 1400 } 1401 1402 if (!fs_info->quota_root) 1403 goto out; 1404 1405 spin_lock(&fs_info->qgroup_lock); 1406 quota_root = fs_info->quota_root; 1407 fs_info->quota_root = NULL; 1408 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 1409 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE; 1410 fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL; 1411 spin_unlock(&fs_info->qgroup_lock); 1412 1413 btrfs_free_qgroup_config(fs_info); 1414 1415 ret = btrfs_clean_quota_tree(trans, quota_root); 1416 if (ret) { 1417 btrfs_abort_transaction(trans, ret); 1418 goto out; 1419 } 1420 1421 ret = btrfs_del_root(trans, "a_root->root_key); 1422 if (ret) { 1423 btrfs_abort_transaction(trans, ret); 1424 goto out; 1425 } 1426 1427 spin_lock(&fs_info->trans_lock); 1428 list_del("a_root->dirty_list); 1429 spin_unlock(&fs_info->trans_lock); 1430 1431 btrfs_tree_lock(quota_root->node); 1432 btrfs_clear_buffer_dirty(trans, quota_root->node); 1433 btrfs_tree_unlock(quota_root->node); 1434 ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root), 1435 quota_root->node, 0, 1); 1436 1437 if (ret < 0) 1438 btrfs_abort_transaction(trans, ret); 1439 1440 out: 1441 btrfs_put_root(quota_root); 1442 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1443 if (ret && trans) 1444 btrfs_end_transaction(trans); 1445 else if (trans) 1446 ret = btrfs_commit_transaction(trans); 1447 return ret; 1448 } 1449 1450 static void qgroup_dirty(struct btrfs_fs_info *fs_info, 1451 struct btrfs_qgroup *qgroup) 1452 { 1453 if (list_empty(&qgroup->dirty)) 1454 list_add(&qgroup->dirty, &fs_info->dirty_qgroups); 1455 } 1456 1457 static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup) 1458 { 1459 if (!list_empty(&qgroup->iterator)) 1460 return; 1461 1462 list_add_tail(&qgroup->iterator, head); 1463 } 1464 1465 static void qgroup_iterator_clean(struct list_head *head) 1466 { 1467 while (!list_empty(head)) { 1468 struct btrfs_qgroup *qgroup; 1469 1470 qgroup = list_first_entry(head, struct btrfs_qgroup, iterator); 1471 list_del_init(&qgroup->iterator); 1472 } 1473 } 1474 1475 /* 1476 * The easy accounting, we're updating qgroup relationship whose child qgroup 1477 * only has exclusive extents. 1478 * 1479 * In this case, all exclusive extents will also be exclusive for parent, so 1480 * excl/rfer just get added/removed. 1481 * 1482 * So is qgroup reservation space, which should also be added/removed to 1483 * parent. 1484 * Or when child tries to release reservation space, parent will underflow its 1485 * reservation (for relationship adding case). 1486 * 1487 * Caller should hold fs_info->qgroup_lock. 1488 */ 1489 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root, 1490 struct btrfs_qgroup *src, int sign) 1491 { 1492 struct btrfs_qgroup *qgroup; 1493 struct btrfs_qgroup *cur; 1494 LIST_HEAD(qgroup_list); 1495 u64 num_bytes = src->excl; 1496 int ret = 0; 1497 1498 qgroup = find_qgroup_rb(fs_info, ref_root); 1499 if (!qgroup) 1500 goto out; 1501 1502 qgroup_iterator_add(&qgroup_list, qgroup); 1503 list_for_each_entry(cur, &qgroup_list, iterator) { 1504 struct btrfs_qgroup_list *glist; 1505 1506 qgroup->rfer += sign * num_bytes; 1507 qgroup->rfer_cmpr += sign * num_bytes; 1508 1509 WARN_ON(sign < 0 && qgroup->excl < num_bytes); 1510 qgroup->excl += sign * num_bytes; 1511 qgroup->excl_cmpr += sign * num_bytes; 1512 1513 if (sign > 0) 1514 qgroup_rsv_add_by_qgroup(fs_info, qgroup, src); 1515 else 1516 qgroup_rsv_release_by_qgroup(fs_info, qgroup, src); 1517 qgroup_dirty(fs_info, qgroup); 1518 1519 /* Append parent qgroups to @qgroup_list. */ 1520 list_for_each_entry(glist, &qgroup->groups, next_group) 1521 qgroup_iterator_add(&qgroup_list, glist->group); 1522 } 1523 ret = 0; 1524 out: 1525 qgroup_iterator_clean(&qgroup_list); 1526 return ret; 1527 } 1528 1529 1530 /* 1531 * Quick path for updating qgroup with only excl refs. 1532 * 1533 * In that case, just update all parent will be enough. 1534 * Or we needs to do a full rescan. 1535 * Caller should also hold fs_info->qgroup_lock. 1536 * 1537 * Return 0 for quick update, return >0 for need to full rescan 1538 * and mark INCONSISTENT flag. 1539 * Return < 0 for other error. 1540 */ 1541 static int quick_update_accounting(struct btrfs_fs_info *fs_info, 1542 u64 src, u64 dst, int sign) 1543 { 1544 struct btrfs_qgroup *qgroup; 1545 int ret = 1; 1546 1547 qgroup = find_qgroup_rb(fs_info, src); 1548 if (!qgroup) 1549 goto out; 1550 if (qgroup->excl == qgroup->rfer) { 1551 ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign); 1552 if (ret < 0) 1553 goto out; 1554 ret = 0; 1555 } 1556 out: 1557 if (ret) 1558 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1559 return ret; 1560 } 1561 1562 /* 1563 * Add relation between @src and @dst qgroup. The @prealloc is allocated by the 1564 * callers and transferred here (either used or freed on error). 1565 */ 1566 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst, 1567 struct btrfs_qgroup_list *prealloc) 1568 { 1569 struct btrfs_fs_info *fs_info = trans->fs_info; 1570 struct btrfs_qgroup *parent; 1571 struct btrfs_qgroup *member; 1572 struct btrfs_qgroup_list *list; 1573 int ret = 0; 1574 1575 ASSERT(prealloc); 1576 1577 /* Check the level of src and dst first */ 1578 if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) 1579 return -EINVAL; 1580 1581 mutex_lock(&fs_info->qgroup_ioctl_lock); 1582 if (!fs_info->quota_root) { 1583 ret = -ENOTCONN; 1584 goto out; 1585 } 1586 member = find_qgroup_rb(fs_info, src); 1587 parent = find_qgroup_rb(fs_info, dst); 1588 if (!member || !parent) { 1589 ret = -EINVAL; 1590 goto out; 1591 } 1592 1593 /* check if such qgroup relation exist firstly */ 1594 list_for_each_entry(list, &member->groups, next_group) { 1595 if (list->group == parent) { 1596 ret = -EEXIST; 1597 goto out; 1598 } 1599 } 1600 1601 ret = add_qgroup_relation_item(trans, src, dst); 1602 if (ret) 1603 goto out; 1604 1605 ret = add_qgroup_relation_item(trans, dst, src); 1606 if (ret) { 1607 del_qgroup_relation_item(trans, src, dst); 1608 goto out; 1609 } 1610 1611 spin_lock(&fs_info->qgroup_lock); 1612 ret = __add_relation_rb(prealloc, member, parent); 1613 prealloc = NULL; 1614 if (ret < 0) { 1615 spin_unlock(&fs_info->qgroup_lock); 1616 goto out; 1617 } 1618 ret = quick_update_accounting(fs_info, src, dst, 1); 1619 spin_unlock(&fs_info->qgroup_lock); 1620 out: 1621 kfree(prealloc); 1622 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1623 return ret; 1624 } 1625 1626 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, 1627 u64 dst) 1628 { 1629 struct btrfs_fs_info *fs_info = trans->fs_info; 1630 struct btrfs_qgroup *parent; 1631 struct btrfs_qgroup *member; 1632 struct btrfs_qgroup_list *list; 1633 bool found = false; 1634 int ret = 0; 1635 int ret2; 1636 1637 if (!fs_info->quota_root) { 1638 ret = -ENOTCONN; 1639 goto out; 1640 } 1641 1642 member = find_qgroup_rb(fs_info, src); 1643 parent = find_qgroup_rb(fs_info, dst); 1644 /* 1645 * The parent/member pair doesn't exist, then try to delete the dead 1646 * relation items only. 1647 */ 1648 if (!member || !parent) 1649 goto delete_item; 1650 1651 /* check if such qgroup relation exist firstly */ 1652 list_for_each_entry(list, &member->groups, next_group) { 1653 if (list->group == parent) { 1654 found = true; 1655 break; 1656 } 1657 } 1658 1659 delete_item: 1660 ret = del_qgroup_relation_item(trans, src, dst); 1661 if (ret < 0 && ret != -ENOENT) 1662 goto out; 1663 ret2 = del_qgroup_relation_item(trans, dst, src); 1664 if (ret2 < 0 && ret2 != -ENOENT) 1665 goto out; 1666 1667 /* At least one deletion succeeded, return 0 */ 1668 if (!ret || !ret2) 1669 ret = 0; 1670 1671 if (found) { 1672 spin_lock(&fs_info->qgroup_lock); 1673 del_relation_rb(fs_info, src, dst); 1674 ret = quick_update_accounting(fs_info, src, dst, -1); 1675 spin_unlock(&fs_info->qgroup_lock); 1676 } 1677 out: 1678 return ret; 1679 } 1680 1681 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, 1682 u64 dst) 1683 { 1684 struct btrfs_fs_info *fs_info = trans->fs_info; 1685 int ret = 0; 1686 1687 mutex_lock(&fs_info->qgroup_ioctl_lock); 1688 ret = __del_qgroup_relation(trans, src, dst); 1689 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1690 1691 return ret; 1692 } 1693 1694 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) 1695 { 1696 struct btrfs_fs_info *fs_info = trans->fs_info; 1697 struct btrfs_root *quota_root; 1698 struct btrfs_qgroup *qgroup; 1699 struct btrfs_qgroup *prealloc = NULL; 1700 int ret = 0; 1701 1702 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) 1703 return 0; 1704 1705 mutex_lock(&fs_info->qgroup_ioctl_lock); 1706 if (!fs_info->quota_root) { 1707 ret = -ENOTCONN; 1708 goto out; 1709 } 1710 quota_root = fs_info->quota_root; 1711 qgroup = find_qgroup_rb(fs_info, qgroupid); 1712 if (qgroup) { 1713 ret = -EEXIST; 1714 goto out; 1715 } 1716 1717 prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); 1718 if (!prealloc) { 1719 ret = -ENOMEM; 1720 goto out; 1721 } 1722 1723 ret = add_qgroup_item(trans, quota_root, qgroupid); 1724 if (ret) 1725 goto out; 1726 1727 spin_lock(&fs_info->qgroup_lock); 1728 qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid); 1729 spin_unlock(&fs_info->qgroup_lock); 1730 prealloc = NULL; 1731 1732 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 1733 out: 1734 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1735 kfree(prealloc); 1736 return ret; 1737 } 1738 1739 /* 1740 * Return 0 if we can not delete the qgroup (not empty or has children etc). 1741 * Return >0 if we can delete the qgroup. 1742 * Return <0 for other errors during tree search. 1743 */ 1744 static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup) 1745 { 1746 struct btrfs_key key; 1747 struct btrfs_path *path; 1748 int ret; 1749 1750 /* 1751 * Squota would never be inconsistent, but there can still be case 1752 * where a dropped subvolume still has qgroup numbers, and squota 1753 * relies on such qgroup for future accounting. 1754 * 1755 * So for squota, do not allow dropping any non-zero qgroup. 1756 */ 1757 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && 1758 (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr)) 1759 return 0; 1760 1761 /* For higher level qgroup, we can only delete it if it has no child. */ 1762 if (btrfs_qgroup_level(qgroup->qgroupid)) { 1763 if (!list_empty(&qgroup->members)) 1764 return 0; 1765 return 1; 1766 } 1767 1768 /* 1769 * For level-0 qgroups, we can only delete it if it has no subvolume 1770 * for it. 1771 * This means even a subvolume is unlinked but not yet fully dropped, 1772 * we can not delete the qgroup. 1773 */ 1774 key.objectid = qgroup->qgroupid; 1775 key.type = BTRFS_ROOT_ITEM_KEY; 1776 key.offset = -1ULL; 1777 path = btrfs_alloc_path(); 1778 if (!path) 1779 return -ENOMEM; 1780 1781 ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL); 1782 btrfs_free_path(path); 1783 /* 1784 * The @ret from btrfs_find_root() exactly matches our definition for 1785 * the return value, thus can be returned directly. 1786 */ 1787 return ret; 1788 } 1789 1790 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) 1791 { 1792 struct btrfs_fs_info *fs_info = trans->fs_info; 1793 struct btrfs_qgroup *qgroup; 1794 struct btrfs_qgroup_list *list; 1795 int ret = 0; 1796 1797 mutex_lock(&fs_info->qgroup_ioctl_lock); 1798 if (!fs_info->quota_root) { 1799 ret = -ENOTCONN; 1800 goto out; 1801 } 1802 1803 qgroup = find_qgroup_rb(fs_info, qgroupid); 1804 if (!qgroup) { 1805 ret = -ENOENT; 1806 goto out; 1807 } 1808 1809 ret = can_delete_qgroup(fs_info, qgroup); 1810 if (ret < 0) 1811 goto out; 1812 if (ret == 0) { 1813 ret = -EBUSY; 1814 goto out; 1815 } 1816 1817 /* Check if there are no children of this qgroup */ 1818 if (!list_empty(&qgroup->members)) { 1819 ret = -EBUSY; 1820 goto out; 1821 } 1822 1823 ret = del_qgroup_item(trans, qgroupid); 1824 if (ret && ret != -ENOENT) 1825 goto out; 1826 1827 while (!list_empty(&qgroup->groups)) { 1828 list = list_first_entry(&qgroup->groups, 1829 struct btrfs_qgroup_list, next_group); 1830 ret = __del_qgroup_relation(trans, qgroupid, 1831 list->group->qgroupid); 1832 if (ret) 1833 goto out; 1834 } 1835 1836 spin_lock(&fs_info->qgroup_lock); 1837 /* 1838 * Warn on reserved space. The subvolume should has no child nor 1839 * corresponding subvolume. 1840 * Thus its reserved space should all be zero, no matter if qgroup 1841 * is consistent or the mode. 1842 */ 1843 WARN_ON(qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] || 1844 qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] || 1845 qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]); 1846 /* 1847 * The same for rfer/excl numbers, but that's only if our qgroup is 1848 * consistent and if it's in regular qgroup mode. 1849 * For simple mode it's not as accurate thus we can hit non-zero values 1850 * very frequently. 1851 */ 1852 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL && 1853 !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) { 1854 if (WARN_ON(qgroup->rfer || qgroup->excl || 1855 qgroup->rfer_cmpr || qgroup->excl_cmpr)) { 1856 btrfs_warn_rl(fs_info, 1857 "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu", 1858 btrfs_qgroup_level(qgroup->qgroupid), 1859 btrfs_qgroup_subvolid(qgroup->qgroupid), 1860 qgroup->rfer, qgroup->rfer_cmpr, 1861 qgroup->excl, qgroup->excl_cmpr); 1862 qgroup_mark_inconsistent(fs_info); 1863 } 1864 } 1865 del_qgroup_rb(fs_info, qgroupid); 1866 spin_unlock(&fs_info->qgroup_lock); 1867 1868 /* 1869 * Remove the qgroup from sysfs now without holding the qgroup_lock 1870 * spinlock, since the sysfs_remove_group() function needs to take 1871 * the mutex kernfs_mutex through kernfs_remove_by_name_ns(). 1872 */ 1873 btrfs_sysfs_del_one_qgroup(fs_info, qgroup); 1874 kfree(qgroup); 1875 out: 1876 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1877 return ret; 1878 } 1879 1880 int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid) 1881 { 1882 struct btrfs_trans_handle *trans; 1883 int ret; 1884 1885 if (!is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || !fs_info->quota_root) 1886 return 0; 1887 1888 /* 1889 * Commit current transaction to make sure all the rfer/excl numbers 1890 * get updated. 1891 */ 1892 trans = btrfs_start_transaction(fs_info->quota_root, 0); 1893 if (IS_ERR(trans)) 1894 return PTR_ERR(trans); 1895 1896 ret = btrfs_commit_transaction(trans); 1897 if (ret < 0) 1898 return ret; 1899 1900 /* Start new trans to delete the qgroup info and limit items. */ 1901 trans = btrfs_start_transaction(fs_info->quota_root, 2); 1902 if (IS_ERR(trans)) 1903 return PTR_ERR(trans); 1904 ret = btrfs_remove_qgroup(trans, subvolid); 1905 btrfs_end_transaction(trans); 1906 /* 1907 * It's squota and the subvolume still has numbers needed for future 1908 * accounting, in this case we can not delete it. Just skip it. 1909 */ 1910 if (ret == -EBUSY) 1911 ret = 0; 1912 return ret; 1913 } 1914 1915 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid, 1916 struct btrfs_qgroup_limit *limit) 1917 { 1918 struct btrfs_fs_info *fs_info = trans->fs_info; 1919 struct btrfs_qgroup *qgroup; 1920 int ret = 0; 1921 /* Sometimes we would want to clear the limit on this qgroup. 1922 * To meet this requirement, we treat the -1 as a special value 1923 * which tell kernel to clear the limit on this qgroup. 1924 */ 1925 const u64 CLEAR_VALUE = -1; 1926 1927 mutex_lock(&fs_info->qgroup_ioctl_lock); 1928 if (!fs_info->quota_root) { 1929 ret = -ENOTCONN; 1930 goto out; 1931 } 1932 1933 qgroup = find_qgroup_rb(fs_info, qgroupid); 1934 if (!qgroup) { 1935 ret = -ENOENT; 1936 goto out; 1937 } 1938 1939 spin_lock(&fs_info->qgroup_lock); 1940 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) { 1941 if (limit->max_rfer == CLEAR_VALUE) { 1942 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1943 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1944 qgroup->max_rfer = 0; 1945 } else { 1946 qgroup->max_rfer = limit->max_rfer; 1947 } 1948 } 1949 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) { 1950 if (limit->max_excl == CLEAR_VALUE) { 1951 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1952 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1953 qgroup->max_excl = 0; 1954 } else { 1955 qgroup->max_excl = limit->max_excl; 1956 } 1957 } 1958 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) { 1959 if (limit->rsv_rfer == CLEAR_VALUE) { 1960 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1961 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1962 qgroup->rsv_rfer = 0; 1963 } else { 1964 qgroup->rsv_rfer = limit->rsv_rfer; 1965 } 1966 } 1967 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) { 1968 if (limit->rsv_excl == CLEAR_VALUE) { 1969 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1970 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1971 qgroup->rsv_excl = 0; 1972 } else { 1973 qgroup->rsv_excl = limit->rsv_excl; 1974 } 1975 } 1976 qgroup->lim_flags |= limit->flags; 1977 1978 spin_unlock(&fs_info->qgroup_lock); 1979 1980 ret = update_qgroup_limit_item(trans, qgroup); 1981 if (ret) { 1982 qgroup_mark_inconsistent(fs_info); 1983 btrfs_info(fs_info, "unable to update quota limit for %llu", 1984 qgroupid); 1985 } 1986 1987 out: 1988 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1989 return ret; 1990 } 1991 1992 /* 1993 * Inform qgroup to trace one dirty extent, its info is recorded in @record. 1994 * So qgroup can account it at transaction committing time. 1995 * 1996 * No lock version, caller must acquire delayed ref lock and allocated memory, 1997 * then call btrfs_qgroup_trace_extent_post() after exiting lock context. 1998 * 1999 * Return 0 for success insert 2000 * Return >0 for existing record, caller can free @record safely. 2001 * Error is not possible 2002 */ 2003 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, 2004 struct btrfs_delayed_ref_root *delayed_refs, 2005 struct btrfs_qgroup_extent_record *record) 2006 { 2007 struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node; 2008 struct rb_node *parent_node = NULL; 2009 struct btrfs_qgroup_extent_record *entry; 2010 u64 bytenr = record->bytenr; 2011 2012 if (!btrfs_qgroup_full_accounting(fs_info)) 2013 return 1; 2014 2015 lockdep_assert_held(&delayed_refs->lock); 2016 trace_btrfs_qgroup_trace_extent(fs_info, record); 2017 2018 while (*p) { 2019 parent_node = *p; 2020 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, 2021 node); 2022 if (bytenr < entry->bytenr) { 2023 p = &(*p)->rb_left; 2024 } else if (bytenr > entry->bytenr) { 2025 p = &(*p)->rb_right; 2026 } else { 2027 if (record->data_rsv && !entry->data_rsv) { 2028 entry->data_rsv = record->data_rsv; 2029 entry->data_rsv_refroot = 2030 record->data_rsv_refroot; 2031 } 2032 return 1; 2033 } 2034 } 2035 2036 rb_link_node(&record->node, parent_node, p); 2037 rb_insert_color(&record->node, &delayed_refs->dirty_extent_root); 2038 return 0; 2039 } 2040 2041 /* 2042 * Post handler after qgroup_trace_extent_nolock(). 2043 * 2044 * NOTE: Current qgroup does the expensive backref walk at transaction 2045 * committing time with TRANS_STATE_COMMIT_DOING, this blocks incoming 2046 * new transaction. 2047 * This is designed to allow btrfs_find_all_roots() to get correct new_roots 2048 * result. 2049 * 2050 * However for old_roots there is no need to do backref walk at that time, 2051 * since we search commit roots to walk backref and result will always be 2052 * correct. 2053 * 2054 * Due to the nature of no lock version, we can't do backref there. 2055 * So we must call btrfs_qgroup_trace_extent_post() after exiting 2056 * spinlock context. 2057 * 2058 * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result 2059 * using current root, then we can move all expensive backref walk out of 2060 * transaction committing, but not now as qgroup accounting will be wrong again. 2061 */ 2062 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans, 2063 struct btrfs_qgroup_extent_record *qrecord) 2064 { 2065 struct btrfs_backref_walk_ctx ctx = { 0 }; 2066 int ret; 2067 2068 if (!btrfs_qgroup_full_accounting(trans->fs_info)) 2069 return 0; 2070 /* 2071 * We are always called in a context where we are already holding a 2072 * transaction handle. Often we are called when adding a data delayed 2073 * reference from btrfs_truncate_inode_items() (truncating or unlinking), 2074 * in which case we will be holding a write lock on extent buffer from a 2075 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to 2076 * acquire fs_info->commit_root_sem, because that is a higher level lock 2077 * that must be acquired before locking any extent buffers. 2078 * 2079 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem 2080 * but we can't pass it a non-NULL transaction handle, because otherwise 2081 * it would not use commit roots and would lock extent buffers, causing 2082 * a deadlock if it ends up trying to read lock the same extent buffer 2083 * that was previously write locked at btrfs_truncate_inode_items(). 2084 * 2085 * So pass a NULL transaction handle to btrfs_find_all_roots() and 2086 * explicitly tell it to not acquire the commit_root_sem - if we are 2087 * holding a transaction handle we don't need its protection. 2088 */ 2089 ASSERT(trans != NULL); 2090 2091 if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING) 2092 return 0; 2093 2094 ctx.bytenr = qrecord->bytenr; 2095 ctx.fs_info = trans->fs_info; 2096 2097 ret = btrfs_find_all_roots(&ctx, true); 2098 if (ret < 0) { 2099 qgroup_mark_inconsistent(trans->fs_info); 2100 btrfs_warn(trans->fs_info, 2101 "error accounting new delayed refs extent (err code: %d), quota inconsistent", 2102 ret); 2103 return 0; 2104 } 2105 2106 /* 2107 * Here we don't need to get the lock of 2108 * trans->transaction->delayed_refs, since inserted qrecord won't 2109 * be deleted, only qrecord->node may be modified (new qrecord insert) 2110 * 2111 * So modifying qrecord->old_roots is safe here 2112 */ 2113 qrecord->old_roots = ctx.roots; 2114 return 0; 2115 } 2116 2117 /* 2118 * Inform qgroup to trace one dirty extent, specified by @bytenr and 2119 * @num_bytes. 2120 * So qgroup can account it at commit trans time. 2121 * 2122 * Better encapsulated version, with memory allocation and backref walk for 2123 * commit roots. 2124 * So this can sleep. 2125 * 2126 * Return 0 if the operation is done. 2127 * Return <0 for error, like memory allocation failure or invalid parameter 2128 * (NULL trans) 2129 */ 2130 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, 2131 u64 num_bytes) 2132 { 2133 struct btrfs_fs_info *fs_info = trans->fs_info; 2134 struct btrfs_qgroup_extent_record *record; 2135 struct btrfs_delayed_ref_root *delayed_refs; 2136 int ret; 2137 2138 if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0) 2139 return 0; 2140 record = kzalloc(sizeof(*record), GFP_NOFS); 2141 if (!record) 2142 return -ENOMEM; 2143 2144 delayed_refs = &trans->transaction->delayed_refs; 2145 record->bytenr = bytenr; 2146 record->num_bytes = num_bytes; 2147 record->old_roots = NULL; 2148 2149 spin_lock(&delayed_refs->lock); 2150 ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record); 2151 spin_unlock(&delayed_refs->lock); 2152 if (ret > 0) { 2153 kfree(record); 2154 return 0; 2155 } 2156 return btrfs_qgroup_trace_extent_post(trans, record); 2157 } 2158 2159 /* 2160 * Inform qgroup to trace all leaf items of data 2161 * 2162 * Return 0 for success 2163 * Return <0 for error(ENOMEM) 2164 */ 2165 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, 2166 struct extent_buffer *eb) 2167 { 2168 struct btrfs_fs_info *fs_info = trans->fs_info; 2169 int nr = btrfs_header_nritems(eb); 2170 int i, extent_type, ret; 2171 struct btrfs_key key; 2172 struct btrfs_file_extent_item *fi; 2173 u64 bytenr, num_bytes; 2174 2175 /* We can be called directly from walk_up_proc() */ 2176 if (!btrfs_qgroup_full_accounting(fs_info)) 2177 return 0; 2178 2179 for (i = 0; i < nr; i++) { 2180 btrfs_item_key_to_cpu(eb, &key, i); 2181 2182 if (key.type != BTRFS_EXTENT_DATA_KEY) 2183 continue; 2184 2185 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 2186 /* filter out non qgroup-accountable extents */ 2187 extent_type = btrfs_file_extent_type(eb, fi); 2188 2189 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 2190 continue; 2191 2192 bytenr = btrfs_file_extent_disk_bytenr(eb, fi); 2193 if (!bytenr) 2194 continue; 2195 2196 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); 2197 2198 ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes); 2199 if (ret) 2200 return ret; 2201 } 2202 cond_resched(); 2203 return 0; 2204 } 2205 2206 /* 2207 * Walk up the tree from the bottom, freeing leaves and any interior 2208 * nodes which have had all slots visited. If a node (leaf or 2209 * interior) is freed, the node above it will have it's slot 2210 * incremented. The root node will never be freed. 2211 * 2212 * At the end of this function, we should have a path which has all 2213 * slots incremented to the next position for a search. If we need to 2214 * read a new node it will be NULL and the node above it will have the 2215 * correct slot selected for a later read. 2216 * 2217 * If we increment the root nodes slot counter past the number of 2218 * elements, 1 is returned to signal completion of the search. 2219 */ 2220 static int adjust_slots_upwards(struct btrfs_path *path, int root_level) 2221 { 2222 int level = 0; 2223 int nr, slot; 2224 struct extent_buffer *eb; 2225 2226 if (root_level == 0) 2227 return 1; 2228 2229 while (level <= root_level) { 2230 eb = path->nodes[level]; 2231 nr = btrfs_header_nritems(eb); 2232 path->slots[level]++; 2233 slot = path->slots[level]; 2234 if (slot >= nr || level == 0) { 2235 /* 2236 * Don't free the root - we will detect this 2237 * condition after our loop and return a 2238 * positive value for caller to stop walking the tree. 2239 */ 2240 if (level != root_level) { 2241 btrfs_tree_unlock_rw(eb, path->locks[level]); 2242 path->locks[level] = 0; 2243 2244 free_extent_buffer(eb); 2245 path->nodes[level] = NULL; 2246 path->slots[level] = 0; 2247 } 2248 } else { 2249 /* 2250 * We have a valid slot to walk back down 2251 * from. Stop here so caller can process these 2252 * new nodes. 2253 */ 2254 break; 2255 } 2256 2257 level++; 2258 } 2259 2260 eb = path->nodes[root_level]; 2261 if (path->slots[root_level] >= btrfs_header_nritems(eb)) 2262 return 1; 2263 2264 return 0; 2265 } 2266 2267 /* 2268 * Helper function to trace a subtree tree block swap. 2269 * 2270 * The swap will happen in highest tree block, but there may be a lot of 2271 * tree blocks involved. 2272 * 2273 * For example: 2274 * OO = Old tree blocks 2275 * NN = New tree blocks allocated during balance 2276 * 2277 * File tree (257) Reloc tree for 257 2278 * L2 OO NN 2279 * / \ / \ 2280 * L1 OO OO (a) OO NN (a) 2281 * / \ / \ / \ / \ 2282 * L0 OO OO OO OO OO OO NN NN 2283 * (b) (c) (b) (c) 2284 * 2285 * When calling qgroup_trace_extent_swap(), we will pass: 2286 * @src_eb = OO(a) 2287 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ] 2288 * @dst_level = 0 2289 * @root_level = 1 2290 * 2291 * In that case, qgroup_trace_extent_swap() will search from OO(a) to 2292 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty. 2293 * 2294 * The main work of qgroup_trace_extent_swap() can be split into 3 parts: 2295 * 2296 * 1) Tree search from @src_eb 2297 * It should acts as a simplified btrfs_search_slot(). 2298 * The key for search can be extracted from @dst_path->nodes[dst_level] 2299 * (first key). 2300 * 2301 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty 2302 * NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty. 2303 * They should be marked during previous (@dst_level = 1) iteration. 2304 * 2305 * 3) Mark file extents in leaves dirty 2306 * We don't have good way to pick out new file extents only. 2307 * So we still follow the old method by scanning all file extents in 2308 * the leave. 2309 * 2310 * This function can free us from keeping two paths, thus later we only need 2311 * to care about how to iterate all new tree blocks in reloc tree. 2312 */ 2313 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans, 2314 struct extent_buffer *src_eb, 2315 struct btrfs_path *dst_path, 2316 int dst_level, int root_level, 2317 bool trace_leaf) 2318 { 2319 struct btrfs_key key; 2320 struct btrfs_path *src_path; 2321 struct btrfs_fs_info *fs_info = trans->fs_info; 2322 u32 nodesize = fs_info->nodesize; 2323 int cur_level = root_level; 2324 int ret; 2325 2326 BUG_ON(dst_level > root_level); 2327 /* Level mismatch */ 2328 if (btrfs_header_level(src_eb) != root_level) 2329 return -EINVAL; 2330 2331 src_path = btrfs_alloc_path(); 2332 if (!src_path) { 2333 ret = -ENOMEM; 2334 goto out; 2335 } 2336 2337 if (dst_level) 2338 btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0); 2339 else 2340 btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0); 2341 2342 /* For src_path */ 2343 atomic_inc(&src_eb->refs); 2344 src_path->nodes[root_level] = src_eb; 2345 src_path->slots[root_level] = dst_path->slots[root_level]; 2346 src_path->locks[root_level] = 0; 2347 2348 /* A simplified version of btrfs_search_slot() */ 2349 while (cur_level >= dst_level) { 2350 struct btrfs_key src_key; 2351 struct btrfs_key dst_key; 2352 2353 if (src_path->nodes[cur_level] == NULL) { 2354 struct extent_buffer *eb; 2355 int parent_slot; 2356 2357 eb = src_path->nodes[cur_level + 1]; 2358 parent_slot = src_path->slots[cur_level + 1]; 2359 2360 eb = btrfs_read_node_slot(eb, parent_slot); 2361 if (IS_ERR(eb)) { 2362 ret = PTR_ERR(eb); 2363 goto out; 2364 } 2365 2366 src_path->nodes[cur_level] = eb; 2367 2368 btrfs_tree_read_lock(eb); 2369 src_path->locks[cur_level] = BTRFS_READ_LOCK; 2370 } 2371 2372 src_path->slots[cur_level] = dst_path->slots[cur_level]; 2373 if (cur_level) { 2374 btrfs_node_key_to_cpu(dst_path->nodes[cur_level], 2375 &dst_key, dst_path->slots[cur_level]); 2376 btrfs_node_key_to_cpu(src_path->nodes[cur_level], 2377 &src_key, src_path->slots[cur_level]); 2378 } else { 2379 btrfs_item_key_to_cpu(dst_path->nodes[cur_level], 2380 &dst_key, dst_path->slots[cur_level]); 2381 btrfs_item_key_to_cpu(src_path->nodes[cur_level], 2382 &src_key, src_path->slots[cur_level]); 2383 } 2384 /* Content mismatch, something went wrong */ 2385 if (btrfs_comp_cpu_keys(&dst_key, &src_key)) { 2386 ret = -ENOENT; 2387 goto out; 2388 } 2389 cur_level--; 2390 } 2391 2392 /* 2393 * Now both @dst_path and @src_path have been populated, record the tree 2394 * blocks for qgroup accounting. 2395 */ 2396 ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start, 2397 nodesize); 2398 if (ret < 0) 2399 goto out; 2400 ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start, 2401 nodesize); 2402 if (ret < 0) 2403 goto out; 2404 2405 /* Record leaf file extents */ 2406 if (dst_level == 0 && trace_leaf) { 2407 ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]); 2408 if (ret < 0) 2409 goto out; 2410 ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]); 2411 } 2412 out: 2413 btrfs_free_path(src_path); 2414 return ret; 2415 } 2416 2417 /* 2418 * Helper function to do recursive generation-aware depth-first search, to 2419 * locate all new tree blocks in a subtree of reloc tree. 2420 * 2421 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot) 2422 * reloc tree 2423 * L2 NN (a) 2424 * / \ 2425 * L1 OO NN (b) 2426 * / \ / \ 2427 * L0 OO OO OO NN 2428 * (c) (d) 2429 * If we pass: 2430 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ], 2431 * @cur_level = 1 2432 * @root_level = 1 2433 * 2434 * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace 2435 * above tree blocks along with their counter parts in file tree. 2436 * While during search, old tree blocks OO(c) will be skipped as tree block swap 2437 * won't affect OO(c). 2438 */ 2439 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans, 2440 struct extent_buffer *src_eb, 2441 struct btrfs_path *dst_path, 2442 int cur_level, int root_level, 2443 u64 last_snapshot, bool trace_leaf) 2444 { 2445 struct btrfs_fs_info *fs_info = trans->fs_info; 2446 struct extent_buffer *eb; 2447 bool need_cleanup = false; 2448 int ret = 0; 2449 int i; 2450 2451 /* Level sanity check */ 2452 if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 || 2453 root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 || 2454 root_level < cur_level) { 2455 btrfs_err_rl(fs_info, 2456 "%s: bad levels, cur_level=%d root_level=%d", 2457 __func__, cur_level, root_level); 2458 return -EUCLEAN; 2459 } 2460 2461 /* Read the tree block if needed */ 2462 if (dst_path->nodes[cur_level] == NULL) { 2463 int parent_slot; 2464 u64 child_gen; 2465 2466 /* 2467 * dst_path->nodes[root_level] must be initialized before 2468 * calling this function. 2469 */ 2470 if (cur_level == root_level) { 2471 btrfs_err_rl(fs_info, 2472 "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d", 2473 __func__, root_level, root_level, cur_level); 2474 return -EUCLEAN; 2475 } 2476 2477 /* 2478 * We need to get child blockptr/gen from parent before we can 2479 * read it. 2480 */ 2481 eb = dst_path->nodes[cur_level + 1]; 2482 parent_slot = dst_path->slots[cur_level + 1]; 2483 child_gen = btrfs_node_ptr_generation(eb, parent_slot); 2484 2485 /* This node is old, no need to trace */ 2486 if (child_gen < last_snapshot) 2487 goto out; 2488 2489 eb = btrfs_read_node_slot(eb, parent_slot); 2490 if (IS_ERR(eb)) { 2491 ret = PTR_ERR(eb); 2492 goto out; 2493 } 2494 2495 dst_path->nodes[cur_level] = eb; 2496 dst_path->slots[cur_level] = 0; 2497 2498 btrfs_tree_read_lock(eb); 2499 dst_path->locks[cur_level] = BTRFS_READ_LOCK; 2500 need_cleanup = true; 2501 } 2502 2503 /* Now record this tree block and its counter part for qgroups */ 2504 ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level, 2505 root_level, trace_leaf); 2506 if (ret < 0) 2507 goto cleanup; 2508 2509 eb = dst_path->nodes[cur_level]; 2510 2511 if (cur_level > 0) { 2512 /* Iterate all child tree blocks */ 2513 for (i = 0; i < btrfs_header_nritems(eb); i++) { 2514 /* Skip old tree blocks as they won't be swapped */ 2515 if (btrfs_node_ptr_generation(eb, i) < last_snapshot) 2516 continue; 2517 dst_path->slots[cur_level] = i; 2518 2519 /* Recursive call (at most 7 times) */ 2520 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, 2521 dst_path, cur_level - 1, root_level, 2522 last_snapshot, trace_leaf); 2523 if (ret < 0) 2524 goto cleanup; 2525 } 2526 } 2527 2528 cleanup: 2529 if (need_cleanup) { 2530 /* Clean up */ 2531 btrfs_tree_unlock_rw(dst_path->nodes[cur_level], 2532 dst_path->locks[cur_level]); 2533 free_extent_buffer(dst_path->nodes[cur_level]); 2534 dst_path->nodes[cur_level] = NULL; 2535 dst_path->slots[cur_level] = 0; 2536 dst_path->locks[cur_level] = 0; 2537 } 2538 out: 2539 return ret; 2540 } 2541 2542 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans, 2543 struct extent_buffer *src_eb, 2544 struct extent_buffer *dst_eb, 2545 u64 last_snapshot, bool trace_leaf) 2546 { 2547 struct btrfs_fs_info *fs_info = trans->fs_info; 2548 struct btrfs_path *dst_path = NULL; 2549 int level; 2550 int ret; 2551 2552 if (!btrfs_qgroup_full_accounting(fs_info)) 2553 return 0; 2554 2555 /* Wrong parameter order */ 2556 if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) { 2557 btrfs_err_rl(fs_info, 2558 "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__, 2559 btrfs_header_generation(src_eb), 2560 btrfs_header_generation(dst_eb)); 2561 return -EUCLEAN; 2562 } 2563 2564 if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) { 2565 ret = -EIO; 2566 goto out; 2567 } 2568 2569 level = btrfs_header_level(dst_eb); 2570 dst_path = btrfs_alloc_path(); 2571 if (!dst_path) { 2572 ret = -ENOMEM; 2573 goto out; 2574 } 2575 /* For dst_path */ 2576 atomic_inc(&dst_eb->refs); 2577 dst_path->nodes[level] = dst_eb; 2578 dst_path->slots[level] = 0; 2579 dst_path->locks[level] = 0; 2580 2581 /* Do the generation aware breadth-first search */ 2582 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level, 2583 level, last_snapshot, trace_leaf); 2584 if (ret < 0) 2585 goto out; 2586 ret = 0; 2587 2588 out: 2589 btrfs_free_path(dst_path); 2590 if (ret < 0) 2591 qgroup_mark_inconsistent(fs_info); 2592 return ret; 2593 } 2594 2595 /* 2596 * Inform qgroup to trace a whole subtree, including all its child tree 2597 * blocks and data. 2598 * The root tree block is specified by @root_eb. 2599 * 2600 * Normally used by relocation(tree block swap) and subvolume deletion. 2601 * 2602 * Return 0 for success 2603 * Return <0 for error(ENOMEM or tree search error) 2604 */ 2605 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, 2606 struct extent_buffer *root_eb, 2607 u64 root_gen, int root_level) 2608 { 2609 struct btrfs_fs_info *fs_info = trans->fs_info; 2610 int ret = 0; 2611 int level; 2612 u8 drop_subptree_thres; 2613 struct extent_buffer *eb = root_eb; 2614 struct btrfs_path *path = NULL; 2615 2616 ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL); 2617 ASSERT(root_eb != NULL); 2618 2619 if (!btrfs_qgroup_full_accounting(fs_info)) 2620 return 0; 2621 2622 spin_lock(&fs_info->qgroup_lock); 2623 drop_subptree_thres = fs_info->qgroup_drop_subtree_thres; 2624 spin_unlock(&fs_info->qgroup_lock); 2625 2626 /* 2627 * This function only gets called for snapshot drop, if we hit a high 2628 * node here, it means we are going to change ownership for quite a lot 2629 * of extents, which will greatly slow down btrfs_commit_transaction(). 2630 * 2631 * So here if we find a high tree here, we just skip the accounting and 2632 * mark qgroup inconsistent. 2633 */ 2634 if (root_level >= drop_subptree_thres) { 2635 qgroup_mark_inconsistent(fs_info); 2636 return 0; 2637 } 2638 2639 if (!extent_buffer_uptodate(root_eb)) { 2640 struct btrfs_tree_parent_check check = { 2641 .has_first_key = false, 2642 .transid = root_gen, 2643 .level = root_level 2644 }; 2645 2646 ret = btrfs_read_extent_buffer(root_eb, &check); 2647 if (ret) 2648 goto out; 2649 } 2650 2651 if (root_level == 0) { 2652 ret = btrfs_qgroup_trace_leaf_items(trans, root_eb); 2653 goto out; 2654 } 2655 2656 path = btrfs_alloc_path(); 2657 if (!path) 2658 return -ENOMEM; 2659 2660 /* 2661 * Walk down the tree. Missing extent blocks are filled in as 2662 * we go. Metadata is accounted every time we read a new 2663 * extent block. 2664 * 2665 * When we reach a leaf, we account for file extent items in it, 2666 * walk back up the tree (adjusting slot pointers as we go) 2667 * and restart the search process. 2668 */ 2669 atomic_inc(&root_eb->refs); /* For path */ 2670 path->nodes[root_level] = root_eb; 2671 path->slots[root_level] = 0; 2672 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ 2673 walk_down: 2674 level = root_level; 2675 while (level >= 0) { 2676 if (path->nodes[level] == NULL) { 2677 int parent_slot; 2678 u64 child_bytenr; 2679 2680 /* 2681 * We need to get child blockptr from parent before we 2682 * can read it. 2683 */ 2684 eb = path->nodes[level + 1]; 2685 parent_slot = path->slots[level + 1]; 2686 child_bytenr = btrfs_node_blockptr(eb, parent_slot); 2687 2688 eb = btrfs_read_node_slot(eb, parent_slot); 2689 if (IS_ERR(eb)) { 2690 ret = PTR_ERR(eb); 2691 goto out; 2692 } 2693 2694 path->nodes[level] = eb; 2695 path->slots[level] = 0; 2696 2697 btrfs_tree_read_lock(eb); 2698 path->locks[level] = BTRFS_READ_LOCK; 2699 2700 ret = btrfs_qgroup_trace_extent(trans, child_bytenr, 2701 fs_info->nodesize); 2702 if (ret) 2703 goto out; 2704 } 2705 2706 if (level == 0) { 2707 ret = btrfs_qgroup_trace_leaf_items(trans, 2708 path->nodes[level]); 2709 if (ret) 2710 goto out; 2711 2712 /* Nonzero return here means we completed our search */ 2713 ret = adjust_slots_upwards(path, root_level); 2714 if (ret) 2715 break; 2716 2717 /* Restart search with new slots */ 2718 goto walk_down; 2719 } 2720 2721 level--; 2722 } 2723 2724 ret = 0; 2725 out: 2726 btrfs_free_path(path); 2727 2728 return ret; 2729 } 2730 2731 static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup) 2732 { 2733 if (!list_empty(&qgroup->nested_iterator)) 2734 return; 2735 2736 list_add_tail(&qgroup->nested_iterator, head); 2737 } 2738 2739 static void qgroup_iterator_nested_clean(struct list_head *head) 2740 { 2741 while (!list_empty(head)) { 2742 struct btrfs_qgroup *qgroup; 2743 2744 qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator); 2745 list_del_init(&qgroup->nested_iterator); 2746 } 2747 } 2748 2749 #define UPDATE_NEW 0 2750 #define UPDATE_OLD 1 2751 /* 2752 * Walk all of the roots that points to the bytenr and adjust their refcnts. 2753 */ 2754 static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info, 2755 struct ulist *roots, struct list_head *qgroups, 2756 u64 seq, int update_old) 2757 { 2758 struct ulist_node *unode; 2759 struct ulist_iterator uiter; 2760 struct btrfs_qgroup *qg; 2761 2762 if (!roots) 2763 return; 2764 ULIST_ITER_INIT(&uiter); 2765 while ((unode = ulist_next(roots, &uiter))) { 2766 LIST_HEAD(tmp); 2767 2768 qg = find_qgroup_rb(fs_info, unode->val); 2769 if (!qg) 2770 continue; 2771 2772 qgroup_iterator_nested_add(qgroups, qg); 2773 qgroup_iterator_add(&tmp, qg); 2774 list_for_each_entry(qg, &tmp, iterator) { 2775 struct btrfs_qgroup_list *glist; 2776 2777 if (update_old) 2778 btrfs_qgroup_update_old_refcnt(qg, seq, 1); 2779 else 2780 btrfs_qgroup_update_new_refcnt(qg, seq, 1); 2781 2782 list_for_each_entry(glist, &qg->groups, next_group) { 2783 qgroup_iterator_nested_add(qgroups, glist->group); 2784 qgroup_iterator_add(&tmp, glist->group); 2785 } 2786 } 2787 qgroup_iterator_clean(&tmp); 2788 } 2789 } 2790 2791 /* 2792 * Update qgroup rfer/excl counters. 2793 * Rfer update is easy, codes can explain themselves. 2794 * 2795 * Excl update is tricky, the update is split into 2 parts. 2796 * Part 1: Possible exclusive <-> sharing detect: 2797 * | A | !A | 2798 * ------------------------------------- 2799 * B | * | - | 2800 * ------------------------------------- 2801 * !B | + | ** | 2802 * ------------------------------------- 2803 * 2804 * Conditions: 2805 * A: cur_old_roots < nr_old_roots (not exclusive before) 2806 * !A: cur_old_roots == nr_old_roots (possible exclusive before) 2807 * B: cur_new_roots < nr_new_roots (not exclusive now) 2808 * !B: cur_new_roots == nr_new_roots (possible exclusive now) 2809 * 2810 * Results: 2811 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing 2812 * *: Definitely not changed. **: Possible unchanged. 2813 * 2814 * For !A and !B condition, the exception is cur_old/new_roots == 0 case. 2815 * 2816 * To make the logic clear, we first use condition A and B to split 2817 * combination into 4 results. 2818 * 2819 * Then, for result "+" and "-", check old/new_roots == 0 case, as in them 2820 * only on variant maybe 0. 2821 * 2822 * Lastly, check result **, since there are 2 variants maybe 0, split them 2823 * again(2x2). 2824 * But this time we don't need to consider other things, the codes and logic 2825 * is easy to understand now. 2826 */ 2827 static void qgroup_update_counters(struct btrfs_fs_info *fs_info, 2828 struct list_head *qgroups, u64 nr_old_roots, 2829 u64 nr_new_roots, u64 num_bytes, u64 seq) 2830 { 2831 struct btrfs_qgroup *qg; 2832 2833 list_for_each_entry(qg, qgroups, nested_iterator) { 2834 u64 cur_new_count, cur_old_count; 2835 bool dirty = false; 2836 2837 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); 2838 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); 2839 2840 trace_qgroup_update_counters(fs_info, qg, cur_old_count, 2841 cur_new_count); 2842 2843 /* Rfer update part */ 2844 if (cur_old_count == 0 && cur_new_count > 0) { 2845 qg->rfer += num_bytes; 2846 qg->rfer_cmpr += num_bytes; 2847 dirty = true; 2848 } 2849 if (cur_old_count > 0 && cur_new_count == 0) { 2850 qg->rfer -= num_bytes; 2851 qg->rfer_cmpr -= num_bytes; 2852 dirty = true; 2853 } 2854 2855 /* Excl update part */ 2856 /* Exclusive/none -> shared case */ 2857 if (cur_old_count == nr_old_roots && 2858 cur_new_count < nr_new_roots) { 2859 /* Exclusive -> shared */ 2860 if (cur_old_count != 0) { 2861 qg->excl -= num_bytes; 2862 qg->excl_cmpr -= num_bytes; 2863 dirty = true; 2864 } 2865 } 2866 2867 /* Shared -> exclusive/none case */ 2868 if (cur_old_count < nr_old_roots && 2869 cur_new_count == nr_new_roots) { 2870 /* Shared->exclusive */ 2871 if (cur_new_count != 0) { 2872 qg->excl += num_bytes; 2873 qg->excl_cmpr += num_bytes; 2874 dirty = true; 2875 } 2876 } 2877 2878 /* Exclusive/none -> exclusive/none case */ 2879 if (cur_old_count == nr_old_roots && 2880 cur_new_count == nr_new_roots) { 2881 if (cur_old_count == 0) { 2882 /* None -> exclusive/none */ 2883 2884 if (cur_new_count != 0) { 2885 /* None -> exclusive */ 2886 qg->excl += num_bytes; 2887 qg->excl_cmpr += num_bytes; 2888 dirty = true; 2889 } 2890 /* None -> none, nothing changed */ 2891 } else { 2892 /* Exclusive -> exclusive/none */ 2893 2894 if (cur_new_count == 0) { 2895 /* Exclusive -> none */ 2896 qg->excl -= num_bytes; 2897 qg->excl_cmpr -= num_bytes; 2898 dirty = true; 2899 } 2900 /* Exclusive -> exclusive, nothing changed */ 2901 } 2902 } 2903 2904 if (dirty) 2905 qgroup_dirty(fs_info, qg); 2906 } 2907 } 2908 2909 /* 2910 * Check if the @roots potentially is a list of fs tree roots 2911 * 2912 * Return 0 for definitely not a fs/subvol tree roots ulist 2913 * Return 1 for possible fs/subvol tree roots in the list (considering an empty 2914 * one as well) 2915 */ 2916 static int maybe_fs_roots(struct ulist *roots) 2917 { 2918 struct ulist_node *unode; 2919 struct ulist_iterator uiter; 2920 2921 /* Empty one, still possible for fs roots */ 2922 if (!roots || roots->nnodes == 0) 2923 return 1; 2924 2925 ULIST_ITER_INIT(&uiter); 2926 unode = ulist_next(roots, &uiter); 2927 if (!unode) 2928 return 1; 2929 2930 /* 2931 * If it contains fs tree roots, then it must belong to fs/subvol 2932 * trees. 2933 * If it contains a non-fs tree, it won't be shared with fs/subvol trees. 2934 */ 2935 return is_fstree(unode->val); 2936 } 2937 2938 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, 2939 u64 num_bytes, struct ulist *old_roots, 2940 struct ulist *new_roots) 2941 { 2942 struct btrfs_fs_info *fs_info = trans->fs_info; 2943 LIST_HEAD(qgroups); 2944 u64 seq; 2945 u64 nr_new_roots = 0; 2946 u64 nr_old_roots = 0; 2947 int ret = 0; 2948 2949 /* 2950 * If quotas get disabled meanwhile, the resources need to be freed and 2951 * we can't just exit here. 2952 */ 2953 if (!btrfs_qgroup_full_accounting(fs_info) || 2954 fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING) 2955 goto out_free; 2956 2957 if (new_roots) { 2958 if (!maybe_fs_roots(new_roots)) 2959 goto out_free; 2960 nr_new_roots = new_roots->nnodes; 2961 } 2962 if (old_roots) { 2963 if (!maybe_fs_roots(old_roots)) 2964 goto out_free; 2965 nr_old_roots = old_roots->nnodes; 2966 } 2967 2968 /* Quick exit, either not fs tree roots, or won't affect any qgroup */ 2969 if (nr_old_roots == 0 && nr_new_roots == 0) 2970 goto out_free; 2971 2972 trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr, 2973 num_bytes, nr_old_roots, nr_new_roots); 2974 2975 mutex_lock(&fs_info->qgroup_rescan_lock); 2976 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 2977 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) { 2978 mutex_unlock(&fs_info->qgroup_rescan_lock); 2979 ret = 0; 2980 goto out_free; 2981 } 2982 } 2983 mutex_unlock(&fs_info->qgroup_rescan_lock); 2984 2985 spin_lock(&fs_info->qgroup_lock); 2986 seq = fs_info->qgroup_seq; 2987 2988 /* Update old refcnts using old_roots */ 2989 qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD); 2990 2991 /* Update new refcnts using new_roots */ 2992 qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW); 2993 2994 qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots, 2995 num_bytes, seq); 2996 2997 /* 2998 * We're done using the iterator, release all its qgroups while holding 2999 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup() 3000 * and trigger use-after-free accesses to qgroups. 3001 */ 3002 qgroup_iterator_nested_clean(&qgroups); 3003 3004 /* 3005 * Bump qgroup_seq to avoid seq overlap 3006 */ 3007 fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1; 3008 spin_unlock(&fs_info->qgroup_lock); 3009 out_free: 3010 ulist_free(old_roots); 3011 ulist_free(new_roots); 3012 return ret; 3013 } 3014 3015 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) 3016 { 3017 struct btrfs_fs_info *fs_info = trans->fs_info; 3018 struct btrfs_qgroup_extent_record *record; 3019 struct btrfs_delayed_ref_root *delayed_refs; 3020 struct ulist *new_roots = NULL; 3021 struct rb_node *node; 3022 u64 num_dirty_extents = 0; 3023 u64 qgroup_to_skip; 3024 int ret = 0; 3025 3026 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) 3027 return 0; 3028 3029 delayed_refs = &trans->transaction->delayed_refs; 3030 qgroup_to_skip = delayed_refs->qgroup_to_skip; 3031 while ((node = rb_first(&delayed_refs->dirty_extent_root))) { 3032 record = rb_entry(node, struct btrfs_qgroup_extent_record, 3033 node); 3034 3035 num_dirty_extents++; 3036 trace_btrfs_qgroup_account_extents(fs_info, record); 3037 3038 if (!ret && !(fs_info->qgroup_flags & 3039 BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) { 3040 struct btrfs_backref_walk_ctx ctx = { 0 }; 3041 3042 ctx.bytenr = record->bytenr; 3043 ctx.fs_info = fs_info; 3044 3045 /* 3046 * Old roots should be searched when inserting qgroup 3047 * extent record. 3048 * 3049 * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case, 3050 * we may have some record inserted during 3051 * NO_ACCOUNTING (thus no old_roots populated), but 3052 * later we start rescan, which clears NO_ACCOUNTING, 3053 * leaving some inserted records without old_roots 3054 * populated. 3055 * 3056 * Those cases are rare and should not cause too much 3057 * time spent during commit_transaction(). 3058 */ 3059 if (!record->old_roots) { 3060 /* Search commit root to find old_roots */ 3061 ret = btrfs_find_all_roots(&ctx, false); 3062 if (ret < 0) 3063 goto cleanup; 3064 record->old_roots = ctx.roots; 3065 ctx.roots = NULL; 3066 } 3067 3068 /* 3069 * Use BTRFS_SEQ_LAST as time_seq to do special search, 3070 * which doesn't lock tree or delayed_refs and search 3071 * current root. It's safe inside commit_transaction(). 3072 */ 3073 ctx.trans = trans; 3074 ctx.time_seq = BTRFS_SEQ_LAST; 3075 ret = btrfs_find_all_roots(&ctx, false); 3076 if (ret < 0) 3077 goto cleanup; 3078 new_roots = ctx.roots; 3079 if (qgroup_to_skip) { 3080 ulist_del(new_roots, qgroup_to_skip, 0); 3081 ulist_del(record->old_roots, qgroup_to_skip, 3082 0); 3083 } 3084 ret = btrfs_qgroup_account_extent(trans, record->bytenr, 3085 record->num_bytes, 3086 record->old_roots, 3087 new_roots); 3088 record->old_roots = NULL; 3089 new_roots = NULL; 3090 } 3091 /* Free the reserved data space */ 3092 btrfs_qgroup_free_refroot(fs_info, 3093 record->data_rsv_refroot, 3094 record->data_rsv, 3095 BTRFS_QGROUP_RSV_DATA); 3096 cleanup: 3097 ulist_free(record->old_roots); 3098 ulist_free(new_roots); 3099 new_roots = NULL; 3100 rb_erase(node, &delayed_refs->dirty_extent_root); 3101 kfree(record); 3102 3103 } 3104 trace_qgroup_num_dirty_extents(fs_info, trans->transid, 3105 num_dirty_extents); 3106 return ret; 3107 } 3108 3109 /* 3110 * Writes all changed qgroups to disk. 3111 * Called by the transaction commit path and the qgroup assign ioctl. 3112 */ 3113 int btrfs_run_qgroups(struct btrfs_trans_handle *trans) 3114 { 3115 struct btrfs_fs_info *fs_info = trans->fs_info; 3116 int ret = 0; 3117 3118 /* 3119 * In case we are called from the qgroup assign ioctl, assert that we 3120 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota 3121 * disable operation (ioctl) and access a freed quota root. 3122 */ 3123 if (trans->transaction->state != TRANS_STATE_COMMIT_DOING) 3124 lockdep_assert_held(&fs_info->qgroup_ioctl_lock); 3125 3126 if (!fs_info->quota_root) 3127 return ret; 3128 3129 spin_lock(&fs_info->qgroup_lock); 3130 while (!list_empty(&fs_info->dirty_qgroups)) { 3131 struct btrfs_qgroup *qgroup; 3132 qgroup = list_first_entry(&fs_info->dirty_qgroups, 3133 struct btrfs_qgroup, dirty); 3134 list_del_init(&qgroup->dirty); 3135 spin_unlock(&fs_info->qgroup_lock); 3136 ret = update_qgroup_info_item(trans, qgroup); 3137 if (ret) 3138 qgroup_mark_inconsistent(fs_info); 3139 ret = update_qgroup_limit_item(trans, qgroup); 3140 if (ret) 3141 qgroup_mark_inconsistent(fs_info); 3142 spin_lock(&fs_info->qgroup_lock); 3143 } 3144 if (btrfs_qgroup_enabled(fs_info)) 3145 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; 3146 else 3147 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 3148 spin_unlock(&fs_info->qgroup_lock); 3149 3150 ret = update_qgroup_status_item(trans); 3151 if (ret) 3152 qgroup_mark_inconsistent(fs_info); 3153 3154 return ret; 3155 } 3156 3157 int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info, 3158 struct btrfs_qgroup_inherit *inherit, 3159 size_t size) 3160 { 3161 if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP) 3162 return -EOPNOTSUPP; 3163 if (size < sizeof(*inherit) || size > PAGE_SIZE) 3164 return -EINVAL; 3165 3166 /* 3167 * In the past we allowed btrfs_qgroup_inherit to specify to copy 3168 * rfer/excl numbers directly from other qgroups. This behavior has 3169 * been disabled in userspace for a very long time, but here we should 3170 * also disable it in kernel, as this behavior is known to mark qgroup 3171 * inconsistent, and a rescan would wipe out the changes anyway. 3172 * 3173 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies. 3174 */ 3175 if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0) 3176 return -EINVAL; 3177 3178 if (size != struct_size(inherit, qgroups, inherit->num_qgroups)) 3179 return -EINVAL; 3180 3181 /* 3182 * Skip the inherit source qgroups check if qgroup is not enabled. 3183 * Qgroup can still be later enabled causing problems, but in that case 3184 * btrfs_qgroup_inherit() would just ignore those invalid ones. 3185 */ 3186 if (!btrfs_qgroup_enabled(fs_info)) 3187 return 0; 3188 3189 /* 3190 * Now check all the remaining qgroups, they should all: 3191 * 3192 * - Exist 3193 * - Be higher level qgroups. 3194 */ 3195 for (int i = 0; i < inherit->num_qgroups; i++) { 3196 struct btrfs_qgroup *qgroup; 3197 u64 qgroupid = inherit->qgroups[i]; 3198 3199 if (btrfs_qgroup_level(qgroupid) == 0) 3200 return -EINVAL; 3201 3202 spin_lock(&fs_info->qgroup_lock); 3203 qgroup = find_qgroup_rb(fs_info, qgroupid); 3204 if (!qgroup) { 3205 spin_unlock(&fs_info->qgroup_lock); 3206 return -ENOENT; 3207 } 3208 spin_unlock(&fs_info->qgroup_lock); 3209 } 3210 return 0; 3211 } 3212 3213 static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info, 3214 u64 inode_rootid, 3215 struct btrfs_qgroup_inherit **inherit) 3216 { 3217 int i = 0; 3218 u64 num_qgroups = 0; 3219 struct btrfs_qgroup *inode_qg; 3220 struct btrfs_qgroup_list *qg_list; 3221 struct btrfs_qgroup_inherit *res; 3222 size_t struct_sz; 3223 u64 *qgids; 3224 3225 if (*inherit) 3226 return -EEXIST; 3227 3228 inode_qg = find_qgroup_rb(fs_info, inode_rootid); 3229 if (!inode_qg) 3230 return -ENOENT; 3231 3232 num_qgroups = list_count_nodes(&inode_qg->groups); 3233 3234 if (!num_qgroups) 3235 return 0; 3236 3237 struct_sz = struct_size(res, qgroups, num_qgroups); 3238 if (struct_sz == SIZE_MAX) 3239 return -ERANGE; 3240 3241 res = kzalloc(struct_sz, GFP_NOFS); 3242 if (!res) 3243 return -ENOMEM; 3244 res->num_qgroups = num_qgroups; 3245 qgids = res->qgroups; 3246 3247 list_for_each_entry(qg_list, &inode_qg->groups, next_group) 3248 qgids[i++] = qg_list->group->qgroupid; 3249 3250 *inherit = res; 3251 return 0; 3252 } 3253 3254 /* 3255 * Check if we can skip rescan when inheriting qgroups. If @src has a single 3256 * @parent, and that @parent is owning all its bytes exclusively, we can skip 3257 * the full rescan, by just adding nodesize to the @parent's excl/rfer. 3258 * 3259 * Return <0 for fatal errors (like srcid/parentid has no qgroup). 3260 * Return 0 if a quick inherit is done. 3261 * Return >0 if a quick inherit is not possible, and a full rescan is needed. 3262 */ 3263 static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info, 3264 u64 srcid, u64 parentid) 3265 { 3266 struct btrfs_qgroup *src; 3267 struct btrfs_qgroup *parent; 3268 struct btrfs_qgroup_list *list; 3269 int nr_parents = 0; 3270 3271 src = find_qgroup_rb(fs_info, srcid); 3272 if (!src) 3273 return -ENOENT; 3274 parent = find_qgroup_rb(fs_info, parentid); 3275 if (!parent) 3276 return -ENOENT; 3277 3278 /* 3279 * Source has no parent qgroup, but our new qgroup would have one. 3280 * Qgroup numbers would become inconsistent. 3281 */ 3282 if (list_empty(&src->groups)) 3283 return 1; 3284 3285 list_for_each_entry(list, &src->groups, next_group) { 3286 /* The parent is not the same, quick update is not possible. */ 3287 if (list->group->qgroupid != parentid) 3288 return 1; 3289 nr_parents++; 3290 /* 3291 * More than one parent qgroup, we can't be sure about accounting 3292 * consistency. 3293 */ 3294 if (nr_parents > 1) 3295 return 1; 3296 } 3297 3298 /* 3299 * The parent is not exclusively owning all its bytes. We're not sure 3300 * if the source has any bytes not fully owned by the parent. 3301 */ 3302 if (parent->excl != parent->rfer) 3303 return 1; 3304 3305 parent->excl += fs_info->nodesize; 3306 parent->rfer += fs_info->nodesize; 3307 return 0; 3308 } 3309 3310 /* 3311 * Copy the accounting information between qgroups. This is necessary 3312 * when a snapshot or a subvolume is created. Throwing an error will 3313 * cause a transaction abort so we take extra care here to only error 3314 * when a readonly fs is a reasonable outcome. 3315 */ 3316 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, 3317 u64 objectid, u64 inode_rootid, 3318 struct btrfs_qgroup_inherit *inherit) 3319 { 3320 int ret = 0; 3321 u64 *i_qgroups; 3322 bool committing = false; 3323 struct btrfs_fs_info *fs_info = trans->fs_info; 3324 struct btrfs_root *quota_root; 3325 struct btrfs_qgroup *srcgroup; 3326 struct btrfs_qgroup *dstgroup; 3327 struct btrfs_qgroup *prealloc; 3328 struct btrfs_qgroup_list **qlist_prealloc = NULL; 3329 bool free_inherit = false; 3330 bool need_rescan = false; 3331 u32 level_size = 0; 3332 u64 nums; 3333 3334 prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); 3335 if (!prealloc) 3336 return -ENOMEM; 3337 3338 /* 3339 * There are only two callers of this function. 3340 * 3341 * One in create_subvol() in the ioctl context, which needs to hold 3342 * the qgroup_ioctl_lock. 3343 * 3344 * The other one in create_pending_snapshot() where no other qgroup 3345 * code can modify the fs as they all need to either start a new trans 3346 * or hold a trans handler, thus we don't need to hold 3347 * qgroup_ioctl_lock. 3348 * This would avoid long and complex lock chain and make lockdep happy. 3349 */ 3350 spin_lock(&fs_info->trans_lock); 3351 if (trans->transaction->state == TRANS_STATE_COMMIT_DOING) 3352 committing = true; 3353 spin_unlock(&fs_info->trans_lock); 3354 3355 if (!committing) 3356 mutex_lock(&fs_info->qgroup_ioctl_lock); 3357 if (!btrfs_qgroup_enabled(fs_info)) 3358 goto out; 3359 3360 quota_root = fs_info->quota_root; 3361 if (!quota_root) { 3362 ret = -EINVAL; 3363 goto out; 3364 } 3365 3366 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) { 3367 ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit); 3368 if (ret) 3369 goto out; 3370 free_inherit = true; 3371 } 3372 3373 if (inherit) { 3374 i_qgroups = (u64 *)(inherit + 1); 3375 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + 3376 2 * inherit->num_excl_copies; 3377 for (int i = 0; i < nums; i++) { 3378 srcgroup = find_qgroup_rb(fs_info, *i_qgroups); 3379 3380 /* 3381 * Zero out invalid groups so we can ignore 3382 * them later. 3383 */ 3384 if (!srcgroup || 3385 ((srcgroup->qgroupid >> 48) <= (objectid >> 48))) 3386 *i_qgroups = 0ULL; 3387 3388 ++i_qgroups; 3389 } 3390 } 3391 3392 /* 3393 * create a tracking group for the subvol itself 3394 */ 3395 ret = add_qgroup_item(trans, quota_root, objectid); 3396 if (ret) 3397 goto out; 3398 3399 /* 3400 * add qgroup to all inherited groups 3401 */ 3402 if (inherit) { 3403 i_qgroups = (u64 *)(inherit + 1); 3404 for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) { 3405 if (*i_qgroups == 0) 3406 continue; 3407 ret = add_qgroup_relation_item(trans, objectid, 3408 *i_qgroups); 3409 if (ret && ret != -EEXIST) 3410 goto out; 3411 ret = add_qgroup_relation_item(trans, *i_qgroups, 3412 objectid); 3413 if (ret && ret != -EEXIST) 3414 goto out; 3415 } 3416 ret = 0; 3417 3418 qlist_prealloc = kcalloc(inherit->num_qgroups, 3419 sizeof(struct btrfs_qgroup_list *), 3420 GFP_NOFS); 3421 if (!qlist_prealloc) { 3422 ret = -ENOMEM; 3423 goto out; 3424 } 3425 for (int i = 0; i < inherit->num_qgroups; i++) { 3426 qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list), 3427 GFP_NOFS); 3428 if (!qlist_prealloc[i]) { 3429 ret = -ENOMEM; 3430 goto out; 3431 } 3432 } 3433 } 3434 3435 spin_lock(&fs_info->qgroup_lock); 3436 3437 dstgroup = add_qgroup_rb(fs_info, prealloc, objectid); 3438 prealloc = NULL; 3439 3440 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) { 3441 dstgroup->lim_flags = inherit->lim.flags; 3442 dstgroup->max_rfer = inherit->lim.max_rfer; 3443 dstgroup->max_excl = inherit->lim.max_excl; 3444 dstgroup->rsv_rfer = inherit->lim.rsv_rfer; 3445 dstgroup->rsv_excl = inherit->lim.rsv_excl; 3446 3447 qgroup_dirty(fs_info, dstgroup); 3448 } 3449 3450 if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) { 3451 srcgroup = find_qgroup_rb(fs_info, srcid); 3452 if (!srcgroup) 3453 goto unlock; 3454 3455 /* 3456 * We call inherit after we clone the root in order to make sure 3457 * our counts don't go crazy, so at this point the only 3458 * difference between the two roots should be the root node. 3459 */ 3460 level_size = fs_info->nodesize; 3461 dstgroup->rfer = srcgroup->rfer; 3462 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; 3463 dstgroup->excl = level_size; 3464 dstgroup->excl_cmpr = level_size; 3465 srcgroup->excl = level_size; 3466 srcgroup->excl_cmpr = level_size; 3467 3468 /* inherit the limit info */ 3469 dstgroup->lim_flags = srcgroup->lim_flags; 3470 dstgroup->max_rfer = srcgroup->max_rfer; 3471 dstgroup->max_excl = srcgroup->max_excl; 3472 dstgroup->rsv_rfer = srcgroup->rsv_rfer; 3473 dstgroup->rsv_excl = srcgroup->rsv_excl; 3474 3475 qgroup_dirty(fs_info, dstgroup); 3476 qgroup_dirty(fs_info, srcgroup); 3477 3478 /* 3479 * If the source qgroup has parent but the new one doesn't, 3480 * we need a full rescan. 3481 */ 3482 if (!inherit && !list_empty(&srcgroup->groups)) 3483 need_rescan = true; 3484 } 3485 3486 if (!inherit) 3487 goto unlock; 3488 3489 i_qgroups = (u64 *)(inherit + 1); 3490 for (int i = 0; i < inherit->num_qgroups; i++) { 3491 if (*i_qgroups) { 3492 ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid, 3493 *i_qgroups); 3494 qlist_prealloc[i] = NULL; 3495 if (ret) 3496 goto unlock; 3497 } 3498 if (srcid) { 3499 /* Check if we can do a quick inherit. */ 3500 ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups); 3501 if (ret < 0) 3502 goto unlock; 3503 if (ret > 0) 3504 need_rescan = true; 3505 ret = 0; 3506 } 3507 ++i_qgroups; 3508 } 3509 3510 for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) { 3511 struct btrfs_qgroup *src; 3512 struct btrfs_qgroup *dst; 3513 3514 if (!i_qgroups[0] || !i_qgroups[1]) 3515 continue; 3516 3517 src = find_qgroup_rb(fs_info, i_qgroups[0]); 3518 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 3519 3520 if (!src || !dst) { 3521 ret = -EINVAL; 3522 goto unlock; 3523 } 3524 3525 dst->rfer = src->rfer - level_size; 3526 dst->rfer_cmpr = src->rfer_cmpr - level_size; 3527 3528 /* Manually tweaking numbers certainly needs a rescan */ 3529 need_rescan = true; 3530 } 3531 for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) { 3532 struct btrfs_qgroup *src; 3533 struct btrfs_qgroup *dst; 3534 3535 if (!i_qgroups[0] || !i_qgroups[1]) 3536 continue; 3537 3538 src = find_qgroup_rb(fs_info, i_qgroups[0]); 3539 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 3540 3541 if (!src || !dst) { 3542 ret = -EINVAL; 3543 goto unlock; 3544 } 3545 3546 dst->excl = src->excl + level_size; 3547 dst->excl_cmpr = src->excl_cmpr + level_size; 3548 need_rescan = true; 3549 } 3550 3551 unlock: 3552 spin_unlock(&fs_info->qgroup_lock); 3553 if (!ret) 3554 ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup); 3555 out: 3556 if (!committing) 3557 mutex_unlock(&fs_info->qgroup_ioctl_lock); 3558 if (need_rescan) 3559 qgroup_mark_inconsistent(fs_info); 3560 if (qlist_prealloc) { 3561 for (int i = 0; i < inherit->num_qgroups; i++) 3562 kfree(qlist_prealloc[i]); 3563 kfree(qlist_prealloc); 3564 } 3565 if (free_inherit) 3566 kfree(inherit); 3567 kfree(prealloc); 3568 return ret; 3569 } 3570 3571 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) 3572 { 3573 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 3574 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) 3575 return false; 3576 3577 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && 3578 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) 3579 return false; 3580 3581 return true; 3582 } 3583 3584 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, 3585 enum btrfs_qgroup_rsv_type type) 3586 { 3587 struct btrfs_qgroup *qgroup; 3588 struct btrfs_fs_info *fs_info = root->fs_info; 3589 u64 ref_root = btrfs_root_id(root); 3590 int ret = 0; 3591 LIST_HEAD(qgroup_list); 3592 3593 if (!is_fstree(ref_root)) 3594 return 0; 3595 3596 if (num_bytes == 0) 3597 return 0; 3598 3599 if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) && 3600 capable(CAP_SYS_RESOURCE)) 3601 enforce = false; 3602 3603 spin_lock(&fs_info->qgroup_lock); 3604 if (!fs_info->quota_root) 3605 goto out; 3606 3607 qgroup = find_qgroup_rb(fs_info, ref_root); 3608 if (!qgroup) 3609 goto out; 3610 3611 qgroup_iterator_add(&qgroup_list, qgroup); 3612 list_for_each_entry(qgroup, &qgroup_list, iterator) { 3613 struct btrfs_qgroup_list *glist; 3614 3615 if (enforce && !qgroup_check_limits(qgroup, num_bytes)) { 3616 ret = -EDQUOT; 3617 goto out; 3618 } 3619 3620 list_for_each_entry(glist, &qgroup->groups, next_group) 3621 qgroup_iterator_add(&qgroup_list, glist->group); 3622 } 3623 3624 ret = 0; 3625 /* 3626 * no limits exceeded, now record the reservation into all qgroups 3627 */ 3628 list_for_each_entry(qgroup, &qgroup_list, iterator) 3629 qgroup_rsv_add(fs_info, qgroup, num_bytes, type); 3630 3631 out: 3632 qgroup_iterator_clean(&qgroup_list); 3633 spin_unlock(&fs_info->qgroup_lock); 3634 return ret; 3635 } 3636 3637 /* 3638 * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0 3639 * qgroup). 3640 * 3641 * Will handle all higher level qgroup too. 3642 * 3643 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup. 3644 * This special case is only used for META_PERTRANS type. 3645 */ 3646 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, 3647 u64 ref_root, u64 num_bytes, 3648 enum btrfs_qgroup_rsv_type type) 3649 { 3650 struct btrfs_qgroup *qgroup; 3651 LIST_HEAD(qgroup_list); 3652 3653 if (!is_fstree(ref_root)) 3654 return; 3655 3656 if (num_bytes == 0) 3657 return; 3658 3659 if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) { 3660 WARN(1, "%s: Invalid type to free", __func__); 3661 return; 3662 } 3663 spin_lock(&fs_info->qgroup_lock); 3664 3665 if (!fs_info->quota_root) 3666 goto out; 3667 3668 qgroup = find_qgroup_rb(fs_info, ref_root); 3669 if (!qgroup) 3670 goto out; 3671 3672 if (num_bytes == (u64)-1) 3673 /* 3674 * We're freeing all pertrans rsv, get reserved value from 3675 * level 0 qgroup as real num_bytes to free. 3676 */ 3677 num_bytes = qgroup->rsv.values[type]; 3678 3679 qgroup_iterator_add(&qgroup_list, qgroup); 3680 list_for_each_entry(qgroup, &qgroup_list, iterator) { 3681 struct btrfs_qgroup_list *glist; 3682 3683 qgroup_rsv_release(fs_info, qgroup, num_bytes, type); 3684 list_for_each_entry(glist, &qgroup->groups, next_group) { 3685 qgroup_iterator_add(&qgroup_list, glist->group); 3686 } 3687 } 3688 out: 3689 qgroup_iterator_clean(&qgroup_list); 3690 spin_unlock(&fs_info->qgroup_lock); 3691 } 3692 3693 /* 3694 * Check if the leaf is the last leaf. Which means all node pointers 3695 * are at their last position. 3696 */ 3697 static bool is_last_leaf(struct btrfs_path *path) 3698 { 3699 int i; 3700 3701 for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { 3702 if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) 3703 return false; 3704 } 3705 return true; 3706 } 3707 3708 /* 3709 * returns < 0 on error, 0 when more leafs are to be scanned. 3710 * returns 1 when done. 3711 */ 3712 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans, 3713 struct btrfs_path *path) 3714 { 3715 struct btrfs_fs_info *fs_info = trans->fs_info; 3716 struct btrfs_root *extent_root; 3717 struct btrfs_key found; 3718 struct extent_buffer *scratch_leaf = NULL; 3719 u64 num_bytes; 3720 bool done; 3721 int slot; 3722 int ret; 3723 3724 if (!btrfs_qgroup_full_accounting(fs_info)) 3725 return 1; 3726 3727 mutex_lock(&fs_info->qgroup_rescan_lock); 3728 extent_root = btrfs_extent_root(fs_info, 3729 fs_info->qgroup_rescan_progress.objectid); 3730 ret = btrfs_search_slot_for_read(extent_root, 3731 &fs_info->qgroup_rescan_progress, 3732 path, 1, 0); 3733 3734 btrfs_debug(fs_info, 3735 "current progress key (%llu %u %llu), search_slot ret %d", 3736 fs_info->qgroup_rescan_progress.objectid, 3737 fs_info->qgroup_rescan_progress.type, 3738 fs_info->qgroup_rescan_progress.offset, ret); 3739 3740 if (ret) { 3741 /* 3742 * The rescan is about to end, we will not be scanning any 3743 * further blocks. We cannot unset the RESCAN flag here, because 3744 * we want to commit the transaction if everything went well. 3745 * To make the live accounting work in this phase, we set our 3746 * scan progress pointer such that every real extent objectid 3747 * will be smaller. 3748 */ 3749 fs_info->qgroup_rescan_progress.objectid = (u64)-1; 3750 btrfs_release_path(path); 3751 mutex_unlock(&fs_info->qgroup_rescan_lock); 3752 return ret; 3753 } 3754 done = is_last_leaf(path); 3755 3756 btrfs_item_key_to_cpu(path->nodes[0], &found, 3757 btrfs_header_nritems(path->nodes[0]) - 1); 3758 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1; 3759 3760 scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]); 3761 if (!scratch_leaf) { 3762 ret = -ENOMEM; 3763 mutex_unlock(&fs_info->qgroup_rescan_lock); 3764 goto out; 3765 } 3766 slot = path->slots[0]; 3767 btrfs_release_path(path); 3768 mutex_unlock(&fs_info->qgroup_rescan_lock); 3769 3770 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) { 3771 struct btrfs_backref_walk_ctx ctx = { 0 }; 3772 3773 btrfs_item_key_to_cpu(scratch_leaf, &found, slot); 3774 if (found.type != BTRFS_EXTENT_ITEM_KEY && 3775 found.type != BTRFS_METADATA_ITEM_KEY) 3776 continue; 3777 if (found.type == BTRFS_METADATA_ITEM_KEY) 3778 num_bytes = fs_info->nodesize; 3779 else 3780 num_bytes = found.offset; 3781 3782 ctx.bytenr = found.objectid; 3783 ctx.fs_info = fs_info; 3784 3785 ret = btrfs_find_all_roots(&ctx, false); 3786 if (ret < 0) 3787 goto out; 3788 /* For rescan, just pass old_roots as NULL */ 3789 ret = btrfs_qgroup_account_extent(trans, found.objectid, 3790 num_bytes, NULL, ctx.roots); 3791 if (ret < 0) 3792 goto out; 3793 } 3794 out: 3795 if (scratch_leaf) 3796 free_extent_buffer(scratch_leaf); 3797 3798 if (done && !ret) { 3799 ret = 1; 3800 fs_info->qgroup_rescan_progress.objectid = (u64)-1; 3801 } 3802 return ret; 3803 } 3804 3805 static bool rescan_should_stop(struct btrfs_fs_info *fs_info) 3806 { 3807 if (btrfs_fs_closing(fs_info)) 3808 return true; 3809 if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) 3810 return true; 3811 if (!btrfs_qgroup_enabled(fs_info)) 3812 return true; 3813 if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) 3814 return true; 3815 return false; 3816 } 3817 3818 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) 3819 { 3820 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, 3821 qgroup_rescan_work); 3822 struct btrfs_path *path; 3823 struct btrfs_trans_handle *trans = NULL; 3824 int ret = 0; 3825 bool stopped = false; 3826 bool did_leaf_rescans = false; 3827 3828 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) 3829 return; 3830 3831 path = btrfs_alloc_path(); 3832 if (!path) { 3833 ret = -ENOMEM; 3834 goto out; 3835 } 3836 /* 3837 * Rescan should only search for commit root, and any later difference 3838 * should be recorded by qgroup 3839 */ 3840 path->search_commit_root = 1; 3841 path->skip_locking = 1; 3842 3843 while (!ret && !(stopped = rescan_should_stop(fs_info))) { 3844 trans = btrfs_start_transaction(fs_info->fs_root, 0); 3845 if (IS_ERR(trans)) { 3846 ret = PTR_ERR(trans); 3847 break; 3848 } 3849 3850 ret = qgroup_rescan_leaf(trans, path); 3851 did_leaf_rescans = true; 3852 3853 if (ret > 0) 3854 btrfs_commit_transaction(trans); 3855 else 3856 btrfs_end_transaction(trans); 3857 } 3858 3859 out: 3860 btrfs_free_path(path); 3861 3862 mutex_lock(&fs_info->qgroup_rescan_lock); 3863 if (ret > 0 && 3864 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { 3865 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 3866 } else if (ret < 0 || stopped) { 3867 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 3868 } 3869 mutex_unlock(&fs_info->qgroup_rescan_lock); 3870 3871 /* 3872 * Only update status, since the previous part has already updated the 3873 * qgroup info, and only if we did any actual work. This also prevents 3874 * race with a concurrent quota disable, which has already set 3875 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at 3876 * btrfs_quota_disable(). 3877 */ 3878 if (did_leaf_rescans) { 3879 trans = btrfs_start_transaction(fs_info->quota_root, 1); 3880 if (IS_ERR(trans)) { 3881 ret = PTR_ERR(trans); 3882 trans = NULL; 3883 btrfs_err(fs_info, 3884 "fail to start transaction for status update: %d", 3885 ret); 3886 } 3887 } else { 3888 trans = NULL; 3889 } 3890 3891 mutex_lock(&fs_info->qgroup_rescan_lock); 3892 if (!stopped || 3893 fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) 3894 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3895 if (trans) { 3896 int ret2 = update_qgroup_status_item(trans); 3897 3898 if (ret2 < 0) { 3899 ret = ret2; 3900 btrfs_err(fs_info, "fail to update qgroup status: %d", ret); 3901 } 3902 } 3903 fs_info->qgroup_rescan_running = false; 3904 fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN; 3905 complete_all(&fs_info->qgroup_rescan_completion); 3906 mutex_unlock(&fs_info->qgroup_rescan_lock); 3907 3908 if (!trans) 3909 return; 3910 3911 btrfs_end_transaction(trans); 3912 3913 if (stopped) { 3914 btrfs_info(fs_info, "qgroup scan paused"); 3915 } else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) { 3916 btrfs_info(fs_info, "qgroup scan cancelled"); 3917 } else if (ret >= 0) { 3918 btrfs_info(fs_info, "qgroup scan completed%s", 3919 ret > 0 ? " (inconsistency flag cleared)" : ""); 3920 } else { 3921 btrfs_err(fs_info, "qgroup scan failed with %d", ret); 3922 } 3923 } 3924 3925 /* 3926 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all 3927 * memory required for the rescan context. 3928 */ 3929 static int 3930 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 3931 int init_flags) 3932 { 3933 int ret = 0; 3934 3935 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) { 3936 btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode"); 3937 return -EINVAL; 3938 } 3939 3940 if (!init_flags) { 3941 /* we're resuming qgroup rescan at mount time */ 3942 if (!(fs_info->qgroup_flags & 3943 BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { 3944 btrfs_debug(fs_info, 3945 "qgroup rescan init failed, qgroup rescan is not queued"); 3946 ret = -EINVAL; 3947 } else if (!(fs_info->qgroup_flags & 3948 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3949 btrfs_debug(fs_info, 3950 "qgroup rescan init failed, qgroup is not enabled"); 3951 ret = -ENOTCONN; 3952 } 3953 3954 if (ret) 3955 return ret; 3956 } 3957 3958 mutex_lock(&fs_info->qgroup_rescan_lock); 3959 3960 if (init_flags) { 3961 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 3962 ret = -EINPROGRESS; 3963 } else if (!(fs_info->qgroup_flags & 3964 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3965 btrfs_debug(fs_info, 3966 "qgroup rescan init failed, qgroup is not enabled"); 3967 ret = -ENOTCONN; 3968 } else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) { 3969 /* Quota disable is in progress */ 3970 ret = -EBUSY; 3971 } 3972 3973 if (ret) { 3974 mutex_unlock(&fs_info->qgroup_rescan_lock); 3975 return ret; 3976 } 3977 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3978 } 3979 3980 memset(&fs_info->qgroup_rescan_progress, 0, 3981 sizeof(fs_info->qgroup_rescan_progress)); 3982 fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN | 3983 BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING); 3984 fs_info->qgroup_rescan_progress.objectid = progress_objectid; 3985 init_completion(&fs_info->qgroup_rescan_completion); 3986 mutex_unlock(&fs_info->qgroup_rescan_lock); 3987 3988 btrfs_init_work(&fs_info->qgroup_rescan_work, 3989 btrfs_qgroup_rescan_worker, NULL); 3990 return 0; 3991 } 3992 3993 static void 3994 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) 3995 { 3996 struct rb_node *n; 3997 struct btrfs_qgroup *qgroup; 3998 3999 spin_lock(&fs_info->qgroup_lock); 4000 /* clear all current qgroup tracking information */ 4001 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { 4002 qgroup = rb_entry(n, struct btrfs_qgroup, node); 4003 qgroup->rfer = 0; 4004 qgroup->rfer_cmpr = 0; 4005 qgroup->excl = 0; 4006 qgroup->excl_cmpr = 0; 4007 qgroup_dirty(fs_info, qgroup); 4008 } 4009 spin_unlock(&fs_info->qgroup_lock); 4010 } 4011 4012 int 4013 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) 4014 { 4015 int ret = 0; 4016 4017 ret = qgroup_rescan_init(fs_info, 0, 1); 4018 if (ret) 4019 return ret; 4020 4021 /* 4022 * We have set the rescan_progress to 0, which means no more 4023 * delayed refs will be accounted by btrfs_qgroup_account_ref. 4024 * However, btrfs_qgroup_account_ref may be right after its call 4025 * to btrfs_find_all_roots, in which case it would still do the 4026 * accounting. 4027 * To solve this, we're committing the transaction, which will 4028 * ensure we run all delayed refs and only after that, we are 4029 * going to clear all tracking information for a clean start. 4030 */ 4031 4032 ret = btrfs_commit_current_transaction(fs_info->fs_root); 4033 if (ret) { 4034 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 4035 return ret; 4036 } 4037 4038 qgroup_rescan_zero_tracking(fs_info); 4039 4040 mutex_lock(&fs_info->qgroup_rescan_lock); 4041 fs_info->qgroup_rescan_running = true; 4042 btrfs_queue_work(fs_info->qgroup_rescan_workers, 4043 &fs_info->qgroup_rescan_work); 4044 mutex_unlock(&fs_info->qgroup_rescan_lock); 4045 4046 return 0; 4047 } 4048 4049 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, 4050 bool interruptible) 4051 { 4052 int running; 4053 int ret = 0; 4054 4055 mutex_lock(&fs_info->qgroup_rescan_lock); 4056 running = fs_info->qgroup_rescan_running; 4057 mutex_unlock(&fs_info->qgroup_rescan_lock); 4058 4059 if (!running) 4060 return 0; 4061 4062 if (interruptible) 4063 ret = wait_for_completion_interruptible( 4064 &fs_info->qgroup_rescan_completion); 4065 else 4066 wait_for_completion(&fs_info->qgroup_rescan_completion); 4067 4068 return ret; 4069 } 4070 4071 /* 4072 * this is only called from open_ctree where we're still single threaded, thus 4073 * locking is omitted here. 4074 */ 4075 void 4076 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) 4077 { 4078 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 4079 mutex_lock(&fs_info->qgroup_rescan_lock); 4080 fs_info->qgroup_rescan_running = true; 4081 btrfs_queue_work(fs_info->qgroup_rescan_workers, 4082 &fs_info->qgroup_rescan_work); 4083 mutex_unlock(&fs_info->qgroup_rescan_lock); 4084 } 4085 } 4086 4087 #define rbtree_iterate_from_safe(node, next, start) \ 4088 for (node = start; node && ({ next = rb_next(node); 1;}); node = next) 4089 4090 static int qgroup_unreserve_range(struct btrfs_inode *inode, 4091 struct extent_changeset *reserved, u64 start, 4092 u64 len) 4093 { 4094 struct rb_node *node; 4095 struct rb_node *next; 4096 struct ulist_node *entry; 4097 int ret = 0; 4098 4099 node = reserved->range_changed.root.rb_node; 4100 if (!node) 4101 return 0; 4102 while (node) { 4103 entry = rb_entry(node, struct ulist_node, rb_node); 4104 if (entry->val < start) 4105 node = node->rb_right; 4106 else 4107 node = node->rb_left; 4108 } 4109 4110 if (entry->val > start && rb_prev(&entry->rb_node)) 4111 entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node, 4112 rb_node); 4113 4114 rbtree_iterate_from_safe(node, next, &entry->rb_node) { 4115 u64 entry_start; 4116 u64 entry_end; 4117 u64 entry_len; 4118 int clear_ret; 4119 4120 entry = rb_entry(node, struct ulist_node, rb_node); 4121 entry_start = entry->val; 4122 entry_end = entry->aux; 4123 entry_len = entry_end - entry_start + 1; 4124 4125 if (entry_start >= start + len) 4126 break; 4127 if (entry_start + entry_len <= start) 4128 continue; 4129 /* 4130 * Now the entry is in [start, start + len), revert the 4131 * EXTENT_QGROUP_RESERVED bit. 4132 */ 4133 clear_ret = clear_extent_bits(&inode->io_tree, entry_start, 4134 entry_end, EXTENT_QGROUP_RESERVED); 4135 if (!ret && clear_ret < 0) 4136 ret = clear_ret; 4137 4138 ulist_del(&reserved->range_changed, entry->val, entry->aux); 4139 if (likely(reserved->bytes_changed >= entry_len)) { 4140 reserved->bytes_changed -= entry_len; 4141 } else { 4142 WARN_ON(1); 4143 reserved->bytes_changed = 0; 4144 } 4145 } 4146 4147 return ret; 4148 } 4149 4150 /* 4151 * Try to free some space for qgroup. 4152 * 4153 * For qgroup, there are only 3 ways to free qgroup space: 4154 * - Flush nodatacow write 4155 * Any nodatacow write will free its reserved data space at run_delalloc_range(). 4156 * In theory, we should only flush nodatacow inodes, but it's not yet 4157 * possible, so we need to flush the whole root. 4158 * 4159 * - Wait for ordered extents 4160 * When ordered extents are finished, their reserved metadata is finally 4161 * converted to per_trans status, which can be freed by later commit 4162 * transaction. 4163 * 4164 * - Commit transaction 4165 * This would free the meta_per_trans space. 4166 * In theory this shouldn't provide much space, but any more qgroup space 4167 * is needed. 4168 */ 4169 static int try_flush_qgroup(struct btrfs_root *root) 4170 { 4171 int ret; 4172 4173 /* Can't hold an open transaction or we run the risk of deadlocking. */ 4174 ASSERT(current->journal_info == NULL); 4175 if (WARN_ON(current->journal_info)) 4176 return 0; 4177 4178 /* 4179 * We don't want to run flush again and again, so if there is a running 4180 * one, we won't try to start a new flush, but exit directly. 4181 */ 4182 if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) { 4183 wait_event(root->qgroup_flush_wait, 4184 !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)); 4185 return 0; 4186 } 4187 4188 btrfs_run_delayed_iputs(root->fs_info); 4189 btrfs_wait_on_delayed_iputs(root->fs_info); 4190 ret = btrfs_start_delalloc_snapshot(root, true); 4191 if (ret < 0) 4192 goto out; 4193 btrfs_wait_ordered_extents(root, U64_MAX, NULL); 4194 4195 ret = btrfs_commit_current_transaction(root); 4196 out: 4197 clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state); 4198 wake_up(&root->qgroup_flush_wait); 4199 return ret; 4200 } 4201 4202 static int qgroup_reserve_data(struct btrfs_inode *inode, 4203 struct extent_changeset **reserved_ret, u64 start, 4204 u64 len) 4205 { 4206 struct btrfs_root *root = inode->root; 4207 struct extent_changeset *reserved; 4208 bool new_reserved = false; 4209 u64 orig_reserved; 4210 u64 to_reserve; 4211 int ret; 4212 4213 if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4214 !is_fstree(btrfs_root_id(root)) || len == 0) 4215 return 0; 4216 4217 /* @reserved parameter is mandatory for qgroup */ 4218 if (WARN_ON(!reserved_ret)) 4219 return -EINVAL; 4220 if (!*reserved_ret) { 4221 new_reserved = true; 4222 *reserved_ret = extent_changeset_alloc(); 4223 if (!*reserved_ret) 4224 return -ENOMEM; 4225 } 4226 reserved = *reserved_ret; 4227 /* Record already reserved space */ 4228 orig_reserved = reserved->bytes_changed; 4229 ret = set_record_extent_bits(&inode->io_tree, start, 4230 start + len -1, EXTENT_QGROUP_RESERVED, reserved); 4231 4232 /* Newly reserved space */ 4233 to_reserve = reserved->bytes_changed - orig_reserved; 4234 trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len, 4235 to_reserve, QGROUP_RESERVE); 4236 if (ret < 0) 4237 goto out; 4238 ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA); 4239 if (ret < 0) 4240 goto cleanup; 4241 4242 return ret; 4243 4244 cleanup: 4245 qgroup_unreserve_range(inode, reserved, start, len); 4246 out: 4247 if (new_reserved) { 4248 extent_changeset_free(reserved); 4249 *reserved_ret = NULL; 4250 } 4251 return ret; 4252 } 4253 4254 /* 4255 * Reserve qgroup space for range [start, start + len). 4256 * 4257 * This function will either reserve space from related qgroups or do nothing 4258 * if the range is already reserved. 4259 * 4260 * Return 0 for successful reservation 4261 * Return <0 for error (including -EQUOT) 4262 * 4263 * NOTE: This function may sleep for memory allocation, dirty page flushing and 4264 * commit transaction. So caller should not hold any dirty page locked. 4265 */ 4266 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode, 4267 struct extent_changeset **reserved_ret, u64 start, 4268 u64 len) 4269 { 4270 int ret; 4271 4272 ret = qgroup_reserve_data(inode, reserved_ret, start, len); 4273 if (ret <= 0 && ret != -EDQUOT) 4274 return ret; 4275 4276 ret = try_flush_qgroup(inode->root); 4277 if (ret < 0) 4278 return ret; 4279 return qgroup_reserve_data(inode, reserved_ret, start, len); 4280 } 4281 4282 /* Free ranges specified by @reserved, normally in error path */ 4283 static int qgroup_free_reserved_data(struct btrfs_inode *inode, 4284 struct extent_changeset *reserved, 4285 u64 start, u64 len, u64 *freed_ret) 4286 { 4287 struct btrfs_root *root = inode->root; 4288 struct ulist_node *unode; 4289 struct ulist_iterator uiter; 4290 struct extent_changeset changeset; 4291 u64 freed = 0; 4292 int ret; 4293 4294 extent_changeset_init(&changeset); 4295 len = round_up(start + len, root->fs_info->sectorsize); 4296 start = round_down(start, root->fs_info->sectorsize); 4297 4298 ULIST_ITER_INIT(&uiter); 4299 while ((unode = ulist_next(&reserved->range_changed, &uiter))) { 4300 u64 range_start = unode->val; 4301 /* unode->aux is the inclusive end */ 4302 u64 range_len = unode->aux - range_start + 1; 4303 u64 free_start; 4304 u64 free_len; 4305 4306 extent_changeset_release(&changeset); 4307 4308 /* Only free range in range [start, start + len) */ 4309 if (range_start >= start + len || 4310 range_start + range_len <= start) 4311 continue; 4312 free_start = max(range_start, start); 4313 free_len = min(start + len, range_start + range_len) - 4314 free_start; 4315 /* 4316 * TODO: To also modify reserved->ranges_reserved to reflect 4317 * the modification. 4318 * 4319 * However as long as we free qgroup reserved according to 4320 * EXTENT_QGROUP_RESERVED, we won't double free. 4321 * So not need to rush. 4322 */ 4323 ret = clear_record_extent_bits(&inode->io_tree, free_start, 4324 free_start + free_len - 1, 4325 EXTENT_QGROUP_RESERVED, &changeset); 4326 if (ret < 0) 4327 goto out; 4328 freed += changeset.bytes_changed; 4329 } 4330 btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed, 4331 BTRFS_QGROUP_RSV_DATA); 4332 if (freed_ret) 4333 *freed_ret = freed; 4334 ret = 0; 4335 out: 4336 extent_changeset_release(&changeset); 4337 return ret; 4338 } 4339 4340 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, 4341 struct extent_changeset *reserved, u64 start, u64 len, 4342 u64 *released, int free) 4343 { 4344 struct extent_changeset changeset; 4345 int trace_op = QGROUP_RELEASE; 4346 int ret; 4347 4348 if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) { 4349 return clear_record_extent_bits(&inode->io_tree, start, 4350 start + len - 1, 4351 EXTENT_QGROUP_RESERVED, NULL); 4352 } 4353 4354 /* In release case, we shouldn't have @reserved */ 4355 WARN_ON(!free && reserved); 4356 if (free && reserved) 4357 return qgroup_free_reserved_data(inode, reserved, start, len, released); 4358 extent_changeset_init(&changeset); 4359 ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1, 4360 EXTENT_QGROUP_RESERVED, &changeset); 4361 if (ret < 0) 4362 goto out; 4363 4364 if (free) 4365 trace_op = QGROUP_FREE; 4366 trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len, 4367 changeset.bytes_changed, trace_op); 4368 if (free) 4369 btrfs_qgroup_free_refroot(inode->root->fs_info, 4370 btrfs_root_id(inode->root), 4371 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); 4372 if (released) 4373 *released = changeset.bytes_changed; 4374 out: 4375 extent_changeset_release(&changeset); 4376 return ret; 4377 } 4378 4379 /* 4380 * Free a reserved space range from io_tree and related qgroups 4381 * 4382 * Should be called when a range of pages get invalidated before reaching disk. 4383 * Or for error cleanup case. 4384 * if @reserved is given, only reserved range in [@start, @start + @len) will 4385 * be freed. 4386 * 4387 * For data written to disk, use btrfs_qgroup_release_data(). 4388 * 4389 * NOTE: This function may sleep for memory allocation. 4390 */ 4391 int btrfs_qgroup_free_data(struct btrfs_inode *inode, 4392 struct extent_changeset *reserved, 4393 u64 start, u64 len, u64 *freed) 4394 { 4395 return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1); 4396 } 4397 4398 /* 4399 * Release a reserved space range from io_tree only. 4400 * 4401 * Should be called when a range of pages get written to disk and corresponding 4402 * FILE_EXTENT is inserted into corresponding root. 4403 * 4404 * Since new qgroup accounting framework will only update qgroup numbers at 4405 * commit_transaction() time, its reserved space shouldn't be freed from 4406 * related qgroups. 4407 * 4408 * But we should release the range from io_tree, to allow further write to be 4409 * COWed. 4410 * 4411 * NOTE: This function may sleep for memory allocation. 4412 */ 4413 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released) 4414 { 4415 return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0); 4416 } 4417 4418 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes, 4419 enum btrfs_qgroup_rsv_type type) 4420 { 4421 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 4422 type != BTRFS_QGROUP_RSV_META_PERTRANS) 4423 return; 4424 if (num_bytes == 0) 4425 return; 4426 4427 spin_lock(&root->qgroup_meta_rsv_lock); 4428 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) 4429 root->qgroup_meta_rsv_prealloc += num_bytes; 4430 else 4431 root->qgroup_meta_rsv_pertrans += num_bytes; 4432 spin_unlock(&root->qgroup_meta_rsv_lock); 4433 } 4434 4435 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes, 4436 enum btrfs_qgroup_rsv_type type) 4437 { 4438 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 4439 type != BTRFS_QGROUP_RSV_META_PERTRANS) 4440 return 0; 4441 if (num_bytes == 0) 4442 return 0; 4443 4444 spin_lock(&root->qgroup_meta_rsv_lock); 4445 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) { 4446 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc, 4447 num_bytes); 4448 root->qgroup_meta_rsv_prealloc -= num_bytes; 4449 } else { 4450 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans, 4451 num_bytes); 4452 root->qgroup_meta_rsv_pertrans -= num_bytes; 4453 } 4454 spin_unlock(&root->qgroup_meta_rsv_lock); 4455 return num_bytes; 4456 } 4457 4458 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, 4459 enum btrfs_qgroup_rsv_type type, bool enforce) 4460 { 4461 struct btrfs_fs_info *fs_info = root->fs_info; 4462 int ret; 4463 4464 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4465 !is_fstree(btrfs_root_id(root)) || num_bytes == 0) 4466 return 0; 4467 4468 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 4469 trace_qgroup_meta_reserve(root, (s64)num_bytes, type); 4470 ret = qgroup_reserve(root, num_bytes, enforce, type); 4471 if (ret < 0) 4472 return ret; 4473 /* 4474 * Record what we have reserved into root. 4475 * 4476 * To avoid quota disabled->enabled underflow. 4477 * In that case, we may try to free space we haven't reserved 4478 * (since quota was disabled), so record what we reserved into root. 4479 * And ensure later release won't underflow this number. 4480 */ 4481 add_root_meta_rsv(root, num_bytes, type); 4482 return ret; 4483 } 4484 4485 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, 4486 enum btrfs_qgroup_rsv_type type, bool enforce, 4487 bool noflush) 4488 { 4489 int ret; 4490 4491 ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce); 4492 if ((ret <= 0 && ret != -EDQUOT) || noflush) 4493 return ret; 4494 4495 ret = try_flush_qgroup(root); 4496 if (ret < 0) 4497 return ret; 4498 return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce); 4499 } 4500 4501 /* 4502 * Per-transaction meta reservation should be all freed at transaction commit 4503 * time 4504 */ 4505 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) 4506 { 4507 struct btrfs_fs_info *fs_info = root->fs_info; 4508 4509 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4510 !is_fstree(btrfs_root_id(root))) 4511 return; 4512 4513 /* TODO: Update trace point to handle such free */ 4514 trace_qgroup_meta_free_all_pertrans(root); 4515 /* Special value -1 means to free all reserved space */ 4516 btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1, 4517 BTRFS_QGROUP_RSV_META_PERTRANS); 4518 } 4519 4520 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, 4521 enum btrfs_qgroup_rsv_type type) 4522 { 4523 struct btrfs_fs_info *fs_info = root->fs_info; 4524 4525 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4526 !is_fstree(btrfs_root_id(root))) 4527 return; 4528 4529 /* 4530 * reservation for META_PREALLOC can happen before quota is enabled, 4531 * which can lead to underflow. 4532 * Here ensure we will only free what we really have reserved. 4533 */ 4534 num_bytes = sub_root_meta_rsv(root, num_bytes, type); 4535 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 4536 trace_qgroup_meta_reserve(root, -(s64)num_bytes, type); 4537 btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type); 4538 } 4539 4540 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, 4541 int num_bytes) 4542 { 4543 struct btrfs_qgroup *qgroup; 4544 LIST_HEAD(qgroup_list); 4545 4546 if (num_bytes == 0) 4547 return; 4548 if (!fs_info->quota_root) 4549 return; 4550 4551 spin_lock(&fs_info->qgroup_lock); 4552 qgroup = find_qgroup_rb(fs_info, ref_root); 4553 if (!qgroup) 4554 goto out; 4555 4556 qgroup_iterator_add(&qgroup_list, qgroup); 4557 list_for_each_entry(qgroup, &qgroup_list, iterator) { 4558 struct btrfs_qgroup_list *glist; 4559 4560 qgroup_rsv_release(fs_info, qgroup, num_bytes, 4561 BTRFS_QGROUP_RSV_META_PREALLOC); 4562 if (!sb_rdonly(fs_info->sb)) 4563 qgroup_rsv_add(fs_info, qgroup, num_bytes, 4564 BTRFS_QGROUP_RSV_META_PERTRANS); 4565 4566 list_for_each_entry(glist, &qgroup->groups, next_group) 4567 qgroup_iterator_add(&qgroup_list, glist->group); 4568 } 4569 out: 4570 qgroup_iterator_clean(&qgroup_list); 4571 spin_unlock(&fs_info->qgroup_lock); 4572 } 4573 4574 /* 4575 * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS. 4576 * 4577 * This is called when preallocated meta reservation needs to be used. 4578 * Normally after btrfs_join_transaction() call. 4579 */ 4580 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) 4581 { 4582 struct btrfs_fs_info *fs_info = root->fs_info; 4583 4584 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4585 !is_fstree(btrfs_root_id(root))) 4586 return; 4587 /* Same as btrfs_qgroup_free_meta_prealloc() */ 4588 num_bytes = sub_root_meta_rsv(root, num_bytes, 4589 BTRFS_QGROUP_RSV_META_PREALLOC); 4590 trace_qgroup_meta_convert(root, num_bytes); 4591 qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes); 4592 if (!sb_rdonly(fs_info->sb)) 4593 add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS); 4594 } 4595 4596 /* 4597 * Check qgroup reserved space leaking, normally at destroy inode 4598 * time 4599 */ 4600 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode) 4601 { 4602 struct extent_changeset changeset; 4603 struct ulist_node *unode; 4604 struct ulist_iterator iter; 4605 int ret; 4606 4607 extent_changeset_init(&changeset); 4608 ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, 4609 EXTENT_QGROUP_RESERVED, &changeset); 4610 4611 WARN_ON(ret < 0); 4612 if (WARN_ON(changeset.bytes_changed)) { 4613 ULIST_ITER_INIT(&iter); 4614 while ((unode = ulist_next(&changeset.range_changed, &iter))) { 4615 btrfs_warn(inode->root->fs_info, 4616 "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu", 4617 btrfs_ino(inode), unode->val, unode->aux); 4618 } 4619 btrfs_qgroup_free_refroot(inode->root->fs_info, 4620 btrfs_root_id(inode->root), 4621 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); 4622 4623 } 4624 extent_changeset_release(&changeset); 4625 } 4626 4627 void btrfs_qgroup_init_swapped_blocks( 4628 struct btrfs_qgroup_swapped_blocks *swapped_blocks) 4629 { 4630 int i; 4631 4632 spin_lock_init(&swapped_blocks->lock); 4633 for (i = 0; i < BTRFS_MAX_LEVEL; i++) 4634 swapped_blocks->blocks[i] = RB_ROOT; 4635 swapped_blocks->swapped = false; 4636 } 4637 4638 /* 4639 * Delete all swapped blocks record of @root. 4640 * Every record here means we skipped a full subtree scan for qgroup. 4641 * 4642 * Gets called when committing one transaction. 4643 */ 4644 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root) 4645 { 4646 struct btrfs_qgroup_swapped_blocks *swapped_blocks; 4647 int i; 4648 4649 swapped_blocks = &root->swapped_blocks; 4650 4651 spin_lock(&swapped_blocks->lock); 4652 if (!swapped_blocks->swapped) 4653 goto out; 4654 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 4655 struct rb_root *cur_root = &swapped_blocks->blocks[i]; 4656 struct btrfs_qgroup_swapped_block *entry; 4657 struct btrfs_qgroup_swapped_block *next; 4658 4659 rbtree_postorder_for_each_entry_safe(entry, next, cur_root, 4660 node) 4661 kfree(entry); 4662 swapped_blocks->blocks[i] = RB_ROOT; 4663 } 4664 swapped_blocks->swapped = false; 4665 out: 4666 spin_unlock(&swapped_blocks->lock); 4667 } 4668 4669 /* 4670 * Add subtree roots record into @subvol_root. 4671 * 4672 * @subvol_root: tree root of the subvolume tree get swapped 4673 * @bg: block group under balance 4674 * @subvol_parent/slot: pointer to the subtree root in subvolume tree 4675 * @reloc_parent/slot: pointer to the subtree root in reloc tree 4676 * BOTH POINTERS ARE BEFORE TREE SWAP 4677 * @last_snapshot: last snapshot generation of the subvolume tree 4678 */ 4679 int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans, 4680 struct btrfs_root *subvol_root, 4681 struct btrfs_block_group *bg, 4682 struct extent_buffer *subvol_parent, int subvol_slot, 4683 struct extent_buffer *reloc_parent, int reloc_slot, 4684 u64 last_snapshot) 4685 { 4686 struct btrfs_fs_info *fs_info = subvol_root->fs_info; 4687 struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks; 4688 struct btrfs_qgroup_swapped_block *block; 4689 struct rb_node **cur; 4690 struct rb_node *parent = NULL; 4691 int level = btrfs_header_level(subvol_parent) - 1; 4692 int ret = 0; 4693 4694 if (!btrfs_qgroup_full_accounting(fs_info)) 4695 return 0; 4696 4697 if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) > 4698 btrfs_node_ptr_generation(reloc_parent, reloc_slot)) { 4699 btrfs_err_rl(fs_info, 4700 "%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu", 4701 __func__, 4702 btrfs_node_ptr_generation(subvol_parent, subvol_slot), 4703 btrfs_node_ptr_generation(reloc_parent, reloc_slot)); 4704 return -EUCLEAN; 4705 } 4706 4707 block = kmalloc(sizeof(*block), GFP_NOFS); 4708 if (!block) { 4709 ret = -ENOMEM; 4710 goto out; 4711 } 4712 4713 /* 4714 * @reloc_parent/slot is still before swap, while @block is going to 4715 * record the bytenr after swap, so we do the swap here. 4716 */ 4717 block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot); 4718 block->subvol_generation = btrfs_node_ptr_generation(reloc_parent, 4719 reloc_slot); 4720 block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot); 4721 block->reloc_generation = btrfs_node_ptr_generation(subvol_parent, 4722 subvol_slot); 4723 block->last_snapshot = last_snapshot; 4724 block->level = level; 4725 4726 /* 4727 * If we have bg == NULL, we're called from btrfs_recover_relocation(), 4728 * no one else can modify tree blocks thus we qgroup will not change 4729 * no matter the value of trace_leaf. 4730 */ 4731 if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA) 4732 block->trace_leaf = true; 4733 else 4734 block->trace_leaf = false; 4735 btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot); 4736 4737 /* Insert @block into @blocks */ 4738 spin_lock(&blocks->lock); 4739 cur = &blocks->blocks[level].rb_node; 4740 while (*cur) { 4741 struct btrfs_qgroup_swapped_block *entry; 4742 4743 parent = *cur; 4744 entry = rb_entry(parent, struct btrfs_qgroup_swapped_block, 4745 node); 4746 4747 if (entry->subvol_bytenr < block->subvol_bytenr) { 4748 cur = &(*cur)->rb_left; 4749 } else if (entry->subvol_bytenr > block->subvol_bytenr) { 4750 cur = &(*cur)->rb_right; 4751 } else { 4752 if (entry->subvol_generation != 4753 block->subvol_generation || 4754 entry->reloc_bytenr != block->reloc_bytenr || 4755 entry->reloc_generation != 4756 block->reloc_generation) { 4757 /* 4758 * Duplicated but mismatch entry found. 4759 * Shouldn't happen. 4760 * 4761 * Marking qgroup inconsistent should be enough 4762 * for end users. 4763 */ 4764 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); 4765 ret = -EEXIST; 4766 } 4767 kfree(block); 4768 goto out_unlock; 4769 } 4770 } 4771 rb_link_node(&block->node, parent, cur); 4772 rb_insert_color(&block->node, &blocks->blocks[level]); 4773 blocks->swapped = true; 4774 out_unlock: 4775 spin_unlock(&blocks->lock); 4776 out: 4777 if (ret < 0) 4778 qgroup_mark_inconsistent(fs_info); 4779 return ret; 4780 } 4781 4782 /* 4783 * Check if the tree block is a subtree root, and if so do the needed 4784 * delayed subtree trace for qgroup. 4785 * 4786 * This is called during btrfs_cow_block(). 4787 */ 4788 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, 4789 struct btrfs_root *root, 4790 struct extent_buffer *subvol_eb) 4791 { 4792 struct btrfs_fs_info *fs_info = root->fs_info; 4793 struct btrfs_tree_parent_check check = { 0 }; 4794 struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks; 4795 struct btrfs_qgroup_swapped_block *block; 4796 struct extent_buffer *reloc_eb = NULL; 4797 struct rb_node *node; 4798 bool found = false; 4799 bool swapped = false; 4800 int level = btrfs_header_level(subvol_eb); 4801 int ret = 0; 4802 int i; 4803 4804 if (!btrfs_qgroup_full_accounting(fs_info)) 4805 return 0; 4806 if (!is_fstree(btrfs_root_id(root)) || !root->reloc_root) 4807 return 0; 4808 4809 spin_lock(&blocks->lock); 4810 if (!blocks->swapped) { 4811 spin_unlock(&blocks->lock); 4812 return 0; 4813 } 4814 node = blocks->blocks[level].rb_node; 4815 4816 while (node) { 4817 block = rb_entry(node, struct btrfs_qgroup_swapped_block, node); 4818 if (block->subvol_bytenr < subvol_eb->start) { 4819 node = node->rb_left; 4820 } else if (block->subvol_bytenr > subvol_eb->start) { 4821 node = node->rb_right; 4822 } else { 4823 found = true; 4824 break; 4825 } 4826 } 4827 if (!found) { 4828 spin_unlock(&blocks->lock); 4829 goto out; 4830 } 4831 /* Found one, remove it from @blocks first and update blocks->swapped */ 4832 rb_erase(&block->node, &blocks->blocks[level]); 4833 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 4834 if (RB_EMPTY_ROOT(&blocks->blocks[i])) { 4835 swapped = true; 4836 break; 4837 } 4838 } 4839 blocks->swapped = swapped; 4840 spin_unlock(&blocks->lock); 4841 4842 check.level = block->level; 4843 check.transid = block->reloc_generation; 4844 check.has_first_key = true; 4845 memcpy(&check.first_key, &block->first_key, sizeof(check.first_key)); 4846 4847 /* Read out reloc subtree root */ 4848 reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check); 4849 if (IS_ERR(reloc_eb)) { 4850 ret = PTR_ERR(reloc_eb); 4851 reloc_eb = NULL; 4852 goto free_out; 4853 } 4854 if (!extent_buffer_uptodate(reloc_eb)) { 4855 ret = -EIO; 4856 goto free_out; 4857 } 4858 4859 ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb, 4860 block->last_snapshot, block->trace_leaf); 4861 free_out: 4862 kfree(block); 4863 free_extent_buffer(reloc_eb); 4864 out: 4865 if (ret < 0) { 4866 btrfs_err_rl(fs_info, 4867 "failed to account subtree at bytenr %llu: %d", 4868 subvol_eb->start, ret); 4869 qgroup_mark_inconsistent(fs_info); 4870 } 4871 return ret; 4872 } 4873 4874 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans) 4875 { 4876 struct btrfs_qgroup_extent_record *entry; 4877 struct btrfs_qgroup_extent_record *next; 4878 struct rb_root *root; 4879 4880 root = &trans->delayed_refs.dirty_extent_root; 4881 rbtree_postorder_for_each_entry_safe(entry, next, root, node) { 4882 ulist_free(entry->old_roots); 4883 kfree(entry); 4884 } 4885 *root = RB_ROOT; 4886 } 4887 4888 void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes) 4889 { 4890 if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE) 4891 return; 4892 4893 if (!is_fstree(root)) 4894 return; 4895 4896 btrfs_qgroup_free_refroot(fs_info, root, rsv_bytes, BTRFS_QGROUP_RSV_DATA); 4897 } 4898 4899 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info, 4900 const struct btrfs_squota_delta *delta) 4901 { 4902 int ret; 4903 struct btrfs_qgroup *qgroup; 4904 struct btrfs_qgroup *qg; 4905 LIST_HEAD(qgroup_list); 4906 u64 root = delta->root; 4907 u64 num_bytes = delta->num_bytes; 4908 const int sign = (delta->is_inc ? 1 : -1); 4909 4910 if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE) 4911 return 0; 4912 4913 if (!is_fstree(root)) 4914 return 0; 4915 4916 /* If the extent predates enabling quotas, don't count it. */ 4917 if (delta->generation < fs_info->qgroup_enable_gen) 4918 return 0; 4919 4920 spin_lock(&fs_info->qgroup_lock); 4921 qgroup = find_qgroup_rb(fs_info, root); 4922 if (!qgroup) { 4923 ret = -ENOENT; 4924 goto out; 4925 } 4926 4927 ret = 0; 4928 qgroup_iterator_add(&qgroup_list, qgroup); 4929 list_for_each_entry(qg, &qgroup_list, iterator) { 4930 struct btrfs_qgroup_list *glist; 4931 4932 qg->excl += num_bytes * sign; 4933 qg->rfer += num_bytes * sign; 4934 qgroup_dirty(fs_info, qg); 4935 4936 list_for_each_entry(glist, &qg->groups, next_group) 4937 qgroup_iterator_add(&qgroup_list, glist->group); 4938 } 4939 qgroup_iterator_clean(&qgroup_list); 4940 4941 out: 4942 spin_unlock(&fs_info->qgroup_lock); 4943 return ret; 4944 } 4945