1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2011 STRATO. All rights reserved. 4 */ 5 6 #include <linux/sched.h> 7 #include <linux/pagemap.h> 8 #include <linux/writeback.h> 9 #include <linux/blkdev.h> 10 #include <linux/rbtree.h> 11 #include <linux/slab.h> 12 #include <linux/workqueue.h> 13 #include <linux/btrfs.h> 14 #include <linux/sched/mm.h> 15 16 #include "ctree.h" 17 #include "transaction.h" 18 #include "disk-io.h" 19 #include "locking.h" 20 #include "ulist.h" 21 #include "backref.h" 22 #include "extent_io.h" 23 #include "qgroup.h" 24 #include "block-group.h" 25 #include "sysfs.h" 26 #include "tree-mod-log.h" 27 #include "fs.h" 28 #include "accessors.h" 29 #include "extent-tree.h" 30 #include "root-tree.h" 31 #include "tree-checker.h" 32 33 enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info) 34 { 35 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) 36 return BTRFS_QGROUP_MODE_DISABLED; 37 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) 38 return BTRFS_QGROUP_MODE_SIMPLE; 39 return BTRFS_QGROUP_MODE_FULL; 40 } 41 42 bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info) 43 { 44 return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED; 45 } 46 47 bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info) 48 { 49 return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL; 50 } 51 52 /* 53 * Helpers to access qgroup reservation 54 * 55 * Callers should ensure the lock context and type are valid 56 */ 57 58 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup) 59 { 60 u64 ret = 0; 61 int i; 62 63 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) 64 ret += qgroup->rsv.values[i]; 65 66 return ret; 67 } 68 69 #ifdef CONFIG_BTRFS_DEBUG 70 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type) 71 { 72 if (type == BTRFS_QGROUP_RSV_DATA) 73 return "data"; 74 if (type == BTRFS_QGROUP_RSV_META_PERTRANS) 75 return "meta_pertrans"; 76 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) 77 return "meta_prealloc"; 78 return NULL; 79 } 80 #endif 81 82 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info, 83 struct btrfs_qgroup *qgroup, u64 num_bytes, 84 enum btrfs_qgroup_rsv_type type) 85 { 86 trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type); 87 qgroup->rsv.values[type] += num_bytes; 88 } 89 90 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info, 91 struct btrfs_qgroup *qgroup, u64 num_bytes, 92 enum btrfs_qgroup_rsv_type type) 93 { 94 trace_btrfs_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type); 95 if (qgroup->rsv.values[type] >= num_bytes) { 96 qgroup->rsv.values[type] -= num_bytes; 97 return; 98 } 99 #ifdef CONFIG_BTRFS_DEBUG 100 WARN_RATELIMIT(1, 101 "qgroup %llu %s reserved space underflow, have %llu to free %llu", 102 qgroup->qgroupid, qgroup_rsv_type_str(type), 103 qgroup->rsv.values[type], num_bytes); 104 #endif 105 qgroup->rsv.values[type] = 0; 106 } 107 108 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info, 109 struct btrfs_qgroup *dest, 110 const struct btrfs_qgroup *src) 111 { 112 int i; 113 114 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) 115 qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i); 116 } 117 118 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info, 119 struct btrfs_qgroup *dest, 120 const struct btrfs_qgroup *src) 121 { 122 int i; 123 124 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) 125 qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i); 126 } 127 128 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq, 129 int mod) 130 { 131 if (qg->old_refcnt < seq) 132 qg->old_refcnt = seq; 133 qg->old_refcnt += mod; 134 } 135 136 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq, 137 int mod) 138 { 139 if (qg->new_refcnt < seq) 140 qg->new_refcnt = seq; 141 qg->new_refcnt += mod; 142 } 143 144 static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq) 145 { 146 if (qg->old_refcnt < seq) 147 return 0; 148 return qg->old_refcnt - seq; 149 } 150 151 static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq) 152 { 153 if (qg->new_refcnt < seq) 154 return 0; 155 return qg->new_refcnt - seq; 156 } 157 158 static int 159 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 160 int init_flags); 161 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info); 162 163 static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node) 164 { 165 const u64 *qgroupid = key; 166 const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node); 167 168 if (qgroup->qgroupid < *qgroupid) 169 return -1; 170 else if (qgroup->qgroupid > *qgroupid) 171 return 1; 172 173 return 0; 174 } 175 176 /* must be called with qgroup_ioctl_lock held */ 177 static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info, 178 u64 qgroupid) 179 { 180 struct rb_node *node; 181 182 node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp); 183 return rb_entry_safe(node, struct btrfs_qgroup, node); 184 } 185 186 static int btrfs_qgroup_qgroupid_cmp(struct rb_node *new, const struct rb_node *existing) 187 { 188 const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node); 189 190 return btrfs_qgroup_qgroupid_key_cmp(&new_qgroup->qgroupid, existing); 191 } 192 193 /* 194 * Add qgroup to the filesystem's qgroup tree. 195 * 196 * Must be called with qgroup_lock held and @prealloc preallocated. 197 * 198 * The control on the lifespan of @prealloc would be transferred to this 199 * function, thus caller should no longer touch @prealloc. 200 */ 201 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info, 202 struct btrfs_qgroup *prealloc, 203 u64 qgroupid) 204 { 205 struct rb_node *node; 206 207 /* Caller must have pre-allocated @prealloc. */ 208 ASSERT(prealloc); 209 210 prealloc->qgroupid = qgroupid; 211 node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp); 212 if (node) { 213 kfree(prealloc); 214 return rb_entry(node, struct btrfs_qgroup, node); 215 } 216 217 INIT_LIST_HEAD(&prealloc->groups); 218 INIT_LIST_HEAD(&prealloc->members); 219 INIT_LIST_HEAD(&prealloc->dirty); 220 INIT_LIST_HEAD(&prealloc->iterator); 221 INIT_LIST_HEAD(&prealloc->nested_iterator); 222 223 return prealloc; 224 } 225 226 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup) 227 { 228 struct btrfs_qgroup_list *list; 229 230 list_del(&qgroup->dirty); 231 while (!list_empty(&qgroup->groups)) { 232 list = list_first_entry(&qgroup->groups, 233 struct btrfs_qgroup_list, next_group); 234 list_del(&list->next_group); 235 list_del(&list->next_member); 236 kfree(list); 237 } 238 239 while (!list_empty(&qgroup->members)) { 240 list = list_first_entry(&qgroup->members, 241 struct btrfs_qgroup_list, next_member); 242 list_del(&list->next_group); 243 list_del(&list->next_member); 244 kfree(list); 245 } 246 } 247 248 /* must be called with qgroup_lock held */ 249 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid) 250 { 251 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid); 252 253 if (!qgroup) 254 return -ENOENT; 255 256 rb_erase(&qgroup->node, &fs_info->qgroup_tree); 257 __del_qgroup_rb(qgroup); 258 return 0; 259 } 260 261 /* 262 * Add relation specified by two qgroups. 263 * 264 * Must be called with qgroup_lock held, the ownership of @prealloc is 265 * transferred to this function and caller should not touch it anymore. 266 * 267 * Return: 0 on success 268 * -ENOENT if one of the qgroups is NULL 269 * <0 other errors 270 */ 271 static int __add_relation_rb(struct btrfs_qgroup_list *prealloc, 272 struct btrfs_qgroup *member, 273 struct btrfs_qgroup *parent) 274 { 275 if (!member || !parent) { 276 kfree(prealloc); 277 return -ENOENT; 278 } 279 280 prealloc->group = parent; 281 prealloc->member = member; 282 list_add_tail(&prealloc->next_group, &member->groups); 283 list_add_tail(&prealloc->next_member, &parent->members); 284 285 return 0; 286 } 287 288 /* 289 * Add relation specified by two qgroup ids. 290 * 291 * Must be called with qgroup_lock held. 292 * 293 * Return: 0 on success 294 * -ENOENT if one of the ids does not exist 295 * <0 other errors 296 */ 297 static int add_relation_rb(struct btrfs_fs_info *fs_info, 298 struct btrfs_qgroup_list *prealloc, 299 u64 memberid, u64 parentid) 300 { 301 struct btrfs_qgroup *member; 302 struct btrfs_qgroup *parent; 303 304 member = find_qgroup_rb(fs_info, memberid); 305 parent = find_qgroup_rb(fs_info, parentid); 306 307 return __add_relation_rb(prealloc, member, parent); 308 } 309 310 /* Must be called with qgroup_lock held */ 311 static int del_relation_rb(struct btrfs_fs_info *fs_info, 312 u64 memberid, u64 parentid) 313 { 314 struct btrfs_qgroup *member; 315 struct btrfs_qgroup *parent; 316 struct btrfs_qgroup_list *list; 317 318 member = find_qgroup_rb(fs_info, memberid); 319 parent = find_qgroup_rb(fs_info, parentid); 320 if (!member || !parent) 321 return -ENOENT; 322 323 list_for_each_entry(list, &member->groups, next_group) { 324 if (list->group == parent) { 325 list_del(&list->next_group); 326 list_del(&list->next_member); 327 kfree(list); 328 return 0; 329 } 330 } 331 return -ENOENT; 332 } 333 334 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 335 int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid, 336 u64 rfer, u64 excl) 337 { 338 struct btrfs_qgroup *qgroup; 339 340 qgroup = find_qgroup_rb(fs_info, qgroupid); 341 if (!qgroup) 342 return -EINVAL; 343 if (qgroup->rfer != rfer || qgroup->excl != excl) 344 return -EINVAL; 345 return 0; 346 } 347 #endif 348 349 static bool squota_check_parent_usage(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *parent) 350 { 351 u64 excl_sum = 0; 352 u64 rfer_sum = 0; 353 u64 excl_cmpr_sum = 0; 354 u64 rfer_cmpr_sum = 0; 355 struct btrfs_qgroup_list *glist; 356 int nr_members = 0; 357 bool mismatch; 358 359 if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE) 360 return false; 361 if (btrfs_qgroup_level(parent->qgroupid) == 0) 362 return false; 363 364 /* Eligible parent qgroup. Squota; level > 0; empty members list. */ 365 list_for_each_entry(glist, &parent->members, next_member) { 366 excl_sum += glist->member->excl; 367 rfer_sum += glist->member->rfer; 368 excl_cmpr_sum += glist->member->excl_cmpr; 369 rfer_cmpr_sum += glist->member->rfer_cmpr; 370 nr_members++; 371 } 372 mismatch = (parent->excl != excl_sum || parent->rfer != rfer_sum || 373 parent->excl_cmpr != excl_cmpr_sum || parent->rfer_cmpr != excl_cmpr_sum); 374 375 WARN(mismatch, 376 "parent squota qgroup %hu/%llu has mismatched usage from its %d members. " 377 "%llu %llu %llu %llu vs %llu %llu %llu %llu\n", 378 btrfs_qgroup_level(parent->qgroupid), 379 btrfs_qgroup_subvolid(parent->qgroupid), nr_members, parent->excl, 380 parent->rfer, parent->excl_cmpr, parent->rfer_cmpr, excl_sum, 381 rfer_sum, excl_cmpr_sum, rfer_cmpr_sum); 382 return mismatch; 383 } 384 385 __printf(2, 3) 386 static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info, const char *fmt, ...) 387 { 388 const u64 old_flags = fs_info->qgroup_flags; 389 390 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) 391 return; 392 fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | 393 BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN | 394 BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING); 395 if (!(old_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) { 396 struct va_format vaf; 397 va_list args; 398 399 va_start(args, fmt); 400 vaf.fmt = fmt; 401 vaf.va = &args; 402 403 btrfs_warn_rl(fs_info, "qgroup marked inconsistent, %pV", &vaf); 404 va_end(args); 405 } 406 } 407 408 static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info, 409 struct extent_buffer *leaf, int slot, 410 struct btrfs_qgroup_status_item *ptr) 411 { 412 ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)); 413 ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr)); 414 fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr); 415 } 416 417 /* 418 * The full config is read in one go, only called from open_ctree() 419 * It doesn't use any locking, as at this point we're still single-threaded 420 */ 421 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) 422 { 423 struct btrfs_key key; 424 struct btrfs_key found_key; 425 struct btrfs_root *quota_root = fs_info->quota_root; 426 struct btrfs_path *path = NULL; 427 struct extent_buffer *l; 428 int slot; 429 int ret = 0; 430 u64 flags = 0; 431 u64 rescan_progress = 0; 432 433 if (!fs_info->quota_root) 434 return 0; 435 436 path = btrfs_alloc_path(); 437 if (!path) { 438 ret = -ENOMEM; 439 goto out; 440 } 441 442 ret = btrfs_sysfs_add_qgroups(fs_info); 443 if (ret < 0) 444 goto out; 445 /* default this to quota off, in case no status key is found */ 446 fs_info->qgroup_flags = 0; 447 448 /* 449 * pass 1: read status, all qgroup infos and limits 450 */ 451 key.objectid = 0; 452 key.type = 0; 453 key.offset = 0; 454 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1); 455 if (ret) 456 goto out; 457 458 while (1) { 459 struct btrfs_qgroup *qgroup; 460 461 slot = path->slots[0]; 462 l = path->nodes[0]; 463 btrfs_item_key_to_cpu(l, &found_key, slot); 464 465 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) { 466 struct btrfs_qgroup_status_item *ptr; 467 468 ptr = btrfs_item_ptr(l, slot, 469 struct btrfs_qgroup_status_item); 470 471 if (btrfs_qgroup_status_version(l, ptr) != 472 BTRFS_QGROUP_STATUS_VERSION) { 473 btrfs_err(fs_info, 474 "old qgroup version, quota disabled"); 475 goto out; 476 } 477 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr); 478 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) 479 qgroup_read_enable_gen(fs_info, l, slot, ptr); 480 else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) 481 qgroup_mark_inconsistent(fs_info, "qgroup generation mismatch"); 482 rescan_progress = btrfs_qgroup_status_rescan(l, ptr); 483 goto next1; 484 } 485 486 if (found_key.type != BTRFS_QGROUP_INFO_KEY && 487 found_key.type != BTRFS_QGROUP_LIMIT_KEY) 488 goto next1; 489 490 qgroup = find_qgroup_rb(fs_info, found_key.offset); 491 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) || 492 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) 493 qgroup_mark_inconsistent(fs_info, "inconsistent qgroup config"); 494 if (!qgroup) { 495 struct btrfs_qgroup *prealloc; 496 struct btrfs_root *tree_root = fs_info->tree_root; 497 498 prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); 499 if (!prealloc) { 500 ret = -ENOMEM; 501 goto out; 502 } 503 qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset); 504 /* 505 * If a qgroup exists for a subvolume ID, it is possible 506 * that subvolume has been deleted, in which case 507 * reusing that ID would lead to incorrect accounting. 508 * 509 * Ensure that we skip any such subvol ids. 510 * 511 * We don't need to lock because this is only called 512 * during mount before we start doing things like creating 513 * subvolumes. 514 */ 515 if (btrfs_is_fstree(qgroup->qgroupid) && 516 qgroup->qgroupid > tree_root->free_objectid) 517 /* 518 * Don't need to check against BTRFS_LAST_FREE_OBJECTID, 519 * as it will get checked on the next call to 520 * btrfs_get_free_objectid. 521 */ 522 tree_root->free_objectid = qgroup->qgroupid + 1; 523 } 524 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 525 if (ret < 0) 526 goto out; 527 528 switch (found_key.type) { 529 case BTRFS_QGROUP_INFO_KEY: { 530 struct btrfs_qgroup_info_item *ptr; 531 532 ptr = btrfs_item_ptr(l, slot, 533 struct btrfs_qgroup_info_item); 534 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr); 535 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr); 536 qgroup->excl = btrfs_qgroup_info_excl(l, ptr); 537 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr); 538 /* generation currently unused */ 539 break; 540 } 541 case BTRFS_QGROUP_LIMIT_KEY: { 542 struct btrfs_qgroup_limit_item *ptr; 543 544 ptr = btrfs_item_ptr(l, slot, 545 struct btrfs_qgroup_limit_item); 546 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr); 547 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr); 548 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr); 549 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr); 550 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr); 551 break; 552 } 553 } 554 next1: 555 ret = btrfs_next_item(quota_root, path); 556 if (ret < 0) 557 goto out; 558 if (ret) 559 break; 560 } 561 btrfs_release_path(path); 562 563 /* 564 * pass 2: read all qgroup relations 565 */ 566 key.objectid = 0; 567 key.type = BTRFS_QGROUP_RELATION_KEY; 568 key.offset = 0; 569 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0); 570 if (ret) 571 goto out; 572 while (1) { 573 struct btrfs_qgroup_list *list = NULL; 574 575 slot = path->slots[0]; 576 l = path->nodes[0]; 577 btrfs_item_key_to_cpu(l, &found_key, slot); 578 579 if (found_key.type != BTRFS_QGROUP_RELATION_KEY) 580 goto next2; 581 582 if (found_key.objectid > found_key.offset) { 583 /* parent <- member, not needed to build config */ 584 /* FIXME should we omit the key completely? */ 585 goto next2; 586 } 587 588 list = kzalloc(sizeof(*list), GFP_KERNEL); 589 if (!list) { 590 ret = -ENOMEM; 591 goto out; 592 } 593 ret = add_relation_rb(fs_info, list, found_key.objectid, 594 found_key.offset); 595 list = NULL; 596 if (ret == -ENOENT) { 597 btrfs_warn(fs_info, 598 "orphan qgroup relation 0x%llx->0x%llx", 599 found_key.objectid, found_key.offset); 600 ret = 0; /* ignore the error */ 601 } 602 if (ret) 603 goto out; 604 next2: 605 ret = btrfs_next_item(quota_root, path); 606 if (ret < 0) 607 goto out; 608 if (ret) 609 break; 610 } 611 out: 612 btrfs_free_path(path); 613 fs_info->qgroup_flags |= flags; 614 if (ret >= 0) { 615 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON) 616 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 617 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) 618 ret = qgroup_rescan_init(fs_info, rescan_progress, 0); 619 } else { 620 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 621 btrfs_sysfs_del_qgroups(fs_info); 622 } 623 624 return ret < 0 ? ret : 0; 625 } 626 627 /* 628 * Called in close_ctree() when quota is still enabled. This verifies we don't 629 * leak some reserved space. 630 * 631 * Return false if no reserved space is left. 632 * Return true if some reserved space is leaked. 633 */ 634 bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info) 635 { 636 struct rb_node *node; 637 bool ret = false; 638 639 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) 640 return ret; 641 /* 642 * Since we're unmounting, there is no race and no need to grab qgroup 643 * lock. And here we don't go post-order to provide a more user 644 * friendly sorted result. 645 */ 646 for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) { 647 struct btrfs_qgroup *qgroup; 648 int i; 649 650 qgroup = rb_entry(node, struct btrfs_qgroup, node); 651 for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) { 652 if (qgroup->rsv.values[i]) { 653 ret = true; 654 btrfs_warn(fs_info, 655 "qgroup %hu/%llu has unreleased space, type %d rsv %llu", 656 btrfs_qgroup_level(qgroup->qgroupid), 657 btrfs_qgroup_subvolid(qgroup->qgroupid), 658 i, qgroup->rsv.values[i]); 659 } 660 } 661 } 662 return ret; 663 } 664 665 /* 666 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(), 667 * first two are in single-threaded paths. 668 */ 669 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info) 670 { 671 struct rb_node *n; 672 struct btrfs_qgroup *qgroup; 673 674 /* 675 * btrfs_quota_disable() can be called concurrently with 676 * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the 677 * lock. 678 */ 679 spin_lock(&fs_info->qgroup_lock); 680 while ((n = rb_first(&fs_info->qgroup_tree))) { 681 qgroup = rb_entry(n, struct btrfs_qgroup, node); 682 rb_erase(n, &fs_info->qgroup_tree); 683 __del_qgroup_rb(qgroup); 684 spin_unlock(&fs_info->qgroup_lock); 685 btrfs_sysfs_del_one_qgroup(fs_info, qgroup); 686 kfree(qgroup); 687 spin_lock(&fs_info->qgroup_lock); 688 } 689 spin_unlock(&fs_info->qgroup_lock); 690 691 btrfs_sysfs_del_qgroups(fs_info); 692 } 693 694 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src, 695 u64 dst) 696 { 697 struct btrfs_root *quota_root = trans->fs_info->quota_root; 698 BTRFS_PATH_AUTO_FREE(path); 699 struct btrfs_key key; 700 701 path = btrfs_alloc_path(); 702 if (!path) 703 return -ENOMEM; 704 705 key.objectid = src; 706 key.type = BTRFS_QGROUP_RELATION_KEY; 707 key.offset = dst; 708 709 return btrfs_insert_empty_item(trans, quota_root, path, &key, 0); 710 } 711 712 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src, 713 u64 dst) 714 { 715 int ret; 716 struct btrfs_root *quota_root = trans->fs_info->quota_root; 717 BTRFS_PATH_AUTO_FREE(path); 718 struct btrfs_key key; 719 720 path = btrfs_alloc_path(); 721 if (!path) 722 return -ENOMEM; 723 724 key.objectid = src; 725 key.type = BTRFS_QGROUP_RELATION_KEY; 726 key.offset = dst; 727 728 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 729 if (ret < 0) 730 return ret; 731 732 if (ret > 0) 733 return -ENOENT; 734 735 return btrfs_del_item(trans, quota_root, path); 736 } 737 738 static int add_qgroup_item(struct btrfs_trans_handle *trans, 739 struct btrfs_root *quota_root, u64 qgroupid) 740 { 741 int ret; 742 BTRFS_PATH_AUTO_FREE(path); 743 struct btrfs_qgroup_info_item *qgroup_info; 744 struct btrfs_qgroup_limit_item *qgroup_limit; 745 struct extent_buffer *leaf; 746 struct btrfs_key key; 747 748 if (btrfs_is_testing(quota_root->fs_info)) 749 return 0; 750 751 path = btrfs_alloc_path(); 752 if (!path) 753 return -ENOMEM; 754 755 key.objectid = 0; 756 key.type = BTRFS_QGROUP_INFO_KEY; 757 key.offset = qgroupid; 758 759 /* 760 * Avoid a transaction abort by catching -EEXIST here. In that 761 * case, we proceed by re-initializing the existing structure 762 * on disk. 763 */ 764 765 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 766 sizeof(*qgroup_info)); 767 if (ret && ret != -EEXIST) 768 return ret; 769 770 leaf = path->nodes[0]; 771 qgroup_info = btrfs_item_ptr(leaf, path->slots[0], 772 struct btrfs_qgroup_info_item); 773 btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid); 774 btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0); 775 btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0); 776 btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0); 777 btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0); 778 779 btrfs_release_path(path); 780 781 key.type = BTRFS_QGROUP_LIMIT_KEY; 782 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 783 sizeof(*qgroup_limit)); 784 if (ret && ret != -EEXIST) 785 return ret; 786 787 leaf = path->nodes[0]; 788 qgroup_limit = btrfs_item_ptr(leaf, path->slots[0], 789 struct btrfs_qgroup_limit_item); 790 btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0); 791 btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0); 792 btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0); 793 btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0); 794 btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0); 795 796 return 0; 797 } 798 799 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid) 800 { 801 int ret; 802 struct btrfs_root *quota_root = trans->fs_info->quota_root; 803 BTRFS_PATH_AUTO_FREE(path); 804 struct btrfs_key key; 805 806 path = btrfs_alloc_path(); 807 if (!path) 808 return -ENOMEM; 809 810 key.objectid = 0; 811 key.type = BTRFS_QGROUP_INFO_KEY; 812 key.offset = qgroupid; 813 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 814 if (ret < 0) 815 return ret; 816 817 if (ret > 0) 818 return -ENOENT; 819 820 ret = btrfs_del_item(trans, quota_root, path); 821 if (ret) 822 return ret; 823 824 btrfs_release_path(path); 825 826 key.type = BTRFS_QGROUP_LIMIT_KEY; 827 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1); 828 if (ret < 0) 829 return ret; 830 831 if (ret > 0) 832 return -ENOENT; 833 834 return btrfs_del_item(trans, quota_root, path); 835 } 836 837 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans, 838 struct btrfs_qgroup *qgroup) 839 { 840 struct btrfs_root *quota_root = trans->fs_info->quota_root; 841 BTRFS_PATH_AUTO_FREE(path); 842 struct btrfs_key key; 843 struct extent_buffer *l; 844 struct btrfs_qgroup_limit_item *qgroup_limit; 845 int ret; 846 int slot; 847 848 key.objectid = 0; 849 key.type = BTRFS_QGROUP_LIMIT_KEY; 850 key.offset = qgroup->qgroupid; 851 852 path = btrfs_alloc_path(); 853 if (!path) 854 return -ENOMEM; 855 856 ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); 857 if (ret > 0) 858 ret = -ENOENT; 859 860 if (ret) 861 return ret; 862 863 l = path->nodes[0]; 864 slot = path->slots[0]; 865 qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item); 866 btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags); 867 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer); 868 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl); 869 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer); 870 btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl); 871 872 return ret; 873 } 874 875 static int update_qgroup_info_item(struct btrfs_trans_handle *trans, 876 struct btrfs_qgroup *qgroup) 877 { 878 struct btrfs_fs_info *fs_info = trans->fs_info; 879 struct btrfs_root *quota_root = fs_info->quota_root; 880 BTRFS_PATH_AUTO_FREE(path); 881 struct btrfs_key key; 882 struct extent_buffer *l; 883 struct btrfs_qgroup_info_item *qgroup_info; 884 int ret; 885 int slot; 886 887 if (btrfs_is_testing(fs_info)) 888 return 0; 889 890 key.objectid = 0; 891 key.type = BTRFS_QGROUP_INFO_KEY; 892 key.offset = qgroup->qgroupid; 893 894 path = btrfs_alloc_path(); 895 if (!path) 896 return -ENOMEM; 897 898 ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); 899 if (ret > 0) 900 ret = -ENOENT; 901 902 if (ret) 903 return ret; 904 905 l = path->nodes[0]; 906 slot = path->slots[0]; 907 qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item); 908 btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid); 909 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer); 910 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr); 911 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl); 912 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr); 913 914 return ret; 915 } 916 917 static int update_qgroup_status_item(struct btrfs_trans_handle *trans) 918 { 919 struct btrfs_fs_info *fs_info = trans->fs_info; 920 struct btrfs_root *quota_root = fs_info->quota_root; 921 BTRFS_PATH_AUTO_FREE(path); 922 struct btrfs_key key; 923 struct extent_buffer *l; 924 struct btrfs_qgroup_status_item *ptr; 925 int ret; 926 int slot; 927 928 key.objectid = 0; 929 key.type = BTRFS_QGROUP_STATUS_KEY; 930 key.offset = 0; 931 932 path = btrfs_alloc_path(); 933 if (!path) 934 return -ENOMEM; 935 936 ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1); 937 if (ret > 0) 938 ret = -ENOENT; 939 940 if (ret) 941 return ret; 942 943 l = path->nodes[0]; 944 slot = path->slots[0]; 945 ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item); 946 btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags & 947 BTRFS_QGROUP_STATUS_FLAGS_MASK); 948 btrfs_set_qgroup_status_generation(l, ptr, trans->transid); 949 btrfs_set_qgroup_status_rescan(l, ptr, 950 fs_info->qgroup_rescan_progress.objectid); 951 952 return ret; 953 } 954 955 /* 956 * called with qgroup_lock held 957 */ 958 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans, 959 struct btrfs_root *root) 960 { 961 BTRFS_PATH_AUTO_FREE(path); 962 struct btrfs_key key; 963 struct extent_buffer *leaf = NULL; 964 int ret; 965 int nr = 0; 966 967 path = btrfs_alloc_path(); 968 if (!path) 969 return -ENOMEM; 970 971 key.objectid = 0; 972 key.type = 0; 973 key.offset = 0; 974 975 while (1) { 976 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 977 if (ret < 0) 978 return ret; 979 leaf = path->nodes[0]; 980 nr = btrfs_header_nritems(leaf); 981 if (!nr) 982 break; 983 /* 984 * delete the leaf one by one 985 * since the whole tree is going 986 * to be deleted. 987 */ 988 path->slots[0] = 0; 989 ret = btrfs_del_items(trans, root, path, 0, nr); 990 if (ret) 991 return ret; 992 993 btrfs_release_path(path); 994 } 995 996 return 0; 997 } 998 999 int btrfs_quota_enable(struct btrfs_fs_info *fs_info, 1000 struct btrfs_ioctl_quota_ctl_args *quota_ctl_args) 1001 { 1002 struct btrfs_root *quota_root; 1003 struct btrfs_root *tree_root = fs_info->tree_root; 1004 struct btrfs_path *path = NULL; 1005 struct btrfs_qgroup_status_item *ptr; 1006 struct extent_buffer *leaf; 1007 struct btrfs_key key; 1008 struct btrfs_key found_key; 1009 struct btrfs_qgroup *qgroup = NULL; 1010 struct btrfs_qgroup *prealloc = NULL; 1011 struct btrfs_trans_handle *trans = NULL; 1012 const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA); 1013 int ret = 0; 1014 int slot; 1015 1016 /* 1017 * We need to have subvol_sem write locked, to prevent races between 1018 * concurrent tasks trying to enable quotas, because we will unlock 1019 * and relock qgroup_ioctl_lock before setting fs_info->quota_root 1020 * and before setting BTRFS_FS_QUOTA_ENABLED. 1021 */ 1022 lockdep_assert_held_write(&fs_info->subvol_sem); 1023 1024 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { 1025 btrfs_err(fs_info, 1026 "qgroups are currently unsupported in extent tree v2"); 1027 return -EINVAL; 1028 } 1029 1030 mutex_lock(&fs_info->qgroup_ioctl_lock); 1031 if (fs_info->quota_root) 1032 goto out; 1033 1034 ret = btrfs_sysfs_add_qgroups(fs_info); 1035 if (ret < 0) 1036 goto out; 1037 1038 /* 1039 * Unlock qgroup_ioctl_lock before starting the transaction. This is to 1040 * avoid lock acquisition inversion problems (reported by lockdep) between 1041 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we 1042 * start a transaction. 1043 * After we started the transaction lock qgroup_ioctl_lock again and 1044 * check if someone else created the quota root in the meanwhile. If so, 1045 * just return success and release the transaction handle. 1046 * 1047 * Also we don't need to worry about someone else calling 1048 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because 1049 * that function returns 0 (success) when the sysfs entries already exist. 1050 */ 1051 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1052 1053 /* 1054 * 1 for quota root item 1055 * 1 for BTRFS_QGROUP_STATUS item 1056 * 1057 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items 1058 * per subvolume. However those are not currently reserved since it 1059 * would be a lot of overkill. 1060 */ 1061 trans = btrfs_start_transaction(tree_root, 2); 1062 1063 mutex_lock(&fs_info->qgroup_ioctl_lock); 1064 if (IS_ERR(trans)) { 1065 ret = PTR_ERR(trans); 1066 trans = NULL; 1067 goto out; 1068 } 1069 1070 if (fs_info->quota_root) 1071 goto out; 1072 1073 /* 1074 * initially create the quota tree 1075 */ 1076 quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID); 1077 if (IS_ERR(quota_root)) { 1078 ret = PTR_ERR(quota_root); 1079 btrfs_abort_transaction(trans, ret); 1080 goto out; 1081 } 1082 1083 path = btrfs_alloc_path(); 1084 if (unlikely(!path)) { 1085 ret = -ENOMEM; 1086 btrfs_abort_transaction(trans, ret); 1087 goto out_free_root; 1088 } 1089 1090 key.objectid = 0; 1091 key.type = BTRFS_QGROUP_STATUS_KEY; 1092 key.offset = 0; 1093 1094 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 1095 sizeof(*ptr)); 1096 if (unlikely(ret)) { 1097 btrfs_abort_transaction(trans, ret); 1098 goto out_free_path; 1099 } 1100 1101 leaf = path->nodes[0]; 1102 ptr = btrfs_item_ptr(leaf, path->slots[0], 1103 struct btrfs_qgroup_status_item); 1104 btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid); 1105 btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION); 1106 fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON; 1107 if (simple) { 1108 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE; 1109 btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA); 1110 btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid); 1111 } else { 1112 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1113 } 1114 btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags & 1115 BTRFS_QGROUP_STATUS_FLAGS_MASK); 1116 btrfs_set_qgroup_status_rescan(leaf, ptr, 0); 1117 1118 key.objectid = 0; 1119 key.type = BTRFS_ROOT_REF_KEY; 1120 key.offset = 0; 1121 1122 btrfs_release_path(path); 1123 ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0); 1124 if (ret > 0) 1125 goto out_add_root; 1126 if (unlikely(ret < 0)) { 1127 btrfs_abort_transaction(trans, ret); 1128 goto out_free_path; 1129 } 1130 1131 while (1) { 1132 slot = path->slots[0]; 1133 leaf = path->nodes[0]; 1134 btrfs_item_key_to_cpu(leaf, &found_key, slot); 1135 1136 if (found_key.type == BTRFS_ROOT_REF_KEY) { 1137 1138 /* Release locks on tree_root before we access quota_root */ 1139 btrfs_release_path(path); 1140 1141 /* We should not have a stray @prealloc pointer. */ 1142 ASSERT(prealloc == NULL); 1143 prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); 1144 if (unlikely(!prealloc)) { 1145 ret = -ENOMEM; 1146 btrfs_abort_transaction(trans, ret); 1147 goto out_free_path; 1148 } 1149 1150 ret = add_qgroup_item(trans, quota_root, 1151 found_key.offset); 1152 if (unlikely(ret)) { 1153 btrfs_abort_transaction(trans, ret); 1154 goto out_free_path; 1155 } 1156 1157 qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset); 1158 prealloc = NULL; 1159 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 1160 if (unlikely(ret < 0)) { 1161 btrfs_abort_transaction(trans, ret); 1162 goto out_free_path; 1163 } 1164 ret = btrfs_search_slot_for_read(tree_root, &found_key, 1165 path, 1, 0); 1166 if (unlikely(ret < 0)) { 1167 btrfs_abort_transaction(trans, ret); 1168 goto out_free_path; 1169 } 1170 if (ret > 0) { 1171 /* 1172 * Shouldn't happen, but in case it does we 1173 * don't need to do the btrfs_next_item, just 1174 * continue. 1175 */ 1176 continue; 1177 } 1178 } 1179 ret = btrfs_next_item(tree_root, path); 1180 if (unlikely(ret < 0)) { 1181 btrfs_abort_transaction(trans, ret); 1182 goto out_free_path; 1183 } 1184 if (ret) 1185 break; 1186 } 1187 1188 out_add_root: 1189 btrfs_release_path(path); 1190 ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID); 1191 if (unlikely(ret)) { 1192 btrfs_abort_transaction(trans, ret); 1193 goto out_free_path; 1194 } 1195 1196 ASSERT(prealloc == NULL); 1197 prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); 1198 if (!prealloc) { 1199 ret = -ENOMEM; 1200 goto out_free_path; 1201 } 1202 qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID); 1203 prealloc = NULL; 1204 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 1205 if (unlikely(ret < 0)) { 1206 btrfs_abort_transaction(trans, ret); 1207 goto out_free_path; 1208 } 1209 1210 fs_info->qgroup_enable_gen = trans->transid; 1211 1212 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1213 /* 1214 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid 1215 * a deadlock with tasks concurrently doing other qgroup operations, such 1216 * adding/removing qgroups or adding/deleting qgroup relations for example, 1217 * because all qgroup operations first start or join a transaction and then 1218 * lock the qgroup_ioctl_lock mutex. 1219 * We are safe from a concurrent task trying to enable quotas, by calling 1220 * this function, since we are serialized by fs_info->subvol_sem. 1221 */ 1222 ret = btrfs_commit_transaction(trans); 1223 trans = NULL; 1224 mutex_lock(&fs_info->qgroup_ioctl_lock); 1225 if (ret) 1226 goto out_free_path; 1227 1228 /* 1229 * Set quota enabled flag after committing the transaction, to avoid 1230 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot 1231 * creation. 1232 */ 1233 spin_lock(&fs_info->qgroup_lock); 1234 fs_info->quota_root = quota_root; 1235 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1236 spin_unlock(&fs_info->qgroup_lock); 1237 1238 /* Skip rescan for simple qgroups. */ 1239 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) 1240 goto out_free_path; 1241 1242 ret = qgroup_rescan_init(fs_info, 0, 1); 1243 if (!ret) { 1244 qgroup_rescan_zero_tracking(fs_info); 1245 fs_info->qgroup_rescan_running = true; 1246 btrfs_queue_work(fs_info->qgroup_rescan_workers, 1247 &fs_info->qgroup_rescan_work); 1248 } else { 1249 /* 1250 * We have set both BTRFS_FS_QUOTA_ENABLED and 1251 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with 1252 * -EINPROGRESS. That can happen because someone started the 1253 * rescan worker by calling quota rescan ioctl before we 1254 * attempted to initialize the rescan worker. Failure due to 1255 * quotas disabled in the meanwhile is not possible, because 1256 * we are holding a write lock on fs_info->subvol_sem, which 1257 * is also acquired when disabling quotas. 1258 * Ignore such error, and any other error would need to undo 1259 * everything we did in the transaction we just committed. 1260 */ 1261 ASSERT(ret == -EINPROGRESS); 1262 ret = 0; 1263 } 1264 1265 out_free_path: 1266 btrfs_free_path(path); 1267 out_free_root: 1268 if (ret) 1269 btrfs_put_root(quota_root); 1270 out: 1271 if (ret) 1272 btrfs_sysfs_del_qgroups(fs_info); 1273 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1274 if (ret && trans) 1275 btrfs_end_transaction(trans); 1276 else if (trans) 1277 ret = btrfs_end_transaction(trans); 1278 kfree(prealloc); 1279 return ret; 1280 } 1281 1282 /* 1283 * It is possible to have outstanding ordered extents which reserved bytes 1284 * before we disabled. We need to fully flush delalloc, ordered extents, and a 1285 * commit to ensure that we don't leak such reservations, only to have them 1286 * come back if we re-enable. 1287 * 1288 * - enable simple quotas 1289 * - reserve space 1290 * - release it, store rsv_bytes in OE 1291 * - disable quotas 1292 * - enable simple quotas (qgroup rsv are all 0) 1293 * - OE finishes 1294 * - run delayed refs 1295 * - free rsv_bytes, resulting in miscounting or even underflow 1296 */ 1297 static int flush_reservations(struct btrfs_fs_info *fs_info) 1298 { 1299 int ret; 1300 1301 ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false); 1302 if (ret) 1303 return ret; 1304 btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL); 1305 1306 return btrfs_commit_current_transaction(fs_info->tree_root); 1307 } 1308 1309 int btrfs_quota_disable(struct btrfs_fs_info *fs_info) 1310 { 1311 struct btrfs_root *quota_root = NULL; 1312 struct btrfs_trans_handle *trans = NULL; 1313 int ret = 0; 1314 1315 /* 1316 * We need to have subvol_sem write locked to prevent races with 1317 * snapshot creation. 1318 */ 1319 lockdep_assert_held_write(&fs_info->subvol_sem); 1320 1321 /* 1322 * Relocation will mess with backrefs, so make sure we have the 1323 * cleaner_mutex held to protect us from relocate. 1324 */ 1325 lockdep_assert_held(&fs_info->cleaner_mutex); 1326 1327 mutex_lock(&fs_info->qgroup_ioctl_lock); 1328 if (!fs_info->quota_root) 1329 goto out; 1330 1331 /* 1332 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to 1333 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs 1334 * to lock that mutex while holding a transaction handle and the rescan 1335 * worker needs to commit a transaction. 1336 */ 1337 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1338 1339 /* 1340 * Request qgroup rescan worker to complete and wait for it. This wait 1341 * must be done before transaction start for quota disable since it may 1342 * deadlock with transaction by the qgroup rescan worker. 1343 */ 1344 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1345 btrfs_qgroup_wait_for_completion(fs_info, false); 1346 1347 /* 1348 * We have nothing held here and no trans handle, just return the error 1349 * if there is one and set back the quota enabled bit since we didn't 1350 * actually disable quotas. 1351 */ 1352 ret = flush_reservations(fs_info); 1353 if (ret) { 1354 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1355 return ret; 1356 } 1357 1358 /* 1359 * 1 For the root item 1360 * 1361 * We should also reserve enough items for the quota tree deletion in 1362 * btrfs_clean_quota_tree but this is not done. 1363 * 1364 * Also, we must always start a transaction without holding the mutex 1365 * qgroup_ioctl_lock, see btrfs_quota_enable(). 1366 */ 1367 trans = btrfs_start_transaction(fs_info->tree_root, 1); 1368 1369 mutex_lock(&fs_info->qgroup_ioctl_lock); 1370 if (IS_ERR(trans)) { 1371 ret = PTR_ERR(trans); 1372 trans = NULL; 1373 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 1374 goto out; 1375 } 1376 1377 if (!fs_info->quota_root) 1378 goto out; 1379 1380 spin_lock(&fs_info->qgroup_lock); 1381 quota_root = fs_info->quota_root; 1382 fs_info->quota_root = NULL; 1383 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 1384 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE; 1385 fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT; 1386 spin_unlock(&fs_info->qgroup_lock); 1387 1388 btrfs_free_qgroup_config(fs_info); 1389 1390 ret = btrfs_clean_quota_tree(trans, quota_root); 1391 if (unlikely(ret)) { 1392 btrfs_abort_transaction(trans, ret); 1393 goto out; 1394 } 1395 1396 ret = btrfs_del_root(trans, "a_root->root_key); 1397 if (unlikely(ret)) { 1398 btrfs_abort_transaction(trans, ret); 1399 goto out; 1400 } 1401 1402 spin_lock(&fs_info->trans_lock); 1403 list_del("a_root->dirty_list); 1404 spin_unlock(&fs_info->trans_lock); 1405 1406 btrfs_tree_lock(quota_root->node); 1407 btrfs_clear_buffer_dirty(trans, quota_root->node); 1408 btrfs_tree_unlock(quota_root->node); 1409 ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root), 1410 quota_root->node, 0, 1); 1411 1412 if (ret < 0) 1413 btrfs_abort_transaction(trans, ret); 1414 1415 out: 1416 btrfs_put_root(quota_root); 1417 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1418 if (ret && trans) 1419 btrfs_end_transaction(trans); 1420 else if (trans) 1421 ret = btrfs_commit_transaction(trans); 1422 return ret; 1423 } 1424 1425 static void qgroup_dirty(struct btrfs_fs_info *fs_info, 1426 struct btrfs_qgroup *qgroup) 1427 { 1428 if (list_empty(&qgroup->dirty)) 1429 list_add(&qgroup->dirty, &fs_info->dirty_qgroups); 1430 } 1431 1432 static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup) 1433 { 1434 if (!list_empty(&qgroup->iterator)) 1435 return; 1436 1437 list_add_tail(&qgroup->iterator, head); 1438 } 1439 1440 static void qgroup_iterator_clean(struct list_head *head) 1441 { 1442 while (!list_empty(head)) { 1443 struct btrfs_qgroup *qgroup; 1444 1445 qgroup = list_first_entry(head, struct btrfs_qgroup, iterator); 1446 list_del_init(&qgroup->iterator); 1447 } 1448 } 1449 1450 /* 1451 * The easy accounting, we're updating qgroup relationship whose child qgroup 1452 * only has exclusive extents. 1453 * 1454 * In this case, all exclusive extents will also be exclusive for parent, so 1455 * excl/rfer just get added/removed. 1456 * 1457 * So is qgroup reservation space, which should also be added/removed to 1458 * parent. 1459 * Or when child tries to release reservation space, parent will underflow its 1460 * reservation (for relationship adding case). 1461 * 1462 * Caller should hold fs_info->qgroup_lock. 1463 */ 1464 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root, 1465 struct btrfs_qgroup *src, int sign) 1466 { 1467 struct btrfs_qgroup *qgroup; 1468 LIST_HEAD(qgroup_list); 1469 u64 num_bytes = src->excl; 1470 u64 num_bytes_cmpr = src->excl_cmpr; 1471 int ret = 0; 1472 1473 qgroup = find_qgroup_rb(fs_info, ref_root); 1474 if (!qgroup) 1475 goto out; 1476 1477 qgroup_iterator_add(&qgroup_list, qgroup); 1478 list_for_each_entry(qgroup, &qgroup_list, iterator) { 1479 struct btrfs_qgroup_list *glist; 1480 1481 qgroup->rfer += sign * num_bytes; 1482 qgroup->rfer_cmpr += sign * num_bytes_cmpr; 1483 1484 WARN_ON(sign < 0 && qgroup->excl < num_bytes); 1485 WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr); 1486 qgroup->excl += sign * num_bytes; 1487 qgroup->excl_cmpr += sign * num_bytes_cmpr; 1488 1489 if (sign > 0) 1490 qgroup_rsv_add_by_qgroup(fs_info, qgroup, src); 1491 else 1492 qgroup_rsv_release_by_qgroup(fs_info, qgroup, src); 1493 qgroup_dirty(fs_info, qgroup); 1494 1495 /* Append parent qgroups to @qgroup_list. */ 1496 list_for_each_entry(glist, &qgroup->groups, next_group) 1497 qgroup_iterator_add(&qgroup_list, glist->group); 1498 } 1499 ret = 0; 1500 out: 1501 qgroup_iterator_clean(&qgroup_list); 1502 return ret; 1503 } 1504 1505 1506 /* 1507 * Quick path for updating qgroup with only excl refs. 1508 * 1509 * In that case, just update all parent will be enough. 1510 * Or we needs to do a full rescan. 1511 * Caller should also hold fs_info->qgroup_lock. 1512 * 1513 * Return 0 for quick update, return >0 for need to full rescan 1514 * and mark INCONSISTENT flag. 1515 * Return < 0 for other error. 1516 */ 1517 static int quick_update_accounting(struct btrfs_fs_info *fs_info, 1518 u64 src, u64 dst, int sign) 1519 { 1520 struct btrfs_qgroup *qgroup; 1521 int ret = 1; 1522 1523 qgroup = find_qgroup_rb(fs_info, src); 1524 if (!qgroup) 1525 goto out; 1526 if (qgroup->excl == qgroup->rfer) { 1527 ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign); 1528 if (ret < 0) 1529 goto out; 1530 ret = 0; 1531 } 1532 out: 1533 if (ret) 1534 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 1535 return ret; 1536 } 1537 1538 /* 1539 * Add relation between @src and @dst qgroup. The @prealloc is allocated by the 1540 * callers and transferred here (either used or freed on error). 1541 */ 1542 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst, 1543 struct btrfs_qgroup_list *prealloc) 1544 { 1545 struct btrfs_fs_info *fs_info = trans->fs_info; 1546 struct btrfs_qgroup *parent; 1547 struct btrfs_qgroup *member; 1548 struct btrfs_qgroup_list *list; 1549 int ret = 0; 1550 1551 ASSERT(prealloc); 1552 1553 /* Check the level of src and dst first */ 1554 if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) { 1555 kfree(prealloc); 1556 return -EINVAL; 1557 } 1558 1559 mutex_lock(&fs_info->qgroup_ioctl_lock); 1560 if (!fs_info->quota_root) { 1561 ret = -ENOTCONN; 1562 goto out; 1563 } 1564 member = find_qgroup_rb(fs_info, src); 1565 parent = find_qgroup_rb(fs_info, dst); 1566 if (!member || !parent) { 1567 ret = -EINVAL; 1568 goto out; 1569 } 1570 1571 /* check if such qgroup relation exist firstly */ 1572 list_for_each_entry(list, &member->groups, next_group) { 1573 if (list->group == parent) { 1574 ret = -EEXIST; 1575 goto out; 1576 } 1577 } 1578 1579 ret = add_qgroup_relation_item(trans, src, dst); 1580 if (ret) 1581 goto out; 1582 1583 ret = add_qgroup_relation_item(trans, dst, src); 1584 if (ret) { 1585 del_qgroup_relation_item(trans, src, dst); 1586 goto out; 1587 } 1588 1589 spin_lock(&fs_info->qgroup_lock); 1590 ret = __add_relation_rb(prealloc, member, parent); 1591 prealloc = NULL; 1592 if (ret < 0) { 1593 spin_unlock(&fs_info->qgroup_lock); 1594 goto out; 1595 } 1596 ret = quick_update_accounting(fs_info, src, dst, 1); 1597 squota_check_parent_usage(fs_info, parent); 1598 spin_unlock(&fs_info->qgroup_lock); 1599 out: 1600 kfree(prealloc); 1601 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1602 return ret; 1603 } 1604 1605 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, 1606 u64 dst) 1607 { 1608 struct btrfs_fs_info *fs_info = trans->fs_info; 1609 struct btrfs_qgroup *parent; 1610 struct btrfs_qgroup *member; 1611 struct btrfs_qgroup_list *list; 1612 bool found = false; 1613 int ret = 0; 1614 int ret2; 1615 1616 if (!fs_info->quota_root) 1617 return -ENOTCONN; 1618 1619 member = find_qgroup_rb(fs_info, src); 1620 parent = find_qgroup_rb(fs_info, dst); 1621 /* 1622 * The parent/member pair doesn't exist, then try to delete the dead 1623 * relation items only. 1624 */ 1625 if (!member || !parent) 1626 goto delete_item; 1627 1628 /* check if such qgroup relation exist firstly */ 1629 list_for_each_entry(list, &member->groups, next_group) { 1630 if (list->group == parent) { 1631 found = true; 1632 break; 1633 } 1634 } 1635 1636 delete_item: 1637 ret = del_qgroup_relation_item(trans, src, dst); 1638 if (ret < 0 && ret != -ENOENT) 1639 return ret; 1640 ret2 = del_qgroup_relation_item(trans, dst, src); 1641 if (ret2 < 0 && ret2 != -ENOENT) 1642 return ret2; 1643 1644 /* At least one deletion succeeded, return 0 */ 1645 if (!ret || !ret2) 1646 ret = 0; 1647 1648 if (found) { 1649 spin_lock(&fs_info->qgroup_lock); 1650 del_relation_rb(fs_info, src, dst); 1651 ret = quick_update_accounting(fs_info, src, dst, -1); 1652 ASSERT(parent); 1653 squota_check_parent_usage(fs_info, parent); 1654 spin_unlock(&fs_info->qgroup_lock); 1655 } 1656 1657 return ret; 1658 } 1659 1660 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, 1661 u64 dst) 1662 { 1663 struct btrfs_fs_info *fs_info = trans->fs_info; 1664 int ret = 0; 1665 1666 mutex_lock(&fs_info->qgroup_ioctl_lock); 1667 ret = __del_qgroup_relation(trans, src, dst); 1668 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1669 1670 return ret; 1671 } 1672 1673 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) 1674 { 1675 struct btrfs_fs_info *fs_info = trans->fs_info; 1676 struct btrfs_root *quota_root; 1677 struct btrfs_qgroup *qgroup; 1678 struct btrfs_qgroup *prealloc = NULL; 1679 int ret = 0; 1680 1681 mutex_lock(&fs_info->qgroup_ioctl_lock); 1682 if (!fs_info->quota_root) { 1683 ret = -ENOTCONN; 1684 goto out; 1685 } 1686 quota_root = fs_info->quota_root; 1687 qgroup = find_qgroup_rb(fs_info, qgroupid); 1688 if (qgroup) { 1689 ret = -EEXIST; 1690 goto out; 1691 } 1692 1693 prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); 1694 if (!prealloc) { 1695 ret = -ENOMEM; 1696 goto out; 1697 } 1698 1699 ret = add_qgroup_item(trans, quota_root, qgroupid); 1700 if (ret) 1701 goto out; 1702 1703 spin_lock(&fs_info->qgroup_lock); 1704 qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid); 1705 spin_unlock(&fs_info->qgroup_lock); 1706 prealloc = NULL; 1707 1708 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); 1709 out: 1710 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1711 kfree(prealloc); 1712 return ret; 1713 } 1714 1715 static bool can_delete_parent_qgroup(struct btrfs_qgroup *qgroup) 1716 1717 { 1718 ASSERT(btrfs_qgroup_level(qgroup->qgroupid)); 1719 return list_empty(&qgroup->members); 1720 } 1721 1722 /* 1723 * Return true if we can delete the squota qgroup and false otherwise. 1724 * 1725 * Rules for whether we can delete: 1726 * 1727 * A subvolume qgroup can be removed iff the subvolume is fully deleted, which 1728 * is iff there is 0 usage in the qgroup. 1729 * 1730 * A higher level qgroup can be removed iff it has no members. 1731 * Note: We audit its usage to warn on inconsitencies without blocking deletion. 1732 */ 1733 static bool can_delete_squota_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup) 1734 { 1735 ASSERT(btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE); 1736 1737 if (btrfs_qgroup_level(qgroup->qgroupid) > 0) { 1738 squota_check_parent_usage(fs_info, qgroup); 1739 return can_delete_parent_qgroup(qgroup); 1740 } 1741 1742 return !(qgroup->rfer || qgroup->excl || qgroup->rfer_cmpr || qgroup->excl_cmpr); 1743 } 1744 1745 /* 1746 * Return 0 if we can not delete the qgroup (not empty or has children etc). 1747 * Return >0 if we can delete the qgroup. 1748 * Return <0 for other errors during tree search. 1749 */ 1750 static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup) 1751 { 1752 struct btrfs_key key; 1753 BTRFS_PATH_AUTO_FREE(path); 1754 1755 /* Since squotas cannot be inconsistent, they have special rules for deletion. */ 1756 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) 1757 return can_delete_squota_qgroup(fs_info, qgroup); 1758 1759 /* For higher level qgroup, we can only delete it if it has no child. */ 1760 if (btrfs_qgroup_level(qgroup->qgroupid)) 1761 return can_delete_parent_qgroup(qgroup); 1762 1763 /* 1764 * For level-0 qgroups, we can only delete it if it has no subvolume 1765 * for it. 1766 * This means even a subvolume is unlinked but not yet fully dropped, 1767 * we can not delete the qgroup. 1768 */ 1769 key.objectid = qgroup->qgroupid; 1770 key.type = BTRFS_ROOT_ITEM_KEY; 1771 key.offset = -1ULL; 1772 path = btrfs_alloc_path(); 1773 if (!path) 1774 return -ENOMEM; 1775 1776 /* 1777 * The @ret from btrfs_find_root() exactly matches our definition for 1778 * the return value, thus can be returned directly. 1779 */ 1780 return btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL); 1781 } 1782 1783 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) 1784 { 1785 struct btrfs_fs_info *fs_info = trans->fs_info; 1786 struct btrfs_qgroup *qgroup; 1787 struct btrfs_qgroup_list *list; 1788 int ret = 0; 1789 1790 mutex_lock(&fs_info->qgroup_ioctl_lock); 1791 if (!fs_info->quota_root) { 1792 ret = -ENOTCONN; 1793 goto out; 1794 } 1795 1796 qgroup = find_qgroup_rb(fs_info, qgroupid); 1797 if (!qgroup) { 1798 ret = -ENOENT; 1799 goto out; 1800 } 1801 1802 ret = can_delete_qgroup(fs_info, qgroup); 1803 if (ret < 0) 1804 goto out; 1805 if (ret == 0) { 1806 ret = -EBUSY; 1807 goto out; 1808 } 1809 1810 /* Check if there are no children of this qgroup */ 1811 if (!list_empty(&qgroup->members)) { 1812 ret = -EBUSY; 1813 goto out; 1814 } 1815 1816 ret = del_qgroup_item(trans, qgroupid); 1817 if (ret && ret != -ENOENT) 1818 goto out; 1819 1820 while (!list_empty(&qgroup->groups)) { 1821 list = list_first_entry(&qgroup->groups, 1822 struct btrfs_qgroup_list, next_group); 1823 ret = __del_qgroup_relation(trans, qgroupid, 1824 list->group->qgroupid); 1825 if (ret) 1826 goto out; 1827 } 1828 1829 spin_lock(&fs_info->qgroup_lock); 1830 /* 1831 * Warn on reserved space. The subvolume should has no child nor 1832 * corresponding subvolume. 1833 * Thus its reserved space should all be zero, no matter if qgroup 1834 * is consistent or the mode. 1835 */ 1836 if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] || 1837 qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] || 1838 qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) { 1839 DEBUG_WARN(); 1840 btrfs_warn_rl(fs_info, 1841 "to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu", 1842 btrfs_qgroup_level(qgroup->qgroupid), 1843 btrfs_qgroup_subvolid(qgroup->qgroupid), 1844 qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA], 1845 qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC], 1846 qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]); 1847 1848 } 1849 /* 1850 * The same for rfer/excl numbers, but that's only if our qgroup is 1851 * consistent and if it's in regular qgroup mode. 1852 * For simple mode it's not as accurate thus we can hit non-zero values 1853 * very frequently. 1854 */ 1855 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL && 1856 !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) { 1857 if (qgroup->rfer || qgroup->excl || 1858 qgroup->rfer_cmpr || qgroup->excl_cmpr) { 1859 DEBUG_WARN(); 1860 qgroup_mark_inconsistent(fs_info, 1861 "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu", 1862 btrfs_qgroup_level(qgroup->qgroupid), 1863 btrfs_qgroup_subvolid(qgroup->qgroupid), 1864 qgroup->rfer, qgroup->rfer_cmpr, 1865 qgroup->excl, qgroup->excl_cmpr); 1866 } 1867 } 1868 del_qgroup_rb(fs_info, qgroupid); 1869 spin_unlock(&fs_info->qgroup_lock); 1870 1871 /* 1872 * Remove the qgroup from sysfs now without holding the qgroup_lock 1873 * spinlock, since the sysfs_remove_group() function needs to take 1874 * the mutex kernfs_mutex through kernfs_remove_by_name_ns(). 1875 */ 1876 btrfs_sysfs_del_one_qgroup(fs_info, qgroup); 1877 kfree(qgroup); 1878 out: 1879 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1880 return ret; 1881 } 1882 1883 int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid) 1884 { 1885 struct btrfs_trans_handle *trans; 1886 int ret; 1887 1888 if (!btrfs_is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || 1889 !fs_info->quota_root) 1890 return 0; 1891 1892 /* 1893 * Commit current transaction to make sure all the rfer/excl numbers 1894 * get updated. 1895 */ 1896 ret = btrfs_commit_current_transaction(fs_info->quota_root); 1897 if (ret < 0) 1898 return ret; 1899 1900 /* Start new trans to delete the qgroup info and limit items. */ 1901 trans = btrfs_start_transaction(fs_info->quota_root, 2); 1902 if (IS_ERR(trans)) 1903 return PTR_ERR(trans); 1904 ret = btrfs_remove_qgroup(trans, subvolid); 1905 btrfs_end_transaction(trans); 1906 /* 1907 * It's squota and the subvolume still has numbers needed for future 1908 * accounting, in this case we can not delete it. Just skip it. 1909 * 1910 * Or the qgroup is already removed by a qgroup rescan. For both cases we're 1911 * safe to ignore them. 1912 */ 1913 if (ret == -EBUSY || ret == -ENOENT) 1914 ret = 0; 1915 return ret; 1916 } 1917 1918 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid, 1919 struct btrfs_qgroup_limit *limit) 1920 { 1921 struct btrfs_fs_info *fs_info = trans->fs_info; 1922 struct btrfs_qgroup *qgroup; 1923 int ret = 0; 1924 /* Sometimes we would want to clear the limit on this qgroup. 1925 * To meet this requirement, we treat the -1 as a special value 1926 * which tell kernel to clear the limit on this qgroup. 1927 */ 1928 const u64 CLEAR_VALUE = -1; 1929 1930 mutex_lock(&fs_info->qgroup_ioctl_lock); 1931 if (!fs_info->quota_root) { 1932 ret = -ENOTCONN; 1933 goto out; 1934 } 1935 1936 qgroup = find_qgroup_rb(fs_info, qgroupid); 1937 if (!qgroup) { 1938 ret = -ENOENT; 1939 goto out; 1940 } 1941 1942 spin_lock(&fs_info->qgroup_lock); 1943 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) { 1944 if (limit->max_rfer == CLEAR_VALUE) { 1945 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1946 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; 1947 qgroup->max_rfer = 0; 1948 } else { 1949 qgroup->max_rfer = limit->max_rfer; 1950 } 1951 } 1952 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) { 1953 if (limit->max_excl == CLEAR_VALUE) { 1954 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1955 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; 1956 qgroup->max_excl = 0; 1957 } else { 1958 qgroup->max_excl = limit->max_excl; 1959 } 1960 } 1961 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) { 1962 if (limit->rsv_rfer == CLEAR_VALUE) { 1963 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1964 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; 1965 qgroup->rsv_rfer = 0; 1966 } else { 1967 qgroup->rsv_rfer = limit->rsv_rfer; 1968 } 1969 } 1970 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) { 1971 if (limit->rsv_excl == CLEAR_VALUE) { 1972 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1973 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; 1974 qgroup->rsv_excl = 0; 1975 } else { 1976 qgroup->rsv_excl = limit->rsv_excl; 1977 } 1978 } 1979 qgroup->lim_flags |= limit->flags; 1980 1981 spin_unlock(&fs_info->qgroup_lock); 1982 1983 ret = update_qgroup_limit_item(trans, qgroup); 1984 if (ret) 1985 qgroup_mark_inconsistent(fs_info, "qgroup item update error %d", ret); 1986 1987 out: 1988 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1989 return ret; 1990 } 1991 1992 /* 1993 * Inform qgroup to trace one dirty extent, its info is recorded in @record. 1994 * So qgroup can account it at transaction committing time. 1995 * 1996 * No lock version, caller must acquire delayed ref lock and allocated memory, 1997 * then call btrfs_qgroup_trace_extent_post() after exiting lock context. 1998 * 1999 * Return 0 for success insert 2000 * Return >0 for existing record, caller can free @record safely. 2001 * Return <0 for insertion failure, caller can free @record safely. 2002 */ 2003 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, 2004 struct btrfs_delayed_ref_root *delayed_refs, 2005 struct btrfs_qgroup_extent_record *record, 2006 u64 bytenr) 2007 { 2008 struct btrfs_qgroup_extent_record *existing, *ret; 2009 const unsigned long index = (bytenr >> fs_info->sectorsize_bits); 2010 2011 if (!btrfs_qgroup_full_accounting(fs_info)) 2012 return 1; 2013 2014 #if BITS_PER_LONG == 32 2015 if (bytenr >= MAX_LFS_FILESIZE) { 2016 btrfs_err_rl(fs_info, 2017 "qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit", 2018 bytenr); 2019 btrfs_err_32bit_limit(fs_info); 2020 return -EOVERFLOW; 2021 } 2022 #endif 2023 2024 trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr); 2025 2026 xa_lock(&delayed_refs->dirty_extents); 2027 existing = xa_load(&delayed_refs->dirty_extents, index); 2028 if (existing) { 2029 if (record->data_rsv && !existing->data_rsv) { 2030 existing->data_rsv = record->data_rsv; 2031 existing->data_rsv_refroot = record->data_rsv_refroot; 2032 } 2033 xa_unlock(&delayed_refs->dirty_extents); 2034 return 1; 2035 } 2036 2037 ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC); 2038 xa_unlock(&delayed_refs->dirty_extents); 2039 if (xa_is_err(ret)) { 2040 qgroup_mark_inconsistent(fs_info, "xarray insert error: %d", xa_err(ret)); 2041 return xa_err(ret); 2042 } 2043 2044 return 0; 2045 } 2046 2047 /* 2048 * Post handler after qgroup_trace_extent_nolock(). 2049 * 2050 * NOTE: Current qgroup does the expensive backref walk at transaction 2051 * committing time with TRANS_STATE_COMMIT_DOING, this blocks incoming 2052 * new transaction. 2053 * This is designed to allow btrfs_find_all_roots() to get correct new_roots 2054 * result. 2055 * 2056 * However for old_roots there is no need to do backref walk at that time, 2057 * since we search commit roots to walk backref and result will always be 2058 * correct. 2059 * 2060 * Due to the nature of no lock version, we can't do backref there. 2061 * So we must call btrfs_qgroup_trace_extent_post() after exiting 2062 * spinlock context. 2063 * 2064 * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result 2065 * using current root, then we can move all expensive backref walk out of 2066 * transaction committing, but not now as qgroup accounting will be wrong again. 2067 */ 2068 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans, 2069 struct btrfs_qgroup_extent_record *qrecord, 2070 u64 bytenr) 2071 { 2072 struct btrfs_fs_info *fs_info = trans->fs_info; 2073 struct btrfs_backref_walk_ctx ctx = { 2074 .bytenr = bytenr, 2075 .fs_info = fs_info, 2076 }; 2077 int ret; 2078 2079 if (!btrfs_qgroup_full_accounting(fs_info)) 2080 return 0; 2081 /* 2082 * We are always called in a context where we are already holding a 2083 * transaction handle. Often we are called when adding a data delayed 2084 * reference from btrfs_truncate_inode_items() (truncating or unlinking), 2085 * in which case we will be holding a write lock on extent buffer from a 2086 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to 2087 * acquire fs_info->commit_root_sem, because that is a higher level lock 2088 * that must be acquired before locking any extent buffers. 2089 * 2090 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem 2091 * but we can't pass it a non-NULL transaction handle, because otherwise 2092 * it would not use commit roots and would lock extent buffers, causing 2093 * a deadlock if it ends up trying to read lock the same extent buffer 2094 * that was previously write locked at btrfs_truncate_inode_items(). 2095 * 2096 * So pass a NULL transaction handle to btrfs_find_all_roots() and 2097 * explicitly tell it to not acquire the commit_root_sem - if we are 2098 * holding a transaction handle we don't need its protection. 2099 */ 2100 ASSERT(trans != NULL); 2101 2102 if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING) 2103 return 0; 2104 2105 ret = btrfs_find_all_roots(&ctx, true); 2106 if (ret < 0) { 2107 qgroup_mark_inconsistent(fs_info, 2108 "error accounting new delayed refs extent: %d", ret); 2109 return 0; 2110 } 2111 2112 /* 2113 * Here we don't need to get the lock of 2114 * trans->transaction->delayed_refs, since inserted qrecord won't 2115 * be deleted, only qrecord->node may be modified (new qrecord insert) 2116 * 2117 * So modifying qrecord->old_roots is safe here 2118 */ 2119 qrecord->old_roots = ctx.roots; 2120 return 0; 2121 } 2122 2123 /* 2124 * Inform qgroup to trace one dirty extent, specified by @bytenr and 2125 * @num_bytes. 2126 * So qgroup can account it at commit trans time. 2127 * 2128 * Better encapsulated version, with memory allocation and backref walk for 2129 * commit roots. 2130 * So this can sleep. 2131 * 2132 * Return 0 if the operation is done. 2133 * Return <0 for error, like memory allocation failure or invalid parameter 2134 * (NULL trans) 2135 */ 2136 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, 2137 u64 num_bytes) 2138 { 2139 struct btrfs_fs_info *fs_info = trans->fs_info; 2140 struct btrfs_qgroup_extent_record *record; 2141 struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs; 2142 const unsigned long index = (bytenr >> fs_info->sectorsize_bits); 2143 int ret; 2144 2145 if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0) 2146 return 0; 2147 record = kzalloc(sizeof(*record), GFP_NOFS); 2148 if (!record) 2149 return -ENOMEM; 2150 2151 if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) { 2152 kfree(record); 2153 return -ENOMEM; 2154 } 2155 2156 record->num_bytes = num_bytes; 2157 2158 ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr); 2159 if (ret) { 2160 /* Clean up if insertion fails or item exists. */ 2161 xa_release(&delayed_refs->dirty_extents, index); 2162 kfree(record); 2163 return 0; 2164 } 2165 return btrfs_qgroup_trace_extent_post(trans, record, bytenr); 2166 } 2167 2168 /* 2169 * Inform qgroup to trace all leaf items of data 2170 * 2171 * Return 0 for success 2172 * Return <0 for error(ENOMEM) 2173 */ 2174 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, 2175 struct extent_buffer *eb) 2176 { 2177 struct btrfs_fs_info *fs_info = trans->fs_info; 2178 int nr = btrfs_header_nritems(eb); 2179 int i, extent_type, ret; 2180 struct btrfs_key key; 2181 struct btrfs_file_extent_item *fi; 2182 u64 bytenr, num_bytes; 2183 2184 /* We can be called directly from walk_up_proc() */ 2185 if (!btrfs_qgroup_full_accounting(fs_info)) 2186 return 0; 2187 2188 for (i = 0; i < nr; i++) { 2189 btrfs_item_key_to_cpu(eb, &key, i); 2190 2191 if (key.type != BTRFS_EXTENT_DATA_KEY) 2192 continue; 2193 2194 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); 2195 /* filter out non qgroup-accountable extents */ 2196 extent_type = btrfs_file_extent_type(eb, fi); 2197 2198 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 2199 continue; 2200 2201 bytenr = btrfs_file_extent_disk_bytenr(eb, fi); 2202 if (!bytenr) 2203 continue; 2204 2205 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); 2206 2207 ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes); 2208 if (ret) 2209 return ret; 2210 } 2211 cond_resched(); 2212 return 0; 2213 } 2214 2215 /* 2216 * Walk up the tree from the bottom, freeing leaves and any interior 2217 * nodes which have had all slots visited. If a node (leaf or 2218 * interior) is freed, the node above it will have it's slot 2219 * incremented. The root node will never be freed. 2220 * 2221 * At the end of this function, we should have a path which has all 2222 * slots incremented to the next position for a search. If we need to 2223 * read a new node it will be NULL and the node above it will have the 2224 * correct slot selected for a later read. 2225 * 2226 * If we increment the root nodes slot counter past the number of 2227 * elements, 1 is returned to signal completion of the search. 2228 */ 2229 static int adjust_slots_upwards(struct btrfs_path *path, int root_level) 2230 { 2231 int level = 0; 2232 int nr, slot; 2233 struct extent_buffer *eb; 2234 2235 if (root_level == 0) 2236 return 1; 2237 2238 while (level <= root_level) { 2239 eb = path->nodes[level]; 2240 nr = btrfs_header_nritems(eb); 2241 path->slots[level]++; 2242 slot = path->slots[level]; 2243 if (slot >= nr || level == 0) { 2244 /* 2245 * Don't free the root - we will detect this 2246 * condition after our loop and return a 2247 * positive value for caller to stop walking the tree. 2248 */ 2249 if (level != root_level) { 2250 btrfs_tree_unlock_rw(eb, path->locks[level]); 2251 path->locks[level] = 0; 2252 2253 free_extent_buffer(eb); 2254 path->nodes[level] = NULL; 2255 path->slots[level] = 0; 2256 } 2257 } else { 2258 /* 2259 * We have a valid slot to walk back down 2260 * from. Stop here so caller can process these 2261 * new nodes. 2262 */ 2263 break; 2264 } 2265 2266 level++; 2267 } 2268 2269 eb = path->nodes[root_level]; 2270 if (path->slots[root_level] >= btrfs_header_nritems(eb)) 2271 return 1; 2272 2273 return 0; 2274 } 2275 2276 /* 2277 * Helper function to trace a subtree tree block swap. 2278 * 2279 * The swap will happen in highest tree block, but there may be a lot of 2280 * tree blocks involved. 2281 * 2282 * For example: 2283 * OO = Old tree blocks 2284 * NN = New tree blocks allocated during balance 2285 * 2286 * File tree (257) Reloc tree for 257 2287 * L2 OO NN 2288 * / \ / \ 2289 * L1 OO OO (a) OO NN (a) 2290 * / \ / \ / \ / \ 2291 * L0 OO OO OO OO OO OO NN NN 2292 * (b) (c) (b) (c) 2293 * 2294 * When calling qgroup_trace_extent_swap(), we will pass: 2295 * @src_eb = OO(a) 2296 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ] 2297 * @dst_level = 0 2298 * @root_level = 1 2299 * 2300 * In that case, qgroup_trace_extent_swap() will search from OO(a) to 2301 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty. 2302 * 2303 * The main work of qgroup_trace_extent_swap() can be split into 3 parts: 2304 * 2305 * 1) Tree search from @src_eb 2306 * It should acts as a simplified btrfs_search_slot(). 2307 * The key for search can be extracted from @dst_path->nodes[dst_level] 2308 * (first key). 2309 * 2310 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty 2311 * NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty. 2312 * They should be marked during previous (@dst_level = 1) iteration. 2313 * 2314 * 3) Mark file extents in leaves dirty 2315 * We don't have good way to pick out new file extents only. 2316 * So we still follow the old method by scanning all file extents in 2317 * the leave. 2318 * 2319 * This function can free us from keeping two paths, thus later we only need 2320 * to care about how to iterate all new tree blocks in reloc tree. 2321 */ 2322 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans, 2323 struct extent_buffer *src_eb, 2324 struct btrfs_path *dst_path, 2325 int dst_level, int root_level, 2326 bool trace_leaf) 2327 { 2328 struct btrfs_key key; 2329 BTRFS_PATH_AUTO_FREE(src_path); 2330 struct btrfs_fs_info *fs_info = trans->fs_info; 2331 u32 nodesize = fs_info->nodesize; 2332 int cur_level = root_level; 2333 int ret; 2334 2335 BUG_ON(dst_level > root_level); 2336 /* Level mismatch */ 2337 if (btrfs_header_level(src_eb) != root_level) 2338 return -EINVAL; 2339 2340 src_path = btrfs_alloc_path(); 2341 if (!src_path) 2342 return -ENOMEM; 2343 2344 if (dst_level) 2345 btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0); 2346 else 2347 btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0); 2348 2349 /* For src_path */ 2350 refcount_inc(&src_eb->refs); 2351 src_path->nodes[root_level] = src_eb; 2352 src_path->slots[root_level] = dst_path->slots[root_level]; 2353 src_path->locks[root_level] = 0; 2354 2355 /* A simplified version of btrfs_search_slot() */ 2356 while (cur_level >= dst_level) { 2357 struct btrfs_key src_key; 2358 struct btrfs_key dst_key; 2359 2360 if (src_path->nodes[cur_level] == NULL) { 2361 struct extent_buffer *eb; 2362 int parent_slot; 2363 2364 eb = src_path->nodes[cur_level + 1]; 2365 parent_slot = src_path->slots[cur_level + 1]; 2366 2367 eb = btrfs_read_node_slot(eb, parent_slot); 2368 if (IS_ERR(eb)) 2369 return PTR_ERR(eb); 2370 2371 src_path->nodes[cur_level] = eb; 2372 2373 btrfs_tree_read_lock(eb); 2374 src_path->locks[cur_level] = BTRFS_READ_LOCK; 2375 } 2376 2377 src_path->slots[cur_level] = dst_path->slots[cur_level]; 2378 if (cur_level) { 2379 btrfs_node_key_to_cpu(dst_path->nodes[cur_level], 2380 &dst_key, dst_path->slots[cur_level]); 2381 btrfs_node_key_to_cpu(src_path->nodes[cur_level], 2382 &src_key, src_path->slots[cur_level]); 2383 } else { 2384 btrfs_item_key_to_cpu(dst_path->nodes[cur_level], 2385 &dst_key, dst_path->slots[cur_level]); 2386 btrfs_item_key_to_cpu(src_path->nodes[cur_level], 2387 &src_key, src_path->slots[cur_level]); 2388 } 2389 /* Content mismatch, something went wrong */ 2390 if (btrfs_comp_cpu_keys(&dst_key, &src_key)) 2391 return -ENOENT; 2392 cur_level--; 2393 } 2394 2395 /* 2396 * Now both @dst_path and @src_path have been populated, record the tree 2397 * blocks for qgroup accounting. 2398 */ 2399 ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start, 2400 nodesize); 2401 if (ret < 0) 2402 return ret; 2403 ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start, 2404 nodesize); 2405 if (ret < 0) 2406 return ret; 2407 2408 /* Record leaf file extents */ 2409 if (dst_level == 0 && trace_leaf) { 2410 ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]); 2411 if (ret < 0) 2412 return ret; 2413 ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]); 2414 } 2415 2416 return ret; 2417 } 2418 2419 /* 2420 * Helper function to do recursive generation-aware depth-first search, to 2421 * locate all new tree blocks in a subtree of reloc tree. 2422 * 2423 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot) 2424 * reloc tree 2425 * L2 NN (a) 2426 * / \ 2427 * L1 OO NN (b) 2428 * / \ / \ 2429 * L0 OO OO OO NN 2430 * (c) (d) 2431 * If we pass: 2432 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ], 2433 * @cur_level = 1 2434 * @root_level = 1 2435 * 2436 * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace 2437 * above tree blocks along with their counter parts in file tree. 2438 * While during search, old tree blocks OO(c) will be skipped as tree block swap 2439 * won't affect OO(c). 2440 */ 2441 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans, 2442 struct extent_buffer *src_eb, 2443 struct btrfs_path *dst_path, 2444 int cur_level, int root_level, 2445 u64 last_snapshot, bool trace_leaf) 2446 { 2447 struct btrfs_fs_info *fs_info = trans->fs_info; 2448 struct extent_buffer *eb; 2449 bool need_cleanup = false; 2450 int ret = 0; 2451 int i; 2452 2453 /* Level sanity check */ 2454 if (unlikely(cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 || 2455 root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 || 2456 root_level < cur_level)) { 2457 btrfs_err_rl(fs_info, 2458 "%s: bad levels, cur_level=%d root_level=%d", 2459 __func__, cur_level, root_level); 2460 return -EUCLEAN; 2461 } 2462 2463 /* Read the tree block if needed */ 2464 if (dst_path->nodes[cur_level] == NULL) { 2465 int parent_slot; 2466 u64 child_gen; 2467 2468 /* 2469 * dst_path->nodes[root_level] must be initialized before 2470 * calling this function. 2471 */ 2472 if (unlikely(cur_level == root_level)) { 2473 btrfs_err_rl(fs_info, 2474 "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d", 2475 __func__, root_level, root_level, cur_level); 2476 return -EUCLEAN; 2477 } 2478 2479 /* 2480 * We need to get child blockptr/gen from parent before we can 2481 * read it. 2482 */ 2483 eb = dst_path->nodes[cur_level + 1]; 2484 parent_slot = dst_path->slots[cur_level + 1]; 2485 child_gen = btrfs_node_ptr_generation(eb, parent_slot); 2486 2487 /* This node is old, no need to trace */ 2488 if (child_gen < last_snapshot) 2489 return ret; 2490 2491 eb = btrfs_read_node_slot(eb, parent_slot); 2492 if (IS_ERR(eb)) 2493 return PTR_ERR(eb); 2494 2495 dst_path->nodes[cur_level] = eb; 2496 dst_path->slots[cur_level] = 0; 2497 2498 btrfs_tree_read_lock(eb); 2499 dst_path->locks[cur_level] = BTRFS_READ_LOCK; 2500 need_cleanup = true; 2501 } 2502 2503 /* Now record this tree block and its counter part for qgroups */ 2504 ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level, 2505 root_level, trace_leaf); 2506 if (ret < 0) 2507 goto cleanup; 2508 2509 eb = dst_path->nodes[cur_level]; 2510 2511 if (cur_level > 0) { 2512 /* Iterate all child tree blocks */ 2513 for (i = 0; i < btrfs_header_nritems(eb); i++) { 2514 /* Skip old tree blocks as they won't be swapped */ 2515 if (btrfs_node_ptr_generation(eb, i) < last_snapshot) 2516 continue; 2517 dst_path->slots[cur_level] = i; 2518 2519 /* Recursive call (at most 7 times) */ 2520 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, 2521 dst_path, cur_level - 1, root_level, 2522 last_snapshot, trace_leaf); 2523 if (ret < 0) 2524 goto cleanup; 2525 } 2526 } 2527 2528 cleanup: 2529 if (need_cleanup) { 2530 /* Clean up */ 2531 btrfs_tree_unlock_rw(dst_path->nodes[cur_level], 2532 dst_path->locks[cur_level]); 2533 free_extent_buffer(dst_path->nodes[cur_level]); 2534 dst_path->nodes[cur_level] = NULL; 2535 dst_path->slots[cur_level] = 0; 2536 dst_path->locks[cur_level] = 0; 2537 } 2538 2539 return ret; 2540 } 2541 2542 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans, 2543 struct extent_buffer *src_eb, 2544 struct extent_buffer *dst_eb, 2545 u64 last_snapshot, bool trace_leaf) 2546 { 2547 struct btrfs_fs_info *fs_info = trans->fs_info; 2548 struct btrfs_path *dst_path = NULL; 2549 int level; 2550 int ret; 2551 2552 if (!btrfs_qgroup_full_accounting(fs_info)) 2553 return 0; 2554 2555 /* Wrong parameter order */ 2556 if (unlikely(btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb))) { 2557 btrfs_err_rl(fs_info, 2558 "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__, 2559 btrfs_header_generation(src_eb), 2560 btrfs_header_generation(dst_eb)); 2561 return -EUCLEAN; 2562 } 2563 2564 if (unlikely(!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb))) { 2565 ret = -EIO; 2566 goto out; 2567 } 2568 2569 level = btrfs_header_level(dst_eb); 2570 dst_path = btrfs_alloc_path(); 2571 if (!dst_path) { 2572 ret = -ENOMEM; 2573 goto out; 2574 } 2575 /* For dst_path */ 2576 refcount_inc(&dst_eb->refs); 2577 dst_path->nodes[level] = dst_eb; 2578 dst_path->slots[level] = 0; 2579 dst_path->locks[level] = 0; 2580 2581 /* Do the generation aware breadth-first search */ 2582 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level, 2583 level, last_snapshot, trace_leaf); 2584 if (ret < 0) 2585 goto out; 2586 ret = 0; 2587 2588 out: 2589 btrfs_free_path(dst_path); 2590 if (ret < 0) 2591 qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret); 2592 return ret; 2593 } 2594 2595 /* 2596 * Inform qgroup to trace a whole subtree, including all its child tree 2597 * blocks and data. 2598 * The root tree block is specified by @root_eb. 2599 * 2600 * Normally used by relocation(tree block swap) and subvolume deletion. 2601 * 2602 * Return 0 for success 2603 * Return <0 for error(ENOMEM or tree search error) 2604 */ 2605 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, 2606 struct extent_buffer *root_eb, 2607 u64 root_gen, int root_level) 2608 { 2609 struct btrfs_fs_info *fs_info = trans->fs_info; 2610 int ret = 0; 2611 int level; 2612 u8 drop_subptree_thres; 2613 struct extent_buffer *eb = root_eb; 2614 BTRFS_PATH_AUTO_FREE(path); 2615 2616 ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL); 2617 ASSERT(root_eb != NULL); 2618 2619 if (!btrfs_qgroup_full_accounting(fs_info)) 2620 return 0; 2621 2622 spin_lock(&fs_info->qgroup_lock); 2623 drop_subptree_thres = fs_info->qgroup_drop_subtree_thres; 2624 spin_unlock(&fs_info->qgroup_lock); 2625 2626 /* 2627 * This function only gets called for snapshot drop, if we hit a high 2628 * node here, it means we are going to change ownership for quite a lot 2629 * of extents, which will greatly slow down btrfs_commit_transaction(). 2630 * 2631 * So here if we find a high tree here, we just skip the accounting and 2632 * mark qgroup inconsistent. 2633 */ 2634 if (root_level >= drop_subptree_thres) { 2635 qgroup_mark_inconsistent(fs_info, "subtree level reached threshold"); 2636 return 0; 2637 } 2638 2639 if (!extent_buffer_uptodate(root_eb)) { 2640 struct btrfs_tree_parent_check check = { 2641 .transid = root_gen, 2642 .level = root_level 2643 }; 2644 2645 ret = btrfs_read_extent_buffer(root_eb, &check); 2646 if (ret) 2647 return ret; 2648 } 2649 2650 if (root_level == 0) 2651 return btrfs_qgroup_trace_leaf_items(trans, root_eb); 2652 2653 path = btrfs_alloc_path(); 2654 if (!path) 2655 return -ENOMEM; 2656 2657 /* 2658 * Walk down the tree. Missing extent blocks are filled in as 2659 * we go. Metadata is accounted every time we read a new 2660 * extent block. 2661 * 2662 * When we reach a leaf, we account for file extent items in it, 2663 * walk back up the tree (adjusting slot pointers as we go) 2664 * and restart the search process. 2665 */ 2666 refcount_inc(&root_eb->refs); /* For path */ 2667 path->nodes[root_level] = root_eb; 2668 path->slots[root_level] = 0; 2669 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ 2670 walk_down: 2671 level = root_level; 2672 while (level >= 0) { 2673 if (path->nodes[level] == NULL) { 2674 int parent_slot; 2675 u64 child_bytenr; 2676 2677 /* 2678 * We need to get child blockptr from parent before we 2679 * can read it. 2680 */ 2681 eb = path->nodes[level + 1]; 2682 parent_slot = path->slots[level + 1]; 2683 child_bytenr = btrfs_node_blockptr(eb, parent_slot); 2684 2685 eb = btrfs_read_node_slot(eb, parent_slot); 2686 if (IS_ERR(eb)) 2687 return PTR_ERR(eb); 2688 2689 path->nodes[level] = eb; 2690 path->slots[level] = 0; 2691 2692 btrfs_tree_read_lock(eb); 2693 path->locks[level] = BTRFS_READ_LOCK; 2694 2695 ret = btrfs_qgroup_trace_extent(trans, child_bytenr, 2696 fs_info->nodesize); 2697 if (ret) 2698 return ret; 2699 } 2700 2701 if (level == 0) { 2702 ret = btrfs_qgroup_trace_leaf_items(trans, 2703 path->nodes[level]); 2704 if (ret) 2705 return ret; 2706 2707 /* Nonzero return here means we completed our search */ 2708 ret = adjust_slots_upwards(path, root_level); 2709 if (ret) 2710 break; 2711 2712 /* Restart search with new slots */ 2713 goto walk_down; 2714 } 2715 2716 level--; 2717 } 2718 2719 return 0; 2720 } 2721 2722 static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup) 2723 { 2724 if (!list_empty(&qgroup->nested_iterator)) 2725 return; 2726 2727 list_add_tail(&qgroup->nested_iterator, head); 2728 } 2729 2730 static void qgroup_iterator_nested_clean(struct list_head *head) 2731 { 2732 while (!list_empty(head)) { 2733 struct btrfs_qgroup *qgroup; 2734 2735 qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator); 2736 list_del_init(&qgroup->nested_iterator); 2737 } 2738 } 2739 2740 #define UPDATE_NEW 0 2741 #define UPDATE_OLD 1 2742 /* 2743 * Walk all of the roots that points to the bytenr and adjust their refcnts. 2744 */ 2745 static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info, 2746 struct ulist *roots, struct list_head *qgroups, 2747 u64 seq, bool update_old) 2748 { 2749 struct ulist_node *unode; 2750 struct ulist_iterator uiter; 2751 struct btrfs_qgroup *qg; 2752 2753 if (!roots) 2754 return; 2755 ULIST_ITER_INIT(&uiter); 2756 while ((unode = ulist_next(roots, &uiter))) { 2757 LIST_HEAD(tmp); 2758 2759 qg = find_qgroup_rb(fs_info, unode->val); 2760 if (!qg) 2761 continue; 2762 2763 qgroup_iterator_nested_add(qgroups, qg); 2764 qgroup_iterator_add(&tmp, qg); 2765 list_for_each_entry(qg, &tmp, iterator) { 2766 struct btrfs_qgroup_list *glist; 2767 2768 if (update_old) 2769 btrfs_qgroup_update_old_refcnt(qg, seq, 1); 2770 else 2771 btrfs_qgroup_update_new_refcnt(qg, seq, 1); 2772 2773 list_for_each_entry(glist, &qg->groups, next_group) { 2774 qgroup_iterator_nested_add(qgroups, glist->group); 2775 qgroup_iterator_add(&tmp, glist->group); 2776 } 2777 } 2778 qgroup_iterator_clean(&tmp); 2779 } 2780 } 2781 2782 /* 2783 * Update qgroup rfer/excl counters. 2784 * Rfer update is easy, codes can explain themselves. 2785 * 2786 * Excl update is tricky, the update is split into 2 parts. 2787 * Part 1: Possible exclusive <-> sharing detect: 2788 * | A | !A | 2789 * ------------------------------------- 2790 * B | * | - | 2791 * ------------------------------------- 2792 * !B | + | ** | 2793 * ------------------------------------- 2794 * 2795 * Conditions: 2796 * A: cur_old_roots < nr_old_roots (not exclusive before) 2797 * !A: cur_old_roots == nr_old_roots (possible exclusive before) 2798 * B: cur_new_roots < nr_new_roots (not exclusive now) 2799 * !B: cur_new_roots == nr_new_roots (possible exclusive now) 2800 * 2801 * Results: 2802 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing 2803 * *: Definitely not changed. **: Possible unchanged. 2804 * 2805 * For !A and !B condition, the exception is cur_old/new_roots == 0 case. 2806 * 2807 * To make the logic clear, we first use condition A and B to split 2808 * combination into 4 results. 2809 * 2810 * Then, for result "+" and "-", check old/new_roots == 0 case, as in them 2811 * only on variant maybe 0. 2812 * 2813 * Lastly, check result **, since there are 2 variants maybe 0, split them 2814 * again(2x2). 2815 * But this time we don't need to consider other things, the codes and logic 2816 * is easy to understand now. 2817 */ 2818 static void qgroup_update_counters(struct btrfs_fs_info *fs_info, 2819 struct list_head *qgroups, u64 nr_old_roots, 2820 u64 nr_new_roots, u64 num_bytes, u64 seq) 2821 { 2822 struct btrfs_qgroup *qg; 2823 2824 list_for_each_entry(qg, qgroups, nested_iterator) { 2825 u64 cur_new_count, cur_old_count; 2826 bool dirty = false; 2827 2828 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); 2829 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); 2830 2831 trace_btrfs_qgroup_update_counters(fs_info, qg, cur_old_count, 2832 cur_new_count); 2833 2834 /* Rfer update part */ 2835 if (cur_old_count == 0 && cur_new_count > 0) { 2836 qg->rfer += num_bytes; 2837 qg->rfer_cmpr += num_bytes; 2838 dirty = true; 2839 } 2840 if (cur_old_count > 0 && cur_new_count == 0) { 2841 qg->rfer -= num_bytes; 2842 qg->rfer_cmpr -= num_bytes; 2843 dirty = true; 2844 } 2845 2846 /* Excl update part */ 2847 /* Exclusive/none -> shared case */ 2848 if (cur_old_count == nr_old_roots && 2849 cur_new_count < nr_new_roots) { 2850 /* Exclusive -> shared */ 2851 if (cur_old_count != 0) { 2852 qg->excl -= num_bytes; 2853 qg->excl_cmpr -= num_bytes; 2854 dirty = true; 2855 } 2856 } 2857 2858 /* Shared -> exclusive/none case */ 2859 if (cur_old_count < nr_old_roots && 2860 cur_new_count == nr_new_roots) { 2861 /* Shared->exclusive */ 2862 if (cur_new_count != 0) { 2863 qg->excl += num_bytes; 2864 qg->excl_cmpr += num_bytes; 2865 dirty = true; 2866 } 2867 } 2868 2869 /* Exclusive/none -> exclusive/none case */ 2870 if (cur_old_count == nr_old_roots && 2871 cur_new_count == nr_new_roots) { 2872 if (cur_old_count == 0) { 2873 /* None -> exclusive/none */ 2874 2875 if (cur_new_count != 0) { 2876 /* None -> exclusive */ 2877 qg->excl += num_bytes; 2878 qg->excl_cmpr += num_bytes; 2879 dirty = true; 2880 } 2881 /* None -> none, nothing changed */ 2882 } else { 2883 /* Exclusive -> exclusive/none */ 2884 2885 if (cur_new_count == 0) { 2886 /* Exclusive -> none */ 2887 qg->excl -= num_bytes; 2888 qg->excl_cmpr -= num_bytes; 2889 dirty = true; 2890 } 2891 /* Exclusive -> exclusive, nothing changed */ 2892 } 2893 } 2894 2895 if (dirty) 2896 qgroup_dirty(fs_info, qg); 2897 } 2898 } 2899 2900 /* 2901 * Check if the @roots potentially is a list of fs tree roots 2902 * 2903 * Return 0 for definitely not a fs/subvol tree roots ulist 2904 * Return 1 for possible fs/subvol tree roots in the list (considering an empty 2905 * one as well) 2906 */ 2907 static int maybe_fs_roots(struct ulist *roots) 2908 { 2909 struct ulist_node *unode; 2910 struct ulist_iterator uiter; 2911 2912 /* Empty one, still possible for fs roots */ 2913 if (!roots || roots->nnodes == 0) 2914 return 1; 2915 2916 ULIST_ITER_INIT(&uiter); 2917 unode = ulist_next(roots, &uiter); 2918 if (!unode) 2919 return 1; 2920 2921 /* 2922 * If it contains fs tree roots, then it must belong to fs/subvol 2923 * trees. 2924 * If it contains a non-fs tree, it won't be shared with fs/subvol trees. 2925 */ 2926 return btrfs_is_fstree(unode->val); 2927 } 2928 2929 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, 2930 u64 num_bytes, struct ulist *old_roots, 2931 struct ulist *new_roots) 2932 { 2933 struct btrfs_fs_info *fs_info = trans->fs_info; 2934 LIST_HEAD(qgroups); 2935 u64 seq; 2936 u64 nr_new_roots = 0; 2937 u64 nr_old_roots = 0; 2938 int ret = 0; 2939 2940 /* 2941 * If quotas get disabled meanwhile, the resources need to be freed and 2942 * we can't just exit here. 2943 */ 2944 if (!btrfs_qgroup_full_accounting(fs_info) || 2945 fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING) 2946 goto out_free; 2947 2948 if (new_roots) { 2949 if (!maybe_fs_roots(new_roots)) 2950 goto out_free; 2951 nr_new_roots = new_roots->nnodes; 2952 } 2953 if (old_roots) { 2954 if (!maybe_fs_roots(old_roots)) 2955 goto out_free; 2956 nr_old_roots = old_roots->nnodes; 2957 } 2958 2959 /* Quick exit, either not fs tree roots, or won't affect any qgroup */ 2960 if (nr_old_roots == 0 && nr_new_roots == 0) 2961 goto out_free; 2962 2963 trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr, 2964 num_bytes, nr_old_roots, nr_new_roots); 2965 2966 mutex_lock(&fs_info->qgroup_rescan_lock); 2967 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 2968 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) { 2969 mutex_unlock(&fs_info->qgroup_rescan_lock); 2970 ret = 0; 2971 goto out_free; 2972 } 2973 } 2974 mutex_unlock(&fs_info->qgroup_rescan_lock); 2975 2976 spin_lock(&fs_info->qgroup_lock); 2977 seq = fs_info->qgroup_seq; 2978 2979 /* Update old refcnts using old_roots */ 2980 qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD); 2981 2982 /* Update new refcnts using new_roots */ 2983 qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW); 2984 2985 qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots, 2986 num_bytes, seq); 2987 2988 /* 2989 * We're done using the iterator, release all its qgroups while holding 2990 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup() 2991 * and trigger use-after-free accesses to qgroups. 2992 */ 2993 qgroup_iterator_nested_clean(&qgroups); 2994 2995 /* 2996 * Bump qgroup_seq to avoid seq overlap 2997 */ 2998 fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1; 2999 spin_unlock(&fs_info->qgroup_lock); 3000 out_free: 3001 ulist_free(old_roots); 3002 ulist_free(new_roots); 3003 return ret; 3004 } 3005 3006 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) 3007 { 3008 struct btrfs_fs_info *fs_info = trans->fs_info; 3009 struct btrfs_qgroup_extent_record *record; 3010 struct btrfs_delayed_ref_root *delayed_refs; 3011 struct ulist *new_roots = NULL; 3012 unsigned long index; 3013 u64 num_dirty_extents = 0; 3014 u64 qgroup_to_skip; 3015 int ret = 0; 3016 3017 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) 3018 return 0; 3019 3020 delayed_refs = &trans->transaction->delayed_refs; 3021 qgroup_to_skip = delayed_refs->qgroup_to_skip; 3022 xa_for_each(&delayed_refs->dirty_extents, index, record) { 3023 const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits); 3024 3025 num_dirty_extents++; 3026 trace_btrfs_qgroup_account_extents(fs_info, record, bytenr); 3027 3028 if (!ret && !(fs_info->qgroup_flags & 3029 BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) { 3030 struct btrfs_backref_walk_ctx ctx = { 0 }; 3031 3032 ctx.bytenr = bytenr; 3033 ctx.fs_info = fs_info; 3034 3035 /* 3036 * Old roots should be searched when inserting qgroup 3037 * extent record. 3038 * 3039 * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case, 3040 * we may have some record inserted during 3041 * NO_ACCOUNTING (thus no old_roots populated), but 3042 * later we start rescan, which clears NO_ACCOUNTING, 3043 * leaving some inserted records without old_roots 3044 * populated. 3045 * 3046 * Those cases are rare and should not cause too much 3047 * time spent during commit_transaction(). 3048 */ 3049 if (!record->old_roots) { 3050 /* Search commit root to find old_roots */ 3051 ret = btrfs_find_all_roots(&ctx, false); 3052 if (ret < 0) 3053 goto cleanup; 3054 record->old_roots = ctx.roots; 3055 ctx.roots = NULL; 3056 } 3057 3058 /* 3059 * Use BTRFS_SEQ_LAST as time_seq to do special search, 3060 * which doesn't lock tree or delayed_refs and search 3061 * current root. It's safe inside commit_transaction(). 3062 */ 3063 ctx.trans = trans; 3064 ctx.time_seq = BTRFS_SEQ_LAST; 3065 ret = btrfs_find_all_roots(&ctx, false); 3066 if (ret < 0) 3067 goto cleanup; 3068 new_roots = ctx.roots; 3069 if (qgroup_to_skip) { 3070 ulist_del(new_roots, qgroup_to_skip, 0); 3071 ulist_del(record->old_roots, qgroup_to_skip, 3072 0); 3073 } 3074 ret = btrfs_qgroup_account_extent(trans, bytenr, 3075 record->num_bytes, 3076 record->old_roots, 3077 new_roots); 3078 record->old_roots = NULL; 3079 new_roots = NULL; 3080 } 3081 /* Free the reserved data space */ 3082 btrfs_qgroup_free_refroot(fs_info, 3083 record->data_rsv_refroot, 3084 record->data_rsv, 3085 BTRFS_QGROUP_RSV_DATA); 3086 cleanup: 3087 ulist_free(record->old_roots); 3088 ulist_free(new_roots); 3089 new_roots = NULL; 3090 xa_erase(&delayed_refs->dirty_extents, index); 3091 kfree(record); 3092 3093 } 3094 trace_btrfs_qgroup_num_dirty_extents(fs_info, trans->transid, num_dirty_extents); 3095 return ret; 3096 } 3097 3098 /* 3099 * Writes all changed qgroups to disk. 3100 * Called by the transaction commit path and the qgroup assign ioctl. 3101 */ 3102 int btrfs_run_qgroups(struct btrfs_trans_handle *trans) 3103 { 3104 struct btrfs_fs_info *fs_info = trans->fs_info; 3105 int ret = 0; 3106 3107 /* 3108 * In case we are called from the qgroup assign ioctl, assert that we 3109 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota 3110 * disable operation (ioctl) and access a freed quota root. 3111 */ 3112 if (trans->transaction->state != TRANS_STATE_COMMIT_DOING) 3113 lockdep_assert_held(&fs_info->qgroup_ioctl_lock); 3114 3115 if (!fs_info->quota_root) 3116 return ret; 3117 3118 spin_lock(&fs_info->qgroup_lock); 3119 while (!list_empty(&fs_info->dirty_qgroups)) { 3120 struct btrfs_qgroup *qgroup; 3121 qgroup = list_first_entry(&fs_info->dirty_qgroups, 3122 struct btrfs_qgroup, dirty); 3123 list_del_init(&qgroup->dirty); 3124 spin_unlock(&fs_info->qgroup_lock); 3125 ret = update_qgroup_info_item(trans, qgroup); 3126 if (ret) 3127 qgroup_mark_inconsistent(fs_info, 3128 "qgroup info item update error %d", ret); 3129 ret = update_qgroup_limit_item(trans, qgroup); 3130 if (ret) 3131 qgroup_mark_inconsistent(fs_info, 3132 "qgroup limit item update error %d", ret); 3133 spin_lock(&fs_info->qgroup_lock); 3134 } 3135 if (btrfs_qgroup_enabled(fs_info)) 3136 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; 3137 else 3138 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 3139 spin_unlock(&fs_info->qgroup_lock); 3140 3141 ret = update_qgroup_status_item(trans); 3142 if (ret) 3143 qgroup_mark_inconsistent(fs_info, 3144 "qgroup status item update error %d", ret); 3145 3146 return ret; 3147 } 3148 3149 int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info, 3150 struct btrfs_qgroup_inherit *inherit, 3151 size_t size) 3152 { 3153 if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP) 3154 return -EOPNOTSUPP; 3155 if (size < sizeof(*inherit) || size > PAGE_SIZE) 3156 return -EINVAL; 3157 3158 /* 3159 * In the past we allowed btrfs_qgroup_inherit to specify to copy 3160 * rfer/excl numbers directly from other qgroups. This behavior has 3161 * been disabled in userspace for a very long time, but here we should 3162 * also disable it in kernel, as this behavior is known to mark qgroup 3163 * inconsistent, and a rescan would wipe out the changes anyway. 3164 * 3165 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies. 3166 */ 3167 if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0) 3168 return -EINVAL; 3169 3170 if (size != struct_size(inherit, qgroups, inherit->num_qgroups)) 3171 return -EINVAL; 3172 3173 /* 3174 * Skip the inherit source qgroups check if qgroup is not enabled. 3175 * Qgroup can still be later enabled causing problems, but in that case 3176 * btrfs_qgroup_inherit() would just ignore those invalid ones. 3177 */ 3178 if (!btrfs_qgroup_enabled(fs_info)) 3179 return 0; 3180 3181 /* 3182 * Now check all the remaining qgroups, they should all: 3183 * 3184 * - Exist 3185 * - Be higher level qgroups. 3186 */ 3187 for (int i = 0; i < inherit->num_qgroups; i++) { 3188 struct btrfs_qgroup *qgroup; 3189 u64 qgroupid = inherit->qgroups[i]; 3190 3191 if (btrfs_qgroup_level(qgroupid) == 0) 3192 return -EINVAL; 3193 3194 spin_lock(&fs_info->qgroup_lock); 3195 qgroup = find_qgroup_rb(fs_info, qgroupid); 3196 if (!qgroup) { 3197 spin_unlock(&fs_info->qgroup_lock); 3198 return -ENOENT; 3199 } 3200 spin_unlock(&fs_info->qgroup_lock); 3201 } 3202 return 0; 3203 } 3204 3205 static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info, 3206 u64 inode_rootid, 3207 struct btrfs_qgroup_inherit **inherit) 3208 { 3209 int i = 0; 3210 u64 num_qgroups = 0; 3211 struct btrfs_qgroup *inode_qg; 3212 struct btrfs_qgroup_list *qg_list; 3213 struct btrfs_qgroup_inherit *res; 3214 size_t struct_sz; 3215 u64 *qgids; 3216 3217 if (*inherit) 3218 return -EEXIST; 3219 3220 inode_qg = find_qgroup_rb(fs_info, inode_rootid); 3221 if (!inode_qg) 3222 return -ENOENT; 3223 3224 num_qgroups = list_count_nodes(&inode_qg->groups); 3225 3226 if (!num_qgroups) 3227 return 0; 3228 3229 struct_sz = struct_size(res, qgroups, num_qgroups); 3230 if (struct_sz == SIZE_MAX) 3231 return -ERANGE; 3232 3233 res = kzalloc(struct_sz, GFP_NOFS); 3234 if (!res) 3235 return -ENOMEM; 3236 res->num_qgroups = num_qgroups; 3237 qgids = res->qgroups; 3238 3239 list_for_each_entry(qg_list, &inode_qg->groups, next_group) 3240 qgids[i++] = qg_list->group->qgroupid; 3241 3242 *inherit = res; 3243 return 0; 3244 } 3245 3246 /* 3247 * Check if we can skip rescan when inheriting qgroups. If @src has a single 3248 * @parent, and that @parent is owning all its bytes exclusively, we can skip 3249 * the full rescan, by just adding nodesize to the @parent's excl/rfer. 3250 * 3251 * Return <0 for fatal errors (like srcid/parentid has no qgroup). 3252 * Return 0 if a quick inherit is done. 3253 * Return >0 if a quick inherit is not possible, and a full rescan is needed. 3254 */ 3255 static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info, 3256 u64 srcid, u64 parentid) 3257 { 3258 struct btrfs_qgroup *src; 3259 struct btrfs_qgroup *parent; 3260 struct btrfs_qgroup *qgroup; 3261 struct btrfs_qgroup_list *list; 3262 LIST_HEAD(qgroup_list); 3263 const u32 nodesize = fs_info->nodesize; 3264 int nr_parents = 0; 3265 3266 if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_FULL) 3267 return 0; 3268 3269 src = find_qgroup_rb(fs_info, srcid); 3270 if (!src) 3271 return -ENOENT; 3272 parent = find_qgroup_rb(fs_info, parentid); 3273 if (!parent) 3274 return -ENOENT; 3275 3276 /* 3277 * Source has no parent qgroup, but our new qgroup would have one. 3278 * Qgroup numbers would become inconsistent. 3279 */ 3280 if (list_empty(&src->groups)) 3281 return 1; 3282 3283 list_for_each_entry(list, &src->groups, next_group) { 3284 /* The parent is not the same, quick update is not possible. */ 3285 if (list->group->qgroupid != parentid) 3286 return 1; 3287 nr_parents++; 3288 /* 3289 * More than one parent qgroup, we can't be sure about accounting 3290 * consistency. 3291 */ 3292 if (nr_parents > 1) 3293 return 1; 3294 } 3295 3296 /* 3297 * The parent is not exclusively owning all its bytes. We're not sure 3298 * if the source has any bytes not fully owned by the parent. 3299 */ 3300 if (parent->excl != parent->rfer) 3301 return 1; 3302 3303 qgroup_iterator_add(&qgroup_list, parent); 3304 list_for_each_entry(qgroup, &qgroup_list, iterator) { 3305 qgroup->rfer += nodesize; 3306 qgroup->rfer_cmpr += nodesize; 3307 qgroup->excl += nodesize; 3308 qgroup->excl_cmpr += nodesize; 3309 qgroup_dirty(fs_info, qgroup); 3310 3311 /* Append parent qgroups to @qgroup_list. */ 3312 list_for_each_entry(list, &qgroup->groups, next_group) 3313 qgroup_iterator_add(&qgroup_list, list->group); 3314 } 3315 qgroup_iterator_clean(&qgroup_list); 3316 return 0; 3317 } 3318 3319 /* 3320 * Copy the accounting information between qgroups. This is necessary 3321 * when a snapshot or a subvolume is created. Throwing an error will 3322 * cause a transaction abort so we take extra care here to only error 3323 * when a readonly fs is a reasonable outcome. 3324 */ 3325 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, 3326 u64 objectid, u64 inode_rootid, 3327 struct btrfs_qgroup_inherit *inherit) 3328 { 3329 int ret = 0; 3330 u64 *i_qgroups; 3331 bool committing = false; 3332 struct btrfs_fs_info *fs_info = trans->fs_info; 3333 struct btrfs_root *quota_root; 3334 struct btrfs_qgroup *srcgroup; 3335 struct btrfs_qgroup *dstgroup; 3336 struct btrfs_qgroup *prealloc; 3337 struct btrfs_qgroup_list **qlist_prealloc = NULL; 3338 bool free_inherit = false; 3339 bool need_rescan = false; 3340 u32 level_size = 0; 3341 u64 nums; 3342 3343 if (!btrfs_qgroup_enabled(fs_info)) 3344 return 0; 3345 3346 prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); 3347 if (!prealloc) 3348 return -ENOMEM; 3349 3350 /* 3351 * There are only two callers of this function. 3352 * 3353 * One in create_subvol() in the ioctl context, which needs to hold 3354 * the qgroup_ioctl_lock. 3355 * 3356 * The other one in create_pending_snapshot() where no other qgroup 3357 * code can modify the fs as they all need to either start a new trans 3358 * or hold a trans handler, thus we don't need to hold 3359 * qgroup_ioctl_lock. 3360 * This would avoid long and complex lock chain and make lockdep happy. 3361 */ 3362 spin_lock(&fs_info->trans_lock); 3363 if (trans->transaction->state == TRANS_STATE_COMMIT_DOING) 3364 committing = true; 3365 spin_unlock(&fs_info->trans_lock); 3366 3367 if (!committing) 3368 mutex_lock(&fs_info->qgroup_ioctl_lock); 3369 3370 quota_root = fs_info->quota_root; 3371 if (!quota_root) { 3372 ret = -EINVAL; 3373 goto out; 3374 } 3375 3376 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) { 3377 ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit); 3378 if (ret) 3379 goto out; 3380 free_inherit = true; 3381 } 3382 3383 if (inherit) { 3384 i_qgroups = (u64 *)(inherit + 1); 3385 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + 3386 2 * inherit->num_excl_copies; 3387 for (int i = 0; i < nums; i++) { 3388 srcgroup = find_qgroup_rb(fs_info, *i_qgroups); 3389 3390 /* 3391 * Zero out invalid groups so we can ignore 3392 * them later. 3393 */ 3394 if (!srcgroup || 3395 ((srcgroup->qgroupid >> 48) <= (objectid >> 48))) 3396 *i_qgroups = 0ULL; 3397 3398 ++i_qgroups; 3399 } 3400 } 3401 3402 /* 3403 * create a tracking group for the subvol itself 3404 */ 3405 ret = add_qgroup_item(trans, quota_root, objectid); 3406 if (ret) 3407 goto out; 3408 3409 /* 3410 * add qgroup to all inherited groups 3411 */ 3412 if (inherit) { 3413 i_qgroups = (u64 *)(inherit + 1); 3414 for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) { 3415 if (*i_qgroups == 0) 3416 continue; 3417 ret = add_qgroup_relation_item(trans, objectid, 3418 *i_qgroups); 3419 if (ret && ret != -EEXIST) 3420 goto out; 3421 ret = add_qgroup_relation_item(trans, *i_qgroups, 3422 objectid); 3423 if (ret && ret != -EEXIST) 3424 goto out; 3425 } 3426 ret = 0; 3427 3428 qlist_prealloc = kcalloc(inherit->num_qgroups, 3429 sizeof(struct btrfs_qgroup_list *), 3430 GFP_NOFS); 3431 if (!qlist_prealloc) { 3432 ret = -ENOMEM; 3433 goto out; 3434 } 3435 for (int i = 0; i < inherit->num_qgroups; i++) { 3436 qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list), 3437 GFP_NOFS); 3438 if (!qlist_prealloc[i]) { 3439 ret = -ENOMEM; 3440 goto out; 3441 } 3442 } 3443 } 3444 3445 spin_lock(&fs_info->qgroup_lock); 3446 3447 dstgroup = add_qgroup_rb(fs_info, prealloc, objectid); 3448 prealloc = NULL; 3449 3450 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) { 3451 dstgroup->lim_flags = inherit->lim.flags; 3452 dstgroup->max_rfer = inherit->lim.max_rfer; 3453 dstgroup->max_excl = inherit->lim.max_excl; 3454 dstgroup->rsv_rfer = inherit->lim.rsv_rfer; 3455 dstgroup->rsv_excl = inherit->lim.rsv_excl; 3456 3457 qgroup_dirty(fs_info, dstgroup); 3458 } 3459 3460 if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) { 3461 srcgroup = find_qgroup_rb(fs_info, srcid); 3462 if (!srcgroup) 3463 goto unlock; 3464 3465 /* 3466 * We call inherit after we clone the root in order to make sure 3467 * our counts don't go crazy, so at this point the only 3468 * difference between the two roots should be the root node. 3469 */ 3470 level_size = fs_info->nodesize; 3471 dstgroup->rfer = srcgroup->rfer; 3472 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; 3473 dstgroup->excl = level_size; 3474 dstgroup->excl_cmpr = level_size; 3475 srcgroup->excl = level_size; 3476 srcgroup->excl_cmpr = level_size; 3477 3478 /* inherit the limit info */ 3479 dstgroup->lim_flags = srcgroup->lim_flags; 3480 dstgroup->max_rfer = srcgroup->max_rfer; 3481 dstgroup->max_excl = srcgroup->max_excl; 3482 dstgroup->rsv_rfer = srcgroup->rsv_rfer; 3483 dstgroup->rsv_excl = srcgroup->rsv_excl; 3484 3485 qgroup_dirty(fs_info, dstgroup); 3486 qgroup_dirty(fs_info, srcgroup); 3487 3488 /* 3489 * If the source qgroup has parent but the new one doesn't, 3490 * we need a full rescan. 3491 */ 3492 if (!inherit && !list_empty(&srcgroup->groups)) 3493 need_rescan = true; 3494 } 3495 3496 if (!inherit) 3497 goto unlock; 3498 3499 i_qgroups = (u64 *)(inherit + 1); 3500 for (int i = 0; i < inherit->num_qgroups; i++) { 3501 if (*i_qgroups) { 3502 ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid, 3503 *i_qgroups); 3504 qlist_prealloc[i] = NULL; 3505 if (ret) 3506 goto unlock; 3507 } 3508 if (srcid) { 3509 /* Check if we can do a quick inherit. */ 3510 ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups); 3511 if (ret < 0) 3512 goto unlock; 3513 if (ret > 0) 3514 need_rescan = true; 3515 ret = 0; 3516 } 3517 ++i_qgroups; 3518 } 3519 3520 for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) { 3521 struct btrfs_qgroup *src; 3522 struct btrfs_qgroup *dst; 3523 3524 if (!i_qgroups[0] || !i_qgroups[1]) 3525 continue; 3526 3527 src = find_qgroup_rb(fs_info, i_qgroups[0]); 3528 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 3529 3530 if (!src || !dst) { 3531 ret = -EINVAL; 3532 goto unlock; 3533 } 3534 3535 dst->rfer = src->rfer - level_size; 3536 dst->rfer_cmpr = src->rfer_cmpr - level_size; 3537 3538 /* Manually tweaking numbers certainly needs a rescan */ 3539 need_rescan = true; 3540 } 3541 for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) { 3542 struct btrfs_qgroup *src; 3543 struct btrfs_qgroup *dst; 3544 3545 if (!i_qgroups[0] || !i_qgroups[1]) 3546 continue; 3547 3548 src = find_qgroup_rb(fs_info, i_qgroups[0]); 3549 dst = find_qgroup_rb(fs_info, i_qgroups[1]); 3550 3551 if (!src || !dst) { 3552 ret = -EINVAL; 3553 goto unlock; 3554 } 3555 3556 dst->excl = src->excl + level_size; 3557 dst->excl_cmpr = src->excl_cmpr + level_size; 3558 need_rescan = true; 3559 } 3560 3561 unlock: 3562 spin_unlock(&fs_info->qgroup_lock); 3563 if (!ret) 3564 ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup); 3565 out: 3566 if (!committing) 3567 mutex_unlock(&fs_info->qgroup_ioctl_lock); 3568 if (need_rescan) 3569 qgroup_mark_inconsistent(fs_info, "qgroup inherit needs a rescan"); 3570 if (qlist_prealloc) { 3571 for (int i = 0; i < inherit->num_qgroups; i++) 3572 kfree(qlist_prealloc[i]); 3573 kfree(qlist_prealloc); 3574 } 3575 if (free_inherit) 3576 kfree(inherit); 3577 kfree(prealloc); 3578 return ret; 3579 } 3580 3581 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) 3582 { 3583 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 3584 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) 3585 return false; 3586 3587 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && 3588 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) 3589 return false; 3590 3591 return true; 3592 } 3593 3594 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, 3595 enum btrfs_qgroup_rsv_type type) 3596 { 3597 struct btrfs_qgroup *qgroup; 3598 struct btrfs_fs_info *fs_info = root->fs_info; 3599 u64 ref_root = btrfs_root_id(root); 3600 int ret = 0; 3601 LIST_HEAD(qgroup_list); 3602 3603 if (!btrfs_is_fstree(ref_root)) 3604 return 0; 3605 3606 if (num_bytes == 0) 3607 return 0; 3608 3609 if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) && 3610 capable(CAP_SYS_RESOURCE)) 3611 enforce = false; 3612 3613 spin_lock(&fs_info->qgroup_lock); 3614 if (!fs_info->quota_root) 3615 goto out; 3616 3617 qgroup = find_qgroup_rb(fs_info, ref_root); 3618 if (!qgroup) 3619 goto out; 3620 3621 qgroup_iterator_add(&qgroup_list, qgroup); 3622 list_for_each_entry(qgroup, &qgroup_list, iterator) { 3623 struct btrfs_qgroup_list *glist; 3624 3625 if (enforce && !qgroup_check_limits(qgroup, num_bytes)) { 3626 ret = -EDQUOT; 3627 goto out; 3628 } 3629 3630 list_for_each_entry(glist, &qgroup->groups, next_group) 3631 qgroup_iterator_add(&qgroup_list, glist->group); 3632 } 3633 3634 ret = 0; 3635 /* 3636 * no limits exceeded, now record the reservation into all qgroups 3637 */ 3638 list_for_each_entry(qgroup, &qgroup_list, iterator) 3639 qgroup_rsv_add(fs_info, qgroup, num_bytes, type); 3640 3641 out: 3642 qgroup_iterator_clean(&qgroup_list); 3643 spin_unlock(&fs_info->qgroup_lock); 3644 return ret; 3645 } 3646 3647 /* 3648 * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0 3649 * qgroup). 3650 * 3651 * Will handle all higher level qgroup too. 3652 * 3653 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup. 3654 * This special case is only used for META_PERTRANS type. 3655 */ 3656 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, 3657 u64 ref_root, u64 num_bytes, 3658 enum btrfs_qgroup_rsv_type type) 3659 { 3660 struct btrfs_qgroup *qgroup; 3661 LIST_HEAD(qgroup_list); 3662 3663 if (!btrfs_is_fstree(ref_root)) 3664 return; 3665 3666 if (num_bytes == 0) 3667 return; 3668 3669 if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) { 3670 WARN(1, "%s: Invalid type to free", __func__); 3671 return; 3672 } 3673 spin_lock(&fs_info->qgroup_lock); 3674 3675 if (!fs_info->quota_root) 3676 goto out; 3677 3678 qgroup = find_qgroup_rb(fs_info, ref_root); 3679 if (!qgroup) 3680 goto out; 3681 3682 if (num_bytes == (u64)-1) 3683 /* 3684 * We're freeing all pertrans rsv, get reserved value from 3685 * level 0 qgroup as real num_bytes to free. 3686 */ 3687 num_bytes = qgroup->rsv.values[type]; 3688 3689 qgroup_iterator_add(&qgroup_list, qgroup); 3690 list_for_each_entry(qgroup, &qgroup_list, iterator) { 3691 struct btrfs_qgroup_list *glist; 3692 3693 qgroup_rsv_release(fs_info, qgroup, num_bytes, type); 3694 list_for_each_entry(glist, &qgroup->groups, next_group) { 3695 qgroup_iterator_add(&qgroup_list, glist->group); 3696 } 3697 } 3698 out: 3699 qgroup_iterator_clean(&qgroup_list); 3700 spin_unlock(&fs_info->qgroup_lock); 3701 } 3702 3703 /* 3704 * Check if the leaf is the last leaf. Which means all node pointers 3705 * are at their last position. 3706 */ 3707 static bool is_last_leaf(struct btrfs_path *path) 3708 { 3709 int i; 3710 3711 for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { 3712 if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) 3713 return false; 3714 } 3715 return true; 3716 } 3717 3718 /* 3719 * returns < 0 on error, 0 when more leafs are to be scanned. 3720 * returns 1 when done. 3721 */ 3722 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans, 3723 struct btrfs_path *path) 3724 { 3725 struct btrfs_fs_info *fs_info = trans->fs_info; 3726 struct btrfs_root *extent_root; 3727 struct btrfs_key found; 3728 struct extent_buffer *scratch_leaf = NULL; 3729 u64 num_bytes; 3730 bool done; 3731 int slot; 3732 int ret; 3733 3734 if (!btrfs_qgroup_full_accounting(fs_info)) 3735 return 1; 3736 3737 mutex_lock(&fs_info->qgroup_rescan_lock); 3738 extent_root = btrfs_extent_root(fs_info, 3739 fs_info->qgroup_rescan_progress.objectid); 3740 ret = btrfs_search_slot_for_read(extent_root, 3741 &fs_info->qgroup_rescan_progress, 3742 path, 1, 0); 3743 3744 btrfs_debug(fs_info, 3745 "current progress key " BTRFS_KEY_FMT ", search_slot ret %d", 3746 BTRFS_KEY_FMT_VALUE(&fs_info->qgroup_rescan_progress), ret); 3747 3748 if (ret) { 3749 /* 3750 * The rescan is about to end, we will not be scanning any 3751 * further blocks. We cannot unset the RESCAN flag here, because 3752 * we want to commit the transaction if everything went well. 3753 * To make the live accounting work in this phase, we set our 3754 * scan progress pointer such that every real extent objectid 3755 * will be smaller. 3756 */ 3757 fs_info->qgroup_rescan_progress.objectid = (u64)-1; 3758 btrfs_release_path(path); 3759 mutex_unlock(&fs_info->qgroup_rescan_lock); 3760 return ret; 3761 } 3762 done = is_last_leaf(path); 3763 3764 btrfs_item_key_to_cpu(path->nodes[0], &found, 3765 btrfs_header_nritems(path->nodes[0]) - 1); 3766 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1; 3767 3768 scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]); 3769 if (!scratch_leaf) { 3770 ret = -ENOMEM; 3771 mutex_unlock(&fs_info->qgroup_rescan_lock); 3772 goto out; 3773 } 3774 slot = path->slots[0]; 3775 btrfs_release_path(path); 3776 mutex_unlock(&fs_info->qgroup_rescan_lock); 3777 3778 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) { 3779 struct btrfs_backref_walk_ctx ctx = { 0 }; 3780 3781 btrfs_item_key_to_cpu(scratch_leaf, &found, slot); 3782 if (found.type != BTRFS_EXTENT_ITEM_KEY && 3783 found.type != BTRFS_METADATA_ITEM_KEY) 3784 continue; 3785 if (found.type == BTRFS_METADATA_ITEM_KEY) 3786 num_bytes = fs_info->nodesize; 3787 else 3788 num_bytes = found.offset; 3789 3790 ctx.bytenr = found.objectid; 3791 ctx.fs_info = fs_info; 3792 3793 ret = btrfs_find_all_roots(&ctx, false); 3794 if (ret < 0) 3795 goto out; 3796 /* For rescan, just pass old_roots as NULL */ 3797 ret = btrfs_qgroup_account_extent(trans, found.objectid, 3798 num_bytes, NULL, ctx.roots); 3799 if (ret < 0) 3800 goto out; 3801 } 3802 out: 3803 if (scratch_leaf) 3804 free_extent_buffer(scratch_leaf); 3805 3806 if (done && !ret) { 3807 ret = 1; 3808 fs_info->qgroup_rescan_progress.objectid = (u64)-1; 3809 } 3810 return ret; 3811 } 3812 3813 static bool rescan_should_stop(struct btrfs_fs_info *fs_info) 3814 { 3815 if (btrfs_fs_closing(fs_info)) 3816 return true; 3817 if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) 3818 return true; 3819 if (!btrfs_qgroup_enabled(fs_info)) 3820 return true; 3821 if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) 3822 return true; 3823 return false; 3824 } 3825 3826 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) 3827 { 3828 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, 3829 qgroup_rescan_work); 3830 struct btrfs_path *path; 3831 struct btrfs_trans_handle *trans = NULL; 3832 int ret = 0; 3833 bool stopped = false; 3834 bool did_leaf_rescans = false; 3835 3836 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) 3837 return; 3838 3839 path = btrfs_alloc_path(); 3840 if (!path) { 3841 ret = -ENOMEM; 3842 goto out; 3843 } 3844 /* 3845 * Rescan should only search for commit root, and any later difference 3846 * should be recorded by qgroup 3847 */ 3848 path->search_commit_root = true; 3849 path->skip_locking = true; 3850 3851 while (!ret && !(stopped = rescan_should_stop(fs_info))) { 3852 trans = btrfs_start_transaction(fs_info->fs_root, 0); 3853 if (IS_ERR(trans)) { 3854 ret = PTR_ERR(trans); 3855 break; 3856 } 3857 3858 ret = qgroup_rescan_leaf(trans, path); 3859 did_leaf_rescans = true; 3860 3861 if (ret > 0) 3862 btrfs_commit_transaction(trans); 3863 else 3864 btrfs_end_transaction(trans); 3865 } 3866 3867 out: 3868 btrfs_free_path(path); 3869 3870 mutex_lock(&fs_info->qgroup_rescan_lock); 3871 if (ret > 0 && 3872 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { 3873 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 3874 } else if (ret < 0 || stopped) { 3875 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; 3876 } 3877 mutex_unlock(&fs_info->qgroup_rescan_lock); 3878 3879 /* 3880 * Only update status, since the previous part has already updated the 3881 * qgroup info, and only if we did any actual work. This also prevents 3882 * race with a concurrent quota disable, which has already set 3883 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at 3884 * btrfs_quota_disable(). 3885 */ 3886 if (did_leaf_rescans) { 3887 trans = btrfs_start_transaction(fs_info->quota_root, 1); 3888 if (IS_ERR(trans)) { 3889 ret = PTR_ERR(trans); 3890 trans = NULL; 3891 btrfs_err(fs_info, 3892 "fail to start transaction for status update: %d", 3893 ret); 3894 } 3895 } else { 3896 trans = NULL; 3897 } 3898 3899 mutex_lock(&fs_info->qgroup_rescan_lock); 3900 if (!stopped || 3901 fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) 3902 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3903 if (trans) { 3904 int ret2 = update_qgroup_status_item(trans); 3905 3906 if (ret2 < 0) { 3907 ret = ret2; 3908 btrfs_err(fs_info, "fail to update qgroup status: %d", ret); 3909 } 3910 } 3911 fs_info->qgroup_rescan_running = false; 3912 fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN; 3913 complete_all(&fs_info->qgroup_rescan_completion); 3914 mutex_unlock(&fs_info->qgroup_rescan_lock); 3915 3916 if (!trans) 3917 return; 3918 3919 btrfs_end_transaction(trans); 3920 3921 if (stopped) { 3922 btrfs_info(fs_info, "qgroup scan paused"); 3923 } else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) { 3924 btrfs_info(fs_info, "qgroup scan cancelled"); 3925 } else if (ret >= 0) { 3926 btrfs_info(fs_info, "qgroup scan completed%s", 3927 ret > 0 ? " (inconsistency flag cleared)" : ""); 3928 } else { 3929 btrfs_err(fs_info, "qgroup scan failed with %d", ret); 3930 } 3931 } 3932 3933 /* 3934 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all 3935 * memory required for the rescan context. 3936 */ 3937 static int 3938 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, 3939 int init_flags) 3940 { 3941 int ret = 0; 3942 3943 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) { 3944 btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode"); 3945 return -EINVAL; 3946 } 3947 3948 if (!init_flags) { 3949 /* we're resuming qgroup rescan at mount time */ 3950 if (!(fs_info->qgroup_flags & 3951 BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { 3952 btrfs_debug(fs_info, 3953 "qgroup rescan init failed, qgroup rescan is not queued"); 3954 ret = -EINVAL; 3955 } else if (!(fs_info->qgroup_flags & 3956 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3957 btrfs_debug(fs_info, 3958 "qgroup rescan init failed, qgroup is not enabled"); 3959 ret = -ENOTCONN; 3960 } 3961 3962 if (ret) 3963 return ret; 3964 } 3965 3966 mutex_lock(&fs_info->qgroup_rescan_lock); 3967 3968 if (init_flags) { 3969 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 3970 ret = -EINPROGRESS; 3971 } else if (!(fs_info->qgroup_flags & 3972 BTRFS_QGROUP_STATUS_FLAG_ON)) { 3973 btrfs_debug(fs_info, 3974 "qgroup rescan init failed, qgroup is not enabled"); 3975 ret = -ENOTCONN; 3976 } else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) { 3977 /* Quota disable is in progress */ 3978 ret = -EBUSY; 3979 } 3980 3981 if (ret) { 3982 mutex_unlock(&fs_info->qgroup_rescan_lock); 3983 return ret; 3984 } 3985 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3986 } 3987 3988 memset(&fs_info->qgroup_rescan_progress, 0, 3989 sizeof(fs_info->qgroup_rescan_progress)); 3990 fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN | 3991 BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING); 3992 fs_info->qgroup_rescan_progress.objectid = progress_objectid; 3993 init_completion(&fs_info->qgroup_rescan_completion); 3994 mutex_unlock(&fs_info->qgroup_rescan_lock); 3995 3996 btrfs_init_work(&fs_info->qgroup_rescan_work, 3997 btrfs_qgroup_rescan_worker, NULL); 3998 return 0; 3999 } 4000 4001 static void 4002 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) 4003 { 4004 struct rb_node *n; 4005 struct btrfs_qgroup *qgroup; 4006 4007 spin_lock(&fs_info->qgroup_lock); 4008 /* clear all current qgroup tracking information */ 4009 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { 4010 qgroup = rb_entry(n, struct btrfs_qgroup, node); 4011 qgroup->rfer = 0; 4012 qgroup->rfer_cmpr = 0; 4013 qgroup->excl = 0; 4014 qgroup->excl_cmpr = 0; 4015 qgroup_dirty(fs_info, qgroup); 4016 } 4017 spin_unlock(&fs_info->qgroup_lock); 4018 } 4019 4020 int 4021 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) 4022 { 4023 int ret = 0; 4024 4025 ret = qgroup_rescan_init(fs_info, 0, 1); 4026 if (ret) 4027 return ret; 4028 4029 /* 4030 * We have set the rescan_progress to 0, which means no more 4031 * delayed refs will be accounted by btrfs_qgroup_account_ref. 4032 * However, btrfs_qgroup_account_ref may be right after its call 4033 * to btrfs_find_all_roots, in which case it would still do the 4034 * accounting. 4035 * To solve this, we're committing the transaction, which will 4036 * ensure we run all delayed refs and only after that, we are 4037 * going to clear all tracking information for a clean start. 4038 */ 4039 4040 ret = btrfs_commit_current_transaction(fs_info->fs_root); 4041 if (ret) { 4042 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 4043 return ret; 4044 } 4045 4046 qgroup_rescan_zero_tracking(fs_info); 4047 4048 mutex_lock(&fs_info->qgroup_rescan_lock); 4049 /* 4050 * The rescan worker is only for full accounting qgroups, check if it's 4051 * enabled as it is pointless to queue it otherwise. A concurrent quota 4052 * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED. 4053 */ 4054 if (btrfs_qgroup_full_accounting(fs_info)) { 4055 fs_info->qgroup_rescan_running = true; 4056 btrfs_queue_work(fs_info->qgroup_rescan_workers, 4057 &fs_info->qgroup_rescan_work); 4058 } else { 4059 ret = -ENOTCONN; 4060 } 4061 mutex_unlock(&fs_info->qgroup_rescan_lock); 4062 4063 return ret; 4064 } 4065 4066 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, 4067 bool interruptible) 4068 { 4069 int running; 4070 int ret = 0; 4071 4072 mutex_lock(&fs_info->qgroup_rescan_lock); 4073 running = fs_info->qgroup_rescan_running; 4074 mutex_unlock(&fs_info->qgroup_rescan_lock); 4075 4076 if (!running) 4077 return 0; 4078 4079 if (interruptible) 4080 ret = wait_for_completion_interruptible( 4081 &fs_info->qgroup_rescan_completion); 4082 else 4083 wait_for_completion(&fs_info->qgroup_rescan_completion); 4084 4085 return ret; 4086 } 4087 4088 /* 4089 * this is only called from open_ctree where we're still single threaded, thus 4090 * locking is omitted here. 4091 */ 4092 void 4093 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) 4094 { 4095 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { 4096 mutex_lock(&fs_info->qgroup_rescan_lock); 4097 fs_info->qgroup_rescan_running = true; 4098 btrfs_queue_work(fs_info->qgroup_rescan_workers, 4099 &fs_info->qgroup_rescan_work); 4100 mutex_unlock(&fs_info->qgroup_rescan_lock); 4101 } 4102 } 4103 4104 #define rbtree_iterate_from_safe(node, next, start) \ 4105 for (node = start; node && ({ next = rb_next(node); 1;}); node = next) 4106 4107 static int qgroup_unreserve_range(struct btrfs_inode *inode, 4108 struct extent_changeset *reserved, u64 start, 4109 u64 len) 4110 { 4111 struct rb_node *node; 4112 struct rb_node *next; 4113 struct ulist_node *entry; 4114 int ret = 0; 4115 4116 node = reserved->range_changed.root.rb_node; 4117 if (!node) 4118 return 0; 4119 while (node) { 4120 entry = rb_entry(node, struct ulist_node, rb_node); 4121 if (entry->val < start) 4122 node = node->rb_right; 4123 else 4124 node = node->rb_left; 4125 } 4126 4127 if (entry->val > start && rb_prev(&entry->rb_node)) 4128 entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node, 4129 rb_node); 4130 4131 rbtree_iterate_from_safe(node, next, &entry->rb_node) { 4132 u64 entry_start; 4133 u64 entry_end; 4134 u64 entry_len; 4135 int clear_ret; 4136 4137 entry = rb_entry(node, struct ulist_node, rb_node); 4138 entry_start = entry->val; 4139 entry_end = entry->aux; 4140 entry_len = entry_end - entry_start + 1; 4141 4142 if (entry_start >= start + len) 4143 break; 4144 if (entry_start + entry_len <= start) 4145 continue; 4146 /* 4147 * Now the entry is in [start, start + len), revert the 4148 * EXTENT_QGROUP_RESERVED bit. 4149 */ 4150 clear_ret = btrfs_clear_extent_bit(&inode->io_tree, entry_start, entry_end, 4151 EXTENT_QGROUP_RESERVED, NULL); 4152 if (!ret && clear_ret < 0) 4153 ret = clear_ret; 4154 4155 ulist_del(&reserved->range_changed, entry->val, entry->aux); 4156 if (likely(reserved->bytes_changed >= entry_len)) { 4157 reserved->bytes_changed -= entry_len; 4158 } else { 4159 WARN_ON(1); 4160 reserved->bytes_changed = 0; 4161 } 4162 } 4163 4164 return ret; 4165 } 4166 4167 /* 4168 * Try to free some space for qgroup. 4169 * 4170 * For qgroup, there are only 3 ways to free qgroup space: 4171 * - Flush nodatacow write 4172 * Any nodatacow write will free its reserved data space at run_delalloc_range(). 4173 * In theory, we should only flush nodatacow inodes, but it's not yet 4174 * possible, so we need to flush the whole root. 4175 * 4176 * - Wait for ordered extents 4177 * When ordered extents are finished, their reserved metadata is finally 4178 * converted to per_trans status, which can be freed by later commit 4179 * transaction. 4180 * 4181 * - Commit transaction 4182 * This would free the meta_per_trans space. 4183 * In theory this shouldn't provide much space, but any more qgroup space 4184 * is needed. 4185 */ 4186 static int try_flush_qgroup(struct btrfs_root *root) 4187 { 4188 int ret; 4189 4190 /* Can't hold an open transaction or we run the risk of deadlocking. */ 4191 ASSERT(current->journal_info == NULL); 4192 if (WARN_ON(current->journal_info)) 4193 return 0; 4194 4195 /* 4196 * We don't want to run flush again and again, so if there is a running 4197 * one, we won't try to start a new flush, but exit directly. 4198 */ 4199 if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) { 4200 wait_event(root->qgroup_flush_wait, 4201 !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)); 4202 return 0; 4203 } 4204 4205 ret = btrfs_start_delalloc_snapshot(root, true); 4206 if (ret < 0) 4207 goto out; 4208 btrfs_wait_ordered_extents(root, U64_MAX, NULL); 4209 4210 /* 4211 * After waiting for ordered extents run delayed iputs in order to free 4212 * space from unlinked files before committing the current transaction, 4213 * as ordered extents may have been holding the last reference of an 4214 * inode and they add a delayed iput when they complete. 4215 */ 4216 btrfs_run_delayed_iputs(root->fs_info); 4217 btrfs_wait_on_delayed_iputs(root->fs_info); 4218 4219 ret = btrfs_commit_current_transaction(root); 4220 out: 4221 clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state); 4222 wake_up(&root->qgroup_flush_wait); 4223 return ret; 4224 } 4225 4226 static int qgroup_reserve_data(struct btrfs_inode *inode, 4227 struct extent_changeset **reserved_ret, u64 start, 4228 u64 len) 4229 { 4230 struct btrfs_root *root = inode->root; 4231 struct extent_changeset *reserved; 4232 bool new_reserved = false; 4233 u64 orig_reserved; 4234 u64 to_reserve; 4235 int ret; 4236 4237 if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4238 !btrfs_is_fstree(btrfs_root_id(root)) || len == 0) 4239 return 0; 4240 4241 /* @reserved parameter is mandatory for qgroup */ 4242 if (WARN_ON(!reserved_ret)) 4243 return -EINVAL; 4244 if (!*reserved_ret) { 4245 new_reserved = true; 4246 *reserved_ret = extent_changeset_alloc(); 4247 if (!*reserved_ret) 4248 return -ENOMEM; 4249 } 4250 reserved = *reserved_ret; 4251 /* Record already reserved space */ 4252 orig_reserved = reserved->bytes_changed; 4253 ret = btrfs_set_record_extent_bits(&inode->io_tree, start, 4254 start + len - 1, EXTENT_QGROUP_RESERVED, 4255 reserved); 4256 4257 /* Newly reserved space */ 4258 to_reserve = reserved->bytes_changed - orig_reserved; 4259 trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len, 4260 to_reserve, QGROUP_RESERVE); 4261 if (ret < 0) 4262 goto out; 4263 ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA); 4264 if (ret < 0) 4265 goto cleanup; 4266 4267 return ret; 4268 4269 cleanup: 4270 qgroup_unreserve_range(inode, reserved, start, len); 4271 out: 4272 if (new_reserved) { 4273 extent_changeset_free(reserved); 4274 *reserved_ret = NULL; 4275 } 4276 return ret; 4277 } 4278 4279 /* 4280 * Reserve qgroup space for range [start, start + len). 4281 * 4282 * This function will either reserve space from related qgroups or do nothing 4283 * if the range is already reserved. 4284 * 4285 * Return 0 for successful reservation 4286 * Return <0 for error (including -EQUOT) 4287 * 4288 * NOTE: This function may sleep for memory allocation, dirty page flushing and 4289 * commit transaction. So caller should not hold any dirty page locked. 4290 */ 4291 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode, 4292 struct extent_changeset **reserved_ret, u64 start, 4293 u64 len) 4294 { 4295 int ret; 4296 4297 ret = qgroup_reserve_data(inode, reserved_ret, start, len); 4298 if (ret <= 0 && ret != -EDQUOT) 4299 return ret; 4300 4301 ret = try_flush_qgroup(inode->root); 4302 if (ret < 0) 4303 return ret; 4304 return qgroup_reserve_data(inode, reserved_ret, start, len); 4305 } 4306 4307 /* Free ranges specified by @reserved, normally in error path */ 4308 static int qgroup_free_reserved_data(struct btrfs_inode *inode, 4309 struct extent_changeset *reserved, 4310 u64 start, u64 len, u64 *freed_ret) 4311 { 4312 struct btrfs_root *root = inode->root; 4313 struct ulist_node *unode; 4314 struct ulist_iterator uiter; 4315 struct extent_changeset changeset; 4316 u64 freed = 0; 4317 int ret; 4318 4319 extent_changeset_init(&changeset); 4320 len = round_up(start + len, root->fs_info->sectorsize); 4321 start = round_down(start, root->fs_info->sectorsize); 4322 4323 ULIST_ITER_INIT(&uiter); 4324 while ((unode = ulist_next(&reserved->range_changed, &uiter))) { 4325 u64 range_start = unode->val; 4326 /* unode->aux is the inclusive end */ 4327 u64 range_len = unode->aux - range_start + 1; 4328 u64 free_start; 4329 u64 free_len; 4330 4331 extent_changeset_release(&changeset); 4332 4333 /* Only free range in range [start, start + len) */ 4334 if (range_start >= start + len || 4335 range_start + range_len <= start) 4336 continue; 4337 free_start = max(range_start, start); 4338 free_len = min(start + len, range_start + range_len) - 4339 free_start; 4340 /* 4341 * TODO: To also modify reserved->ranges_reserved to reflect 4342 * the modification. 4343 * 4344 * However as long as we free qgroup reserved according to 4345 * EXTENT_QGROUP_RESERVED, we won't double free. 4346 * So not need to rush. 4347 */ 4348 ret = btrfs_clear_record_extent_bits(&inode->io_tree, free_start, 4349 free_start + free_len - 1, 4350 EXTENT_QGROUP_RESERVED, 4351 &changeset); 4352 if (ret < 0) 4353 goto out; 4354 freed += changeset.bytes_changed; 4355 } 4356 btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed, 4357 BTRFS_QGROUP_RSV_DATA); 4358 if (freed_ret) 4359 *freed_ret = freed; 4360 ret = 0; 4361 out: 4362 extent_changeset_release(&changeset); 4363 return ret; 4364 } 4365 4366 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, 4367 struct extent_changeset *reserved, u64 start, u64 len, 4368 u64 *released, int free) 4369 { 4370 struct extent_changeset changeset; 4371 int trace_op = QGROUP_RELEASE; 4372 int ret; 4373 4374 if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) { 4375 return btrfs_clear_record_extent_bits(&inode->io_tree, start, 4376 start + len - 1, 4377 EXTENT_QGROUP_RESERVED, NULL); 4378 } 4379 4380 /* In release case, we shouldn't have @reserved */ 4381 WARN_ON(!free && reserved); 4382 if (free && reserved) 4383 return qgroup_free_reserved_data(inode, reserved, start, len, released); 4384 extent_changeset_init(&changeset); 4385 ret = btrfs_clear_record_extent_bits(&inode->io_tree, start, start + len - 1, 4386 EXTENT_QGROUP_RESERVED, &changeset); 4387 if (ret < 0) 4388 goto out; 4389 4390 if (free) 4391 trace_op = QGROUP_FREE; 4392 trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len, 4393 changeset.bytes_changed, trace_op); 4394 if (free) 4395 btrfs_qgroup_free_refroot(inode->root->fs_info, 4396 btrfs_root_id(inode->root), 4397 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); 4398 if (released) 4399 *released = changeset.bytes_changed; 4400 out: 4401 extent_changeset_release(&changeset); 4402 return ret; 4403 } 4404 4405 /* 4406 * Free a reserved space range from io_tree and related qgroups 4407 * 4408 * Should be called when a range of pages get invalidated before reaching disk. 4409 * Or for error cleanup case. 4410 * if @reserved is given, only reserved range in [@start, @start + @len) will 4411 * be freed. 4412 * 4413 * For data written to disk, use btrfs_qgroup_release_data(). 4414 * 4415 * NOTE: This function may sleep for memory allocation. 4416 */ 4417 int btrfs_qgroup_free_data(struct btrfs_inode *inode, 4418 struct extent_changeset *reserved, 4419 u64 start, u64 len, u64 *freed) 4420 { 4421 return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1); 4422 } 4423 4424 /* 4425 * Release a reserved space range from io_tree only. 4426 * 4427 * Should be called when a range of pages get written to disk and corresponding 4428 * FILE_EXTENT is inserted into corresponding root. 4429 * 4430 * Since new qgroup accounting framework will only update qgroup numbers at 4431 * commit_transaction() time, its reserved space shouldn't be freed from 4432 * related qgroups. 4433 * 4434 * But we should release the range from io_tree, to allow further write to be 4435 * COWed. 4436 * 4437 * NOTE: This function may sleep for memory allocation. 4438 */ 4439 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released) 4440 { 4441 return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0); 4442 } 4443 4444 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes, 4445 enum btrfs_qgroup_rsv_type type) 4446 { 4447 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 4448 type != BTRFS_QGROUP_RSV_META_PERTRANS) 4449 return; 4450 if (num_bytes == 0) 4451 return; 4452 4453 spin_lock(&root->qgroup_meta_rsv_lock); 4454 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) 4455 root->qgroup_meta_rsv_prealloc += num_bytes; 4456 else 4457 root->qgroup_meta_rsv_pertrans += num_bytes; 4458 spin_unlock(&root->qgroup_meta_rsv_lock); 4459 } 4460 4461 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes, 4462 enum btrfs_qgroup_rsv_type type) 4463 { 4464 if (type != BTRFS_QGROUP_RSV_META_PREALLOC && 4465 type != BTRFS_QGROUP_RSV_META_PERTRANS) 4466 return 0; 4467 if (num_bytes == 0) 4468 return 0; 4469 4470 spin_lock(&root->qgroup_meta_rsv_lock); 4471 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) { 4472 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc, 4473 num_bytes); 4474 root->qgroup_meta_rsv_prealloc -= num_bytes; 4475 } else { 4476 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans, 4477 num_bytes); 4478 root->qgroup_meta_rsv_pertrans -= num_bytes; 4479 } 4480 spin_unlock(&root->qgroup_meta_rsv_lock); 4481 return num_bytes; 4482 } 4483 4484 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, 4485 enum btrfs_qgroup_rsv_type type, bool enforce) 4486 { 4487 struct btrfs_fs_info *fs_info = root->fs_info; 4488 int ret; 4489 4490 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4491 !btrfs_is_fstree(btrfs_root_id(root)) || num_bytes == 0) 4492 return 0; 4493 4494 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 4495 trace_btrfs_qgroup_meta_reserve(root, (s64)num_bytes, type); 4496 ret = qgroup_reserve(root, num_bytes, enforce, type); 4497 if (ret < 0) 4498 return ret; 4499 /* 4500 * Record what we have reserved into root. 4501 * 4502 * To avoid quota disabled->enabled underflow. 4503 * In that case, we may try to free space we haven't reserved 4504 * (since quota was disabled), so record what we reserved into root. 4505 * And ensure later release won't underflow this number. 4506 */ 4507 add_root_meta_rsv(root, num_bytes, type); 4508 return ret; 4509 } 4510 4511 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, 4512 enum btrfs_qgroup_rsv_type type, bool enforce, 4513 bool noflush) 4514 { 4515 int ret; 4516 4517 ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce); 4518 if ((ret <= 0 && ret != -EDQUOT) || noflush) 4519 return ret; 4520 4521 ret = try_flush_qgroup(root); 4522 if (ret < 0) 4523 return ret; 4524 return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce); 4525 } 4526 4527 /* 4528 * Per-transaction meta reservation should be all freed at transaction commit 4529 * time 4530 */ 4531 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) 4532 { 4533 struct btrfs_fs_info *fs_info = root->fs_info; 4534 4535 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4536 !btrfs_is_fstree(btrfs_root_id(root))) 4537 return; 4538 4539 /* TODO: Update trace point to handle such free */ 4540 trace_btrfs_qgroup_meta_free_all_pertrans(root); 4541 /* Special value -1 means to free all reserved space */ 4542 btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1, 4543 BTRFS_QGROUP_RSV_META_PERTRANS); 4544 } 4545 4546 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, 4547 enum btrfs_qgroup_rsv_type type) 4548 { 4549 struct btrfs_fs_info *fs_info = root->fs_info; 4550 4551 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4552 !btrfs_is_fstree(btrfs_root_id(root))) 4553 return; 4554 4555 /* 4556 * reservation for META_PREALLOC can happen before quota is enabled, 4557 * which can lead to underflow. 4558 * Here ensure we will only free what we really have reserved. 4559 */ 4560 num_bytes = sub_root_meta_rsv(root, num_bytes, type); 4561 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 4562 trace_btrfs_qgroup_meta_reserve(root, -(s64)num_bytes, type); 4563 btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type); 4564 } 4565 4566 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, 4567 int num_bytes) 4568 { 4569 struct btrfs_qgroup *qgroup; 4570 LIST_HEAD(qgroup_list); 4571 4572 if (num_bytes == 0) 4573 return; 4574 if (!fs_info->quota_root) 4575 return; 4576 4577 spin_lock(&fs_info->qgroup_lock); 4578 qgroup = find_qgroup_rb(fs_info, ref_root); 4579 if (!qgroup) 4580 goto out; 4581 4582 qgroup_iterator_add(&qgroup_list, qgroup); 4583 list_for_each_entry(qgroup, &qgroup_list, iterator) { 4584 struct btrfs_qgroup_list *glist; 4585 4586 qgroup_rsv_release(fs_info, qgroup, num_bytes, 4587 BTRFS_QGROUP_RSV_META_PREALLOC); 4588 if (!sb_rdonly(fs_info->sb)) 4589 qgroup_rsv_add(fs_info, qgroup, num_bytes, 4590 BTRFS_QGROUP_RSV_META_PERTRANS); 4591 4592 list_for_each_entry(glist, &qgroup->groups, next_group) 4593 qgroup_iterator_add(&qgroup_list, glist->group); 4594 } 4595 out: 4596 qgroup_iterator_clean(&qgroup_list); 4597 spin_unlock(&fs_info->qgroup_lock); 4598 } 4599 4600 /* 4601 * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS. 4602 * 4603 * This is called when preallocated meta reservation needs to be used. 4604 * Normally after btrfs_join_transaction() call. 4605 */ 4606 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) 4607 { 4608 struct btrfs_fs_info *fs_info = root->fs_info; 4609 4610 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || 4611 !btrfs_is_fstree(btrfs_root_id(root))) 4612 return; 4613 /* Same as btrfs_qgroup_free_meta_prealloc() */ 4614 num_bytes = sub_root_meta_rsv(root, num_bytes, 4615 BTRFS_QGROUP_RSV_META_PREALLOC); 4616 trace_btrfs_qgroup_meta_convert(root, num_bytes); 4617 qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes); 4618 if (!sb_rdonly(fs_info->sb)) 4619 add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS); 4620 } 4621 4622 /* 4623 * Check qgroup reserved space leaking, normally at destroy inode 4624 * time 4625 */ 4626 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode) 4627 { 4628 struct extent_changeset changeset; 4629 struct ulist_node *unode; 4630 struct ulist_iterator iter; 4631 int ret; 4632 4633 extent_changeset_init(&changeset); 4634 ret = btrfs_clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, 4635 EXTENT_QGROUP_RESERVED, &changeset); 4636 4637 WARN_ON(ret < 0); 4638 if (WARN_ON(changeset.bytes_changed)) { 4639 ULIST_ITER_INIT(&iter); 4640 while ((unode = ulist_next(&changeset.range_changed, &iter))) { 4641 btrfs_warn(inode->root->fs_info, 4642 "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu", 4643 btrfs_ino(inode), unode->val, unode->aux); 4644 } 4645 btrfs_qgroup_free_refroot(inode->root->fs_info, 4646 btrfs_root_id(inode->root), 4647 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); 4648 4649 } 4650 extent_changeset_release(&changeset); 4651 } 4652 4653 void btrfs_qgroup_init_swapped_blocks( 4654 struct btrfs_qgroup_swapped_blocks *swapped_blocks) 4655 { 4656 int i; 4657 4658 spin_lock_init(&swapped_blocks->lock); 4659 for (i = 0; i < BTRFS_MAX_LEVEL; i++) 4660 swapped_blocks->blocks[i] = RB_ROOT; 4661 swapped_blocks->swapped = false; 4662 } 4663 4664 /* 4665 * Delete all swapped blocks record of @root. 4666 * Every record here means we skipped a full subtree scan for qgroup. 4667 * 4668 * Gets called when committing one transaction. 4669 */ 4670 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root) 4671 { 4672 struct btrfs_qgroup_swapped_blocks *swapped_blocks; 4673 int i; 4674 4675 swapped_blocks = &root->swapped_blocks; 4676 4677 spin_lock(&swapped_blocks->lock); 4678 if (!swapped_blocks->swapped) 4679 goto out; 4680 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 4681 struct rb_root *cur_root = &swapped_blocks->blocks[i]; 4682 struct btrfs_qgroup_swapped_block *entry; 4683 struct btrfs_qgroup_swapped_block *next; 4684 4685 rbtree_postorder_for_each_entry_safe(entry, next, cur_root, 4686 node) 4687 kfree(entry); 4688 swapped_blocks->blocks[i] = RB_ROOT; 4689 } 4690 swapped_blocks->swapped = false; 4691 out: 4692 spin_unlock(&swapped_blocks->lock); 4693 } 4694 4695 static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node) 4696 { 4697 const u64 *bytenr = key; 4698 const struct btrfs_qgroup_swapped_block *block = rb_entry(node, 4699 struct btrfs_qgroup_swapped_block, node); 4700 4701 if (block->subvol_bytenr < *bytenr) 4702 return -1; 4703 else if (block->subvol_bytenr > *bytenr) 4704 return 1; 4705 4706 return 0; 4707 } 4708 4709 static int qgroup_swapped_block_bytenr_cmp(struct rb_node *new, const struct rb_node *existing) 4710 { 4711 const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new, 4712 struct btrfs_qgroup_swapped_block, node); 4713 4714 return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing); 4715 } 4716 4717 /* 4718 * Add subtree roots record into @subvol_root. 4719 * 4720 * @subvol_root: tree root of the subvolume tree get swapped 4721 * @bg: block group under balance 4722 * @subvol_parent/slot: pointer to the subtree root in subvolume tree 4723 * @reloc_parent/slot: pointer to the subtree root in reloc tree 4724 * BOTH POINTERS ARE BEFORE TREE SWAP 4725 * @last_snapshot: last snapshot generation of the subvolume tree 4726 */ 4727 int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root, 4728 struct btrfs_block_group *bg, 4729 struct extent_buffer *subvol_parent, int subvol_slot, 4730 struct extent_buffer *reloc_parent, int reloc_slot, 4731 u64 last_snapshot) 4732 { 4733 struct btrfs_fs_info *fs_info = subvol_root->fs_info; 4734 struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks; 4735 struct btrfs_qgroup_swapped_block *block; 4736 struct rb_node *node; 4737 int level = btrfs_header_level(subvol_parent) - 1; 4738 int ret = 0; 4739 4740 if (!btrfs_qgroup_full_accounting(fs_info)) 4741 return 0; 4742 4743 if (unlikely(btrfs_node_ptr_generation(subvol_parent, subvol_slot) > 4744 btrfs_node_ptr_generation(reloc_parent, reloc_slot))) { 4745 btrfs_err_rl(fs_info, 4746 "%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu", 4747 __func__, 4748 btrfs_node_ptr_generation(subvol_parent, subvol_slot), 4749 btrfs_node_ptr_generation(reloc_parent, reloc_slot)); 4750 return -EUCLEAN; 4751 } 4752 4753 block = kmalloc(sizeof(*block), GFP_NOFS); 4754 if (!block) { 4755 ret = -ENOMEM; 4756 goto out; 4757 } 4758 4759 /* 4760 * @reloc_parent/slot is still before swap, while @block is going to 4761 * record the bytenr after swap, so we do the swap here. 4762 */ 4763 block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot); 4764 block->subvol_generation = btrfs_node_ptr_generation(reloc_parent, 4765 reloc_slot); 4766 block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot); 4767 block->reloc_generation = btrfs_node_ptr_generation(subvol_parent, 4768 subvol_slot); 4769 block->last_snapshot = last_snapshot; 4770 block->level = level; 4771 4772 /* 4773 * If we have bg == NULL, we're called from btrfs_recover_relocation(), 4774 * no one else can modify tree blocks thus we qgroup will not change 4775 * no matter the value of trace_leaf. 4776 */ 4777 if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA) 4778 block->trace_leaf = true; 4779 else 4780 block->trace_leaf = false; 4781 btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot); 4782 4783 /* Insert @block into @blocks */ 4784 spin_lock(&blocks->lock); 4785 node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp); 4786 if (node) { 4787 struct btrfs_qgroup_swapped_block *entry; 4788 4789 entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node); 4790 4791 if (entry->subvol_generation != block->subvol_generation || 4792 entry->reloc_bytenr != block->reloc_bytenr || 4793 entry->reloc_generation != block->reloc_generation) { 4794 /* 4795 * Duplicated but mismatch entry found. Shouldn't happen. 4796 * Marking qgroup inconsistent should be enough for end 4797 * users. 4798 */ 4799 DEBUG_WARN("duplicated but mismatched entry found"); 4800 ret = -EEXIST; 4801 } 4802 kfree(block); 4803 goto out_unlock; 4804 } 4805 blocks->swapped = true; 4806 out_unlock: 4807 spin_unlock(&blocks->lock); 4808 out: 4809 if (ret < 0) 4810 qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret); 4811 return ret; 4812 } 4813 4814 /* 4815 * Check if the tree block is a subtree root, and if so do the needed 4816 * delayed subtree trace for qgroup. 4817 * 4818 * This is called during btrfs_cow_block(). 4819 */ 4820 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, 4821 struct btrfs_root *root, 4822 struct extent_buffer *subvol_eb) 4823 { 4824 struct btrfs_fs_info *fs_info = root->fs_info; 4825 struct btrfs_tree_parent_check check = { 0 }; 4826 struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks; 4827 struct btrfs_qgroup_swapped_block AUTO_KFREE(block); 4828 struct extent_buffer *reloc_eb = NULL; 4829 struct rb_node *node; 4830 bool swapped = false; 4831 int level = btrfs_header_level(subvol_eb); 4832 int ret = 0; 4833 int i; 4834 4835 if (!btrfs_qgroup_full_accounting(fs_info)) 4836 return 0; 4837 if (!btrfs_is_fstree(btrfs_root_id(root)) || !root->reloc_root) 4838 return 0; 4839 4840 spin_lock(&blocks->lock); 4841 if (!blocks->swapped) { 4842 spin_unlock(&blocks->lock); 4843 return 0; 4844 } 4845 node = rb_find(&subvol_eb->start, &blocks->blocks[level], 4846 qgroup_swapped_block_bytenr_key_cmp); 4847 if (!node) { 4848 spin_unlock(&blocks->lock); 4849 goto out; 4850 } 4851 block = rb_entry(node, struct btrfs_qgroup_swapped_block, node); 4852 4853 /* Found one, remove it from @blocks first and update blocks->swapped */ 4854 rb_erase(&block->node, &blocks->blocks[level]); 4855 for (i = 0; i < BTRFS_MAX_LEVEL; i++) { 4856 if (RB_EMPTY_ROOT(&blocks->blocks[i])) { 4857 swapped = true; 4858 break; 4859 } 4860 } 4861 blocks->swapped = swapped; 4862 spin_unlock(&blocks->lock); 4863 4864 check.level = block->level; 4865 check.transid = block->reloc_generation; 4866 check.has_first_key = true; 4867 memcpy(&check.first_key, &block->first_key, sizeof(check.first_key)); 4868 4869 /* Read out reloc subtree root */ 4870 reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check); 4871 if (IS_ERR(reloc_eb)) { 4872 ret = PTR_ERR(reloc_eb); 4873 reloc_eb = NULL; 4874 goto free_out; 4875 } 4876 if (unlikely(!extent_buffer_uptodate(reloc_eb))) { 4877 ret = -EIO; 4878 goto free_out; 4879 } 4880 4881 ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb, 4882 block->last_snapshot, block->trace_leaf); 4883 free_out: 4884 free_extent_buffer(reloc_eb); 4885 out: 4886 if (ret < 0) { 4887 qgroup_mark_inconsistent(fs_info, 4888 "failed to account subtree at bytenr %llu: %d", 4889 subvol_eb->start, ret); 4890 } 4891 return ret; 4892 } 4893 4894 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans) 4895 { 4896 struct btrfs_qgroup_extent_record *entry; 4897 unsigned long index; 4898 4899 xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) { 4900 ulist_free(entry->old_roots); 4901 kfree(entry); 4902 } 4903 xa_destroy(&trans->delayed_refs.dirty_extents); 4904 } 4905 4906 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info, 4907 const struct btrfs_squota_delta *delta) 4908 { 4909 int ret; 4910 struct btrfs_qgroup *qgroup; 4911 struct btrfs_qgroup *qg; 4912 LIST_HEAD(qgroup_list); 4913 u64 root = delta->root; 4914 u64 num_bytes = delta->num_bytes; 4915 const int sign = (delta->is_inc ? 1 : -1); 4916 4917 if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE) 4918 return 0; 4919 4920 if (!btrfs_is_fstree(root)) 4921 return 0; 4922 4923 /* If the extent predates enabling quotas, don't count it. */ 4924 if (delta->generation < fs_info->qgroup_enable_gen) 4925 return 0; 4926 4927 spin_lock(&fs_info->qgroup_lock); 4928 qgroup = find_qgroup_rb(fs_info, root); 4929 if (!qgroup) { 4930 ret = -ENOENT; 4931 goto out; 4932 } 4933 4934 ret = 0; 4935 qgroup_iterator_add(&qgroup_list, qgroup); 4936 list_for_each_entry(qg, &qgroup_list, iterator) { 4937 struct btrfs_qgroup_list *glist; 4938 4939 qg->excl += num_bytes * sign; 4940 qg->rfer += num_bytes * sign; 4941 qgroup_dirty(fs_info, qg); 4942 4943 list_for_each_entry(glist, &qg->groups, next_group) 4944 qgroup_iterator_add(&qgroup_list, glist->group); 4945 } 4946 qgroup_iterator_clean(&qgroup_list); 4947 4948 out: 4949 spin_unlock(&fs_info->qgroup_lock); 4950 return ret; 4951 } 4952