// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "tree-checker.h"

enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info)
{
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return BTRFS_QGROUP_MODE_DISABLED;
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
		return BTRFS_QGROUP_MODE_SIMPLE;
	return BTRFS_QGROUP_MODE_FULL;
}

bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
}

bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
}

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */
static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}
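
/*
 * Note on the refcnt helpers below: old/new refcnts are only meaningful
 * relative to the sequence number of the current accounting pass.  A stored
 * value below @seq is a stale leftover from an earlier pass and is treated
 * as zero, so qgroups never need to be reset between passes.  For example,
 * with seq == 100, a stored refcnt of 102 means "2 references found in this
 * pass".
 */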

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_right;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_left;
		else
			return qgroup;
	}
	return NULL;
}
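
/*
 * Illustration: a qgroupid encodes the level in its top 16 bits, i.e.
 * qgroup 1/100 is ((u64)1 << BTRFS_QGROUP_LEVEL_SHIFT) | 100, so level 0
 * qgroupids compare equal to their subvolume ids and the rbtree above
 * naturally sorts qgroups by level first, then by subvolume id.
 */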

/*
 * Add qgroup to the filesystem's qgroup tree.
 *
 * Must be called with qgroup_lock held and @prealloc preallocated.
 *
 * The control of the lifespan of @prealloc is transferred to this function,
 * thus the caller should no longer touch @prealloc.
 */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  struct btrfs_qgroup *prealloc,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	/* Caller must have pre-allocated @prealloc. */
	ASSERT(prealloc);

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid) {
			p = &(*p)->rb_right;
		} else if (qgroup->qgroupid > qgroupid) {
			p = &(*p)->rb_left;
		} else {
			kfree(prealloc);
			return qgroup;
		}
	}

	qgroup = prealloc;
	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);
	INIT_LIST_HEAD(&qgroup->iterator);
	INIT_LIST_HEAD(&qgroup->nested_iterator);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}

/*
 * Add relation specified by two qgroups.
 *
 * Must be called with qgroup_lock held, the ownership of @prealloc is
 * transferred to this function and the caller should not touch it anymore.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the qgroups is NULL
 *         <0       other errors
 */
static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
			     struct btrfs_qgroup *member,
			     struct btrfs_qgroup *parent)
{
	if (!member || !parent) {
		kfree(prealloc);
		return -ENOENT;
	}

	prealloc->group = parent;
	prealloc->member = member;
	list_add_tail(&prealloc->next_group, &member->groups);
	list_add_tail(&prealloc->next_member, &parent->members);

	return 0;
}
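
/*
 * Note: each btrfs_qgroup_list node is linked into both member->groups (via
 * next_group) and parent->members (via next_member), so a relation can be
 * walked from either endpoint and torn down with two list_del() calls, as
 * done in __del_qgroup_rb().
 */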

/*
 * Add relation specified by two qgroup ids.
 *
 * Must be called with qgroup_lock held.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the ids does not exist
 *         <0       other errors
 */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup_list *prealloc,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);

	return __add_relation_rb(prealloc, member, parent);
}

/* Must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
{
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		return;
	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
}

static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf, int slot,
				   struct btrfs_qgroup_status_item *ptr)
{
	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
}
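
/*
 * Note on the on-disk layout read below: the quota tree keeps the status
 * item at key (0, BTRFS_QGROUP_STATUS_KEY, 0) and, per qgroup, items at
 * (0, BTRFS_QGROUP_INFO_KEY, qgroupid) and (0, BTRFS_QGROUP_LIMIT_KEY,
 * qgroupid), so a single forward scan from key (0, 0, 0) visits all of
 * them.  Relations are read in a second pass.
 */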

/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_root)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					  "old qgroup version, quota disabled");
				goto out;
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
				qgroup_read_enable_gen(fs_info, l, slot, ptr);
			} else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
				qgroup_mark_inconsistent(fs_info);
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			qgroup_mark_inconsistent(fs_info);
		}
		if (!qgroup) {
			struct btrfs_qgroup *prealloc;

			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
			if (!prealloc) {
				ret = -ENOMEM;
				goto out;
			}
			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		struct btrfs_qgroup_list *list = NULL;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		list = kzalloc(sizeof(*list), GFP_KERNEL);
		if (!list) {
			ret = -ENOMEM;
			goto out;
		}
		ret = add_relation_rb(fs_info, list, found_key.objectid,
				      found_key.offset);
		list = NULL;
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				   "orphan qgroup relation 0x%llx->0x%llx",
				   found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (ret >= 0) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	} else {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock.  And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
					   btrfs_qgroup_level(qgroup->qgroupid),
					   btrfs_qgroup_subvolid(qgroup->qgroupid),
					   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable().
 * The first two are single-threaded paths, and for the third one we have
 * already set quota_root to NULL with qgroup_lock held, so it is safe to
 * clean up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting the filesystem
	 * and when disabling quota, so set qgroup_ulist to NULL here to
	 * avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(trans, path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}
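
/*
 * Illustration: a relation is stored twice in the quota tree, once per
 * direction.  E.g. member 257 assigned to parent 1/100 yields the keys
 * (257, BTRFS_QGROUP_RELATION_KEY, 1/100) and
 * (1/100, BTRFS_QGROUP_RELATION_KEY, 257), see btrfs_add_qgroup_relation()
 * which calls add_qgroup_relation_item() for both directions.
 */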

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(trans, leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(trans, leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(trans, l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(trans, l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				       fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(trans, l);

out:
	btrfs_free_path(path);
	return ret;
}
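
/*
 * Note: the update_qgroup_*_item() helpers above share one pattern: look up
 * the existing item with btrfs_search_slot() (cow == 1), rewrite its fields
 * in place and mark the leaf dirty; a missing item is mapped from the
 * positive search result to -ENOENT.
 */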

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_qgroup *prealloc = NULL;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *ulist = NULL;
	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
	int ret = 0;
	int slot;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to enable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
	 * and before setting BTRFS_FS_QUOTA_ENABLED.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info,
			  "qgroups are currently unsupported in extent tree v2");
		return -EINVAL;
	}

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
	 * avoid lock acquisition inversion problems (reported by lockdep) between
	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
	 * start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile. If so,
	 * just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
	 * that function returns 0 (success) when the sysfs entries already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist;
	ulist = NULL;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
	if (simple) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
	} else {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(trans, leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			/* We should not have a stray @prealloc pointer. */
			ASSERT(prealloc == NULL);
			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
			if (!prealloc) {
				ret = -ENOMEM;
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
			prealloc = NULL;
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ASSERT(prealloc == NULL);
	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	if (!prealloc) {
		ret = -ENOMEM;
		goto out_free_path;
	}
	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
	prealloc = NULL;
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	fs_info->qgroup_enable_gen = trans->transid;

	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	/*
	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
	 * a deadlock with tasks concurrently doing other qgroup operations,
	 * such as adding/removing qgroups or adding/deleting qgroup relations,
	 * because all qgroup operations first start or join a transaction and
	 * then lock the qgroup_ioctl_lock mutex.
	 * We are safe from a concurrent task trying to enable quotas, by
	 * calling this function, since we are serialized by fs_info->subvol_sem.
	 */
	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	if (simple)
		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
	spin_unlock(&fs_info->qgroup_lock);

	/* Skip rescan for simple qgroups. */
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		goto out_free_path;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	} else {
		/*
		 * We have set both BTRFS_FS_QUOTA_ENABLED and
		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
		 * -EINPROGRESS. That can happen because someone started the
		 * rescan worker by calling quota rescan ioctl before we
		 * attempted to initialize the rescan worker. Failure due to
		 * quotas disabled in the meanwhile is not possible, because
		 * we are holding a write lock on fs_info->subvol_sem, which
		 * is also acquired when disabling quotas.
		 * Ignore such error, and any other error would need to undo
		 * everything we did in the transaction we just committed.
		 */
		ASSERT(ret == -EINPROGRESS);
		ret = 0;
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	ulist_free(ulist);
	kfree(prealloc);
	return ret;
}

/*
 * It is possible to have outstanding ordered extents which reserved bytes
 * before we disabled. We need to fully flush delalloc, ordered extents, and a
 * commit to ensure that we don't leak such reservations, only to have them
 * come back if we re-enable.
 *
 * - enable simple quotas
 * - reserve space
 * - release it, store rsv_bytes in OE
 * - disable quotas
 * - enable simple quotas (qgroup rsv are all 0)
 * - OE finishes
 * - run delayed refs
 * - free rsv_bytes, resulting in miscounting or even underflow
 */
static int flush_reservations(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	int ret;

	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
	if (ret)
		return ret;
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
	trans = btrfs_join_transaction(fs_info->tree_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_commit_transaction(trans);

	return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	/*
	 * We need to have subvol_sem write locked to prevent races with
	 * snapshot creation.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	/*
	 * Lock the cleaner mutex to prevent races with concurrent relocation,
	 * because relocation may be building backrefs for blocks of the quota
	 * root while we are deleting the root. This is like dropping fs roots
	 * of deleted snapshots/subvolumes, we need the same protection.
	 *
	 * This also prevents races between concurrent tasks trying to disable
	 * quotas, because we will unlock and relock qgroup_ioctl_lock across
	 * BTRFS_FS_QUOTA_ENABLED changes.
	 */
	mutex_lock(&fs_info->cleaner_mutex);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
	 * to lock that mutex while holding a transaction handle and the rescan
	 * worker needs to commit a transaction.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * Request qgroup rescan worker to complete and wait for it. This wait
	 * must be done before transaction start for quota disable since it may
	 * deadlock with transaction by the qgroup rescan worker.
	 */
	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);

	ret = flush_reservations(fs_info);
	if (ret)
		goto out_unlock_cleaner;

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
	fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	spin_lock(&fs_info->trans_lock);
	list_del(&quota_root->dirty_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_tree_lock(quota_root->node);
	btrfs_clear_buffer_dirty(trans, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
			      quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_commit_transaction(trans);
out_unlock_cleaner:
	mutex_unlock(&fs_info->cleaner_mutex);

	return ret;
}
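
/*
 * Note: qgroup_dirty() below only queues the qgroup on
 * fs_info->dirty_qgroups; the updated in-memory counters are written back
 * to the quota tree by btrfs_run_qgroups() in the transaction commit path.
 */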

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
{
	if (!list_empty(&qgroup->iterator))
		return;

	list_add_tail(&qgroup->iterator, head);
}

static void qgroup_iterator_clean(struct list_head *head)
{
	while (!list_empty(head)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
		list_del_init(&qgroup->iterator);
	}
}
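
/*
 * Note: the iterator helpers above implement an allocation-free
 * breadth-first walk.  Qgroups are appended to a local list while it is
 * being iterated (list_for_each_entry() picks up entries appended to the
 * tail), the embedded ->iterator list_head doubles as a "seen" marker so
 * no qgroup is visited twice, and qgroup_iterator_clean() unmarks
 * everything once the walk is done.  __qgroup_excl_accounting() below uses
 * this to visit a qgroup and all of its ancestors.
 */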

/*
 * The easy accounting case: we are updating a qgroup relationship where the
 * child qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to qgroup reservation space, which should also be added
 * to or removed from the parent.  Otherwise, when the child releases its
 * reservation, the parent would underflow its reservation (for the
 * relationship-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup *cur;
	LIST_HEAD(qgroup_list);
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup_iterator_add(&qgroup_list, qgroup);
	list_for_each_entry(cur, &qgroup_list, iterator) {
		struct btrfs_qgroup_list *glist;

		qgroup = cur;
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;

		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		qgroup->excl_cmpr += sign * num_bytes;

		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup_dirty(fs_info, qgroup);

		/* Append parent qgroups to @qgroup_list. */
		list_for_each_entry(glist, &qgroup->groups, next_group)
			qgroup_iterator_add(&qgroup_list, glist->group);
	}
	ret = 0;
out:
	qgroup_iterator_clean(&qgroup_list);
	return ret;
}

/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, updating all parents is enough.  Otherwise we need to do a
 * full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 for a needed full rescan
 * and mark INCONSISTENT flag.
 * Return < 0 for other error.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   u64 src, u64 dst, int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}
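
/*
 * Note: the level check at the start of btrfs_add_qgroup_relation() below
 * means a member can only be assigned to a higher-level parent, e.g.
 * assigning 0/257 to 1/100 is allowed while the reverse fails with -EINVAL,
 * which keeps the qgroup hierarchy acyclic.
 */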

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct btrfs_qgroup_list *prealloc = NULL;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	prealloc = kzalloc(sizeof(*list), GFP_NOFS);
	if (!prealloc) {
		ret = -ENOMEM;
		goto out;
	}
	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = __add_relation_rb(prealloc, member, parent);
	prealloc = NULL;
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	kfree(prealloc);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	bool found = false;
	int ret = 0;
	int ret2;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * If the parent/member pair doesn't exist, then only try to delete
	 * the dead relation items.
	 */
	if (!member || !parent)
		goto delete_item;

	/* check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	if (!ret || !ret2)
		ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup *prealloc = NULL;
	int ret = 0;

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
		return 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	if (!prealloc) {
		ret = -ENOMEM;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
	prealloc = NULL;

	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	kfree(prealloc);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * Remove the qgroup from sysfs now without holding the qgroup_lock
	 * spinlock, since the sysfs_remove_group() function needs to take
	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
	 */
	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
	kfree(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/*
	 * Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat the -1 as a special value
	 * which tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		qgroup_mark_inconsistent(fs_info);
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
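
/*
 * Usage note (illustrative): to clear only the referenced-bytes limit of a
 * qgroup, a caller would set limit->flags = BTRFS_QGROUP_LIMIT_MAX_RFER and
 * limit->max_rfer = (u64)-1 before calling btrfs_limit_qgroup() above.
 */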

/*
 * Inform qgroup to trace one dirty extent, its info is recorded in @record.
 * So qgroup can account it at transaction committing time.
 *
 * No-lock version: the caller must hold the delayed ref lock and have
 * allocated the memory, then call btrfs_qgroup_trace_extent_post() after
 * exiting the lock context.
 *
 * Return 0 for a successful insert.
 * Return >0 for an existing record; the caller can free @record safely.
 * Errors are not possible.
 */
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				     struct btrfs_delayed_ref_root *delayed_refs,
				     struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	if (!btrfs_qgroup_full_accounting(fs_info))
		return 0;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
		} else {
			if (record->data_rsv && !entry->data_rsv) {
				entry->data_rsv = record->data_rsv;
				entry->data_rsv_refroot =
					record->data_rsv_refroot;
			}
			return 1;
		}
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}
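
/*
 * Note: callers are expected to pair the nolock insert above with
 * btrfs_qgroup_trace_extent_post() once the delayed-refs spinlock is
 * dropped; btrfs_qgroup_trace_extent() below is the encapsulated version
 * doing the allocation, locking and post step itself.
 */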
/*
 * Inform qgroup to trace one dirty extent, specified by @bytenr and
 * @num_bytes.
 * So qgroup can account it at transaction commit time.
 *
 * Better encapsulated version, with memory allocation and backref walk for
 * commit roots.
 * So this can sleep.
 *
 * Return 0 if the operation is done.
 * Return <0 for error, like memory allocation failure or invalid parameter
 * (NULL trans)
 */
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
		return 0;
	record = kzalloc(sizeof(*record), GFP_NOFS);
	if (!record)
		return -ENOMEM;

	delayed_refs = &trans->transaction->delayed_refs;
	record->bytenr = bytenr;
	record->num_bytes = num_bytes;
	record->old_roots = NULL;

	spin_lock(&delayed_refs->lock);
	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
	spin_unlock(&delayed_refs->lock);
	if (ret > 0) {
		kfree(record);
		return 0;
	}
	return btrfs_qgroup_trace_extent_post(trans, record);
}

/*
 * Inform qgroup to trace all data (file extent) items in a leaf.
 *
 * Return 0 for success
 * Return <0 for error (ENOMEM)
 */
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	/* We can be called directly from walk_up_proc() */
	if (!btrfs_qgroup_full_accounting(fs_info))
		return 0;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* Filter out non qgroup-accountable extents */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
		if (ret)
			return ret;
	}
	cond_resched();
	return 0;
}
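/*
 * Worked example for the filtering above (the values are made up): for a
 * leaf containing these EXTENT_DATA items
 *
 *	item 0: inline extent                     -> skipped (INLINE)
 *	item 1: regular, disk_bytenr = 0 (hole)   -> skipped (bytenr == 0)
 *	item 2: regular, disk_bytenr = 13631488, disk_num_bytes = 1M
 *	item 3: prealloc, disk_bytenr = 14680064, disk_num_bytes = 4K
 *
 * only items 2 and 3 are handed to btrfs_qgroup_trace_extent(), and always
 * with the full on-disk extent size, not the (possibly smaller) range the
 * file actually references.
 */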
/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root node's slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}
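/*
 * Tiny walkthrough of the helper above, for a 2-level tree (root_level = 1)
 * whose root has 3 items and whose current leaf was fully processed:
 *
 *	before: slots = { [0] = last leaf slot, [1] = 0 }
 *	step:   level 0 slot overflows -> leaf unlocked, freed, nodes[0] = NULL
 *	step:   level 1 slot 0 -> 1, still < 3 -> stop, return 0
 *
 * The caller then re-enters its walk loop, sees nodes[0] == NULL and reads
 * the child at root slot 1. Once slots[1] reaches 3, the function returns 1
 * and the search is complete.
 */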
/*
 * Helper function to trace a subtree tree block swap.
 *
 * The swap will happen in the highest tree block, but there may be a lot of
 * tree blocks involved.
 *
 * For example:
 *  OO = Old tree blocks
 *  NN = New tree blocks allocated during balance
 *
 *           File tree (257)                  Reloc tree for 257
 * L2              OO                                NN
 *               /    \                            /    \
 * L1          OO      OO (a)                    OO      NN (a)
 *            / \     / \                       / \     / \
 * L0       OO   OO OO   OO                   OO   OO NN   NN
 *                  (b)  (c)                          (b)  (c)
 *
 * When calling qgroup_trace_extent_swap(), we will pass:
 * @src_eb = OO(a)
 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
 * @dst_level = 0
 * @root_level = 1
 *
 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
 *
 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
 *
 * 1) Tree search from @src_eb
 *    It should act as a simplified btrfs_search_slot().
 *    The key for the search can be extracted from @dst_path->nodes[dst_level]
 *    (first key).
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
 *    They should be marked during the previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have a good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leaf.
 *
 * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in the reloc tree.
 */
static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
				    struct extent_buffer *src_eb,
				    struct btrfs_path *dst_path,
				    int dst_level, int root_level,
				    bool trace_leaf)
{
	struct btrfs_key key;
	struct btrfs_path *src_path;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u32 nodesize = fs_info->nodesize;
	int cur_level = root_level;
	int ret;

	BUG_ON(dst_level > root_level);
	/* Level mismatch */
	if (btrfs_header_level(src_eb) != root_level)
		return -EINVAL;

	src_path = btrfs_alloc_path();
	if (!src_path) {
		ret = -ENOMEM;
		goto out;
	}

	if (dst_level)
		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
	else
		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);

	/* For src_path */
	atomic_inc(&src_eb->refs);
	src_path->nodes[root_level] = src_eb;
	src_path->slots[root_level] = dst_path->slots[root_level];
	src_path->locks[root_level] = 0;

	/* A simplified version of btrfs_search_slot() */
	while (cur_level >= dst_level) {
		struct btrfs_key src_key;
		struct btrfs_key dst_key;

		if (src_path->nodes[cur_level] == NULL) {
			struct extent_buffer *eb;
			int parent_slot;

			eb = src_path->nodes[cur_level + 1];
			parent_slot = src_path->slots[cur_level + 1];

			eb = btrfs_read_node_slot(eb, parent_slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			}

			src_path->nodes[cur_level] = eb;

			btrfs_tree_read_lock(eb);
			src_path->locks[cur_level] = BTRFS_READ_LOCK;
		}

		src_path->slots[cur_level] = dst_path->slots[cur_level];
		if (cur_level) {
			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		} else {
			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
					&dst_key, dst_path->slots[cur_level]);
			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
					&src_key, src_path->slots[cur_level]);
		}
		/* Content mismatch, something went wrong */
		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
			ret = -ENOENT;
			goto out;
		}
		cur_level--;
	}

	/*
	 * Now both @dst_path and @src_path have been populated, record the tree
	 * blocks for qgroup accounting.
	 */
	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
					nodesize);
	if (ret < 0)
		goto out;
	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
					nodesize);
	if (ret < 0)
		goto out;

	/* Record leaf file extents */
	if (dst_level == 0 && trace_leaf) {
		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
		if (ret < 0)
			goto out;
		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
	}
out:
	btrfs_free_path(src_path);
	return ret;
}
2273 */ 2274 ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start, 2275 nodesize); 2276 if (ret < 0) 2277 goto out; 2278 ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start, 2279 nodesize); 2280 if (ret < 0) 2281 goto out; 2282 2283 /* Record leaf file extents */ 2284 if (dst_level == 0 && trace_leaf) { 2285 ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]); 2286 if (ret < 0) 2287 goto out; 2288 ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]); 2289 } 2290 out: 2291 btrfs_free_path(src_path); 2292 return ret; 2293 } 2294 2295 /* 2296 * Helper function to do recursive generation-aware depth-first search, to 2297 * locate all new tree blocks in a subtree of reloc tree. 2298 * 2299 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot) 2300 * reloc tree 2301 * L2 NN (a) 2302 * / \ 2303 * L1 OO NN (b) 2304 * / \ / \ 2305 * L0 OO OO OO NN 2306 * (c) (d) 2307 * If we pass: 2308 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ], 2309 * @cur_level = 1 2310 * @root_level = 1 2311 * 2312 * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace 2313 * above tree blocks along with their counter parts in file tree. 2314 * While during search, old tree blocks OO(c) will be skipped as tree block swap 2315 * won't affect OO(c). 2316 */ 2317 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans, 2318 struct extent_buffer *src_eb, 2319 struct btrfs_path *dst_path, 2320 int cur_level, int root_level, 2321 u64 last_snapshot, bool trace_leaf) 2322 { 2323 struct btrfs_fs_info *fs_info = trans->fs_info; 2324 struct extent_buffer *eb; 2325 bool need_cleanup = false; 2326 int ret = 0; 2327 int i; 2328 2329 /* Level sanity check */ 2330 if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 || 2331 root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 || 2332 root_level < cur_level) { 2333 btrfs_err_rl(fs_info, 2334 "%s: bad levels, cur_level=%d root_level=%d", 2335 __func__, cur_level, root_level); 2336 return -EUCLEAN; 2337 } 2338 2339 /* Read the tree block if needed */ 2340 if (dst_path->nodes[cur_level] == NULL) { 2341 int parent_slot; 2342 u64 child_gen; 2343 2344 /* 2345 * dst_path->nodes[root_level] must be initialized before 2346 * calling this function. 2347 */ 2348 if (cur_level == root_level) { 2349 btrfs_err_rl(fs_info, 2350 "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d", 2351 __func__, root_level, root_level, cur_level); 2352 return -EUCLEAN; 2353 } 2354 2355 /* 2356 * We need to get child blockptr/gen from parent before we can 2357 * read it. 
static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
				     struct extent_buffer *src_eb,
				     struct extent_buffer *dst_eb,
				     u64 last_snapshot, bool trace_leaf)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *dst_path = NULL;
	int level;
	int ret;

	if (!btrfs_qgroup_full_accounting(fs_info))
		return 0;

	/* Wrong parameter order */
	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
		btrfs_err_rl(fs_info,
		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
			     btrfs_header_generation(src_eb),
			     btrfs_header_generation(dst_eb));
		return -EUCLEAN;
	}

	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
		ret = -EIO;
		goto out;
	}

	level = btrfs_header_level(dst_eb);
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		ret = -ENOMEM;
		goto out;
	}
	/* For dst_path */
	atomic_inc(&dst_eb->refs);
	dst_path->nodes[level] = dst_eb;
	dst_path->slots[level] = 0;
	dst_path->locks[level] = 0;

	/* Do the generation-aware depth-first search */
	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
					      level, last_snapshot, trace_leaf);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(dst_path);
	if (ret < 0)
		qgroup_mark_inconsistent(fs_info);
	return ret;
}
/*
 * Inform qgroup to trace a whole subtree, including all its child tree
 * blocks and data.
 * The root tree block is specified by @root_eb.
 *
 * Normally used by relocation (tree block swap) and subvolume deletion.
 *
 * Return 0 for success
 * Return <0 for error (ENOMEM or tree search error)
 */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;
	int level;
	u8 drop_subptree_thres;
	struct extent_buffer *eb = root_eb;
	struct btrfs_path *path = NULL;

	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
	BUG_ON(root_eb == NULL);

	if (!btrfs_qgroup_full_accounting(fs_info))
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * This function only gets called for snapshot drop; if we hit a high
	 * level node here, it means we are going to change ownership for quite
	 * a lot of extents, which will greatly slow down
	 * btrfs_commit_transaction().
	 *
	 * So if we find a high level node here, we just skip the accounting and
	 * mark the qgroup inconsistent.
	 */
	if (root_level >= drop_subptree_thres) {
		qgroup_mark_inconsistent(fs_info);
		return 0;
	}

	if (!extent_buffer_uptodate(root_eb)) {
		struct btrfs_tree_parent_check check = {
			.has_first_key = false,
			.transid = root_gen,
			.level = root_level
		};

		ret = btrfs_read_extent_buffer(root_eb, &check);
		if (ret)
			goto out;
	}

	if (root_level == 0) {
		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Walk down the tree. Missing extent blocks are filled in as
	 * we go. Metadata is accounted every time we read a new
	 * extent block.
	 *
	 * When we reach a leaf, we account for file extent items in it,
	 * walk back up the tree (adjusting slot pointers as we go)
	 * and restart the search process.
	 */
	atomic_inc(&root_eb->refs);	/* For path */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;
	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
	level = root_level;
	while (level >= 0) {
		if (path->nodes[level] == NULL) {
			int parent_slot;
			u64 child_bytenr;

			/*
			 * We need to get child blockptr from parent before we
			 * can read it.
			 */
			eb = path->nodes[level + 1];
			parent_slot = path->slots[level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);

			eb = btrfs_read_node_slot(eb, parent_slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			}

			path->nodes[level] = eb;
			path->slots[level] = 0;

			btrfs_tree_read_lock(eb);
			path->locks[level] = BTRFS_READ_LOCK;

			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
							fs_info->nodesize);
			if (ret)
				goto out;
		}

		if (level == 0) {
			ret = btrfs_qgroup_trace_leaf_items(trans,
							    path->nodes[level]);
			if (ret)
				goto out;

			/* Nonzero return here means we completed our search */
			ret = adjust_slots_upwards(path, root_level);
			if (ret)
				break;

			/* Restart search with new slots */
			goto walk_down;
		}

		level--;
	}

	ret = 0;
out:
	btrfs_free_path(path);

	return ret;
}
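/*
 * Note on the threshold check above, with an invented example (assuming the
 * default qgroup_drop_subtree_thres of BTRFS_MAX_LEVEL, under which the trace
 * is never skipped unless an admin lowers the value): were the threshold
 * lowered to 3, dropping a snapshot whose subtree root sits at level 3 or
 * higher would skip the per-extent trace entirely and flag the qgroups
 * inconsistent instead, deferring the cost to a later rescan. That trades
 * accuracy now for a much cheaper btrfs_commit_transaction().
 */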
static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
{
	if (!list_empty(&qgroup->nested_iterator))
		return;

	list_add_tail(&qgroup->nested_iterator, head);
}

static void qgroup_iterator_nested_clean(struct list_head *head)
{
	while (!list_empty(head)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
		list_del_init(&qgroup->nested_iterator);
	}
}

#define UPDATE_NEW	0
#define UPDATE_OLD	1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */
static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				 struct ulist *roots, struct list_head *qgroups,
				 u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;

	if (!roots)
		return;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		LIST_HEAD(tmp);

		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		qgroup_iterator_nested_add(qgroups, qg);
		qgroup_iterator_add(&tmp, qg);
		list_for_each_entry(qg, &tmp, iterator) {
			struct btrfs_qgroup_list *glist;

			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);

			list_for_each_entry(glist, &qg->groups, next_group) {
				qgroup_iterator_nested_add(qgroups, glist->group);
				qgroup_iterator_add(&tmp, glist->group);
			}
		}
		qgroup_iterator_clean(&tmp);
	}
}
/*
 * Update qgroup rfer/excl counters.
 * The rfer update is easy, the code can explain itself.
 *
 * The excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *	|	A	|	!A	|
 * -------------------------------------
 * B	|	*	|	-	|
 * -------------------------------------
 * !B	|	+	|	**	|
 * -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possibly unchanged.
 *
 * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
 * case.
 *
 * To make the logic clear, we first use conditions A and B to split the
 * combinations into 4 results.
 *
 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
 * those cases only one variable may be 0.
 *
 * Lastly, check result **: since there are 2 variables that may be 0, split
 * it again (2x2).
 * But this time we don't need to consider other things, the code and logic
 * are easy to understand now.
 */
static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
				   struct list_head *qgroups, u64 nr_old_roots,
				   u64 nr_new_roots, u64 num_bytes, u64 seq)
{
	struct btrfs_qgroup *qg;

	list_for_each_entry(qg, qgroups, nested_iterator) {
		u64 cur_new_count, cur_old_count;
		bool dirty = false;

		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
					     cur_new_count);

		/* Rfer update part */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared->exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */

				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */

				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
}
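/*
 * Worked example for the matrix above (made-up numbers): an extent of
 * num_bytes = 16K was referenced by subvolumes 257 and 258 and loses the
 * reference from 258, so nr_old_roots = 2 and nr_new_roots = 1. For the
 * level 0 qgroups:
 *
 *	qgroup 0/257: cur_old = 1 < 2 (A), cur_new = 1 == 1 (!B)
 *		-> "+": shared -> exclusive, excl += 16K (rfer unchanged)
 *	qgroup 0/258: cur_old = 1 < 2 (A), cur_new = 0 < 1 (B)
 *		-> "*" for excl, but cur_new == 0 drops rfer by 16K
 *
 * A parent qgroup containing both subvolumes sees cur_old = 2 == 2 and
 * cur_new = 1 == 1 ("**", exclusive -> exclusive): nothing changes, which
 * matches intuition since the extent never left the parent's scope.
 */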
/*
 * Check if the @roots ulist could be a list of fs tree roots
 *
 * Return 0 for definitely not a fs/subvol tree roots ulist
 * Return 1 for possible fs/subvol tree roots in the list (considering an empty
 *          one as well)
 */
static int maybe_fs_roots(struct ulist *roots)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	/* Empty one, still possible for fs roots */
	if (!roots || roots->nnodes == 0)
		return 1;

	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter);
	if (!unode)
		return 1;

	/*
	 * If it contains fs tree roots, then it must belong to fs/subvol
	 * trees.
	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
	 */
	return is_fstree(unode->val);
}

int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	LIST_HEAD(qgroups);
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	/*
	 * If quotas get disabled meanwhile, the resources need to be freed and
	 * we can't just exit here.
	 */
	if (!btrfs_qgroup_full_accounting(fs_info) ||
	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
		goto out_free;

	if (new_roots) {
		if (!maybe_fs_roots(new_roots))
			goto out_free;
		nr_new_roots = new_roots->nnodes;
	}
	if (old_roots) {
		if (!maybe_fs_roots(old_roots))
			goto out_free;
		nr_old_roots = old_roots->nnodes;
	}

	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
	if (nr_old_roots == 0 && nr_new_roots == 0)
		goto out_free;

	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
					  num_bytes, nr_old_roots, nr_new_roots);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);

	/* Update new refcnts using new_roots */
	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);

	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	qgroup_iterator_nested_clean(&qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}
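/*
 * Why the bump above is max(nr_old_roots, nr_new_roots) + 1, in sketch form
 * (this restates the refcnt scheme used by btrfs_qgroup_update_old/new_refcnt,
 * it is not a new mechanism): a refcnt is stored as an absolute value relative
 * to the seq it was taken under, e.g. with seq = 100 and two old roots a
 * qgroup's old refcnt ends up as 102, read back as 102 - 100 = 2. Advancing
 * qgroup_seq past 100 + max(2, ...) guarantees the next accounting round
 * starts above every value written in this one, so stale refcnts are simply
 * ignored by the "is it >= seq" checks instead of needing to be reset.
 */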
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 num_dirty_extents = 0;
	u64 qgroup_to_skip;
	int ret = 0;

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		return 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		num_dirty_extents++;
		trace_btrfs_qgroup_account_extents(fs_info, record);

		if (!ret && !(fs_info->qgroup_flags &
			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
			struct btrfs_backref_walk_ctx ctx = { 0 };

			ctx.bytenr = record->bytenr;
			ctx.fs_info = fs_info;

			/*
			 * Old roots should be searched when inserting the
			 * qgroup extent record.
			 *
			 * But for the INCONSISTENT (NO_ACCOUNTING) -> rescan
			 * case, we may have some records inserted during
			 * NO_ACCOUNTING (thus no old_roots populated), but
			 * later we start a rescan, which clears NO_ACCOUNTING,
			 * leaving some inserted records without old_roots
			 * populated.
			 *
			 * Those cases are rare and should not cause too much
			 * time spent during commit_transaction().
			 */
			if (!record->old_roots) {
				/* Search commit root to find old_roots */
				ret = btrfs_find_all_roots(&ctx, false);
				if (ret < 0)
					goto cleanup;
				record->old_roots = ctx.roots;
				ctx.roots = NULL;
			}

			/* Free the reserved data space */
			btrfs_qgroup_free_refroot(fs_info,
					record->data_rsv_refroot,
					record->data_rsv,
					BTRFS_QGROUP_RSV_DATA);
			/*
			 * Use BTRFS_SEQ_LAST as time_seq to do a special
			 * search which doesn't lock the tree or delayed_refs
			 * and searches the current root. It's safe inside
			 * commit_transaction().
			 */
			ctx.trans = trans;
			ctx.time_seq = BTRFS_SEQ_LAST;
			ret = btrfs_find_all_roots(&ctx, false);
			if (ret < 0)
				goto cleanup;
			new_roots = ctx.roots;
			if (qgroup_to_skip) {
				ulist_del(new_roots, qgroup_to_skip, 0);
				ulist_del(record->old_roots, qgroup_to_skip,
					  0);
			}
			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
							  record->num_bytes,
							  record->old_roots,
							  new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);

	}
	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
				       num_dirty_extents);
	return ret;
}
/*
 * Writes all changed qgroups to disk.
 * Called by the transaction commit path and the qgroup assign ioctl.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	/*
	 * In case we are called from the qgroup assign ioctl, assert that we
	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
	 * disable operation (ioctl) and access a freed quota root.
	 */
	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);

	if (!fs_info->quota_root)
		return ret;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, qgroup);
		if (ret)
			qgroup_mark_inconsistent(fs_info);
		ret = update_qgroup_limit_item(trans, qgroup);
		if (ret)
			qgroup_mark_inconsistent(fs_info);
		spin_lock(&fs_info->qgroup_lock);
	}
	if (btrfs_qgroup_enabled(fs_info))
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans);
	if (ret)
		qgroup_mark_inconsistent(fs_info);

	return ret;
}

static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
			       u64 inode_rootid,
			       struct btrfs_qgroup_inherit **inherit)
{
	int i = 0;
	u64 num_qgroups = 0;
	struct btrfs_qgroup *inode_qg;
	struct btrfs_qgroup_list *qg_list;
	struct btrfs_qgroup_inherit *res;
	size_t struct_sz;
	u64 *qgids;

	if (*inherit)
		return -EEXIST;

	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
	if (!inode_qg)
		return -ENOENT;

	num_qgroups = list_count_nodes(&inode_qg->groups);

	if (!num_qgroups)
		return 0;

	struct_sz = struct_size(res, qgroups, num_qgroups);
	if (struct_sz == SIZE_MAX)
		return -ERANGE;

	res = kzalloc(struct_sz, GFP_NOFS);
	if (!res)
		return -ENOMEM;
	res->num_qgroups = num_qgroups;
	qgids = res->qgroups;

	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
		qgids[i++] = qg_list->group->qgroupid;

	*inherit = res;
	return 0;
}
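/*
 * Sketch of the btrfs_qgroup_inherit memory layout the surrounding code
 * relies on (a restatement of the existing ioctl ABI, not a new structure):
 * one flat allocation where the fixed header is followed by a single u64
 * array, addressed as i_qgroups = (u64 *)(inherit + 1) below:
 *
 *	[ header: flags, num_qgroups, num_ref_copies, num_excl_copies, lim ]
 *	[ num_qgroups      u64s:      qgroups to add the new subvol to    ]
 *	[ num_ref_copies   u64 pairs: (src, dst) for rfer copies          ]
 *	[ num_excl_copies  u64 pairs: (src, dst) for excl copies          ]
 *
 * qgroup_auto_inherit() above only ever fills the first region, which is
 * why it can size the allocation with struct_size(res, qgroups, n).
 */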
/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, u64 inode_rootid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	bool committing = false;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	struct btrfs_qgroup *prealloc;
	struct btrfs_qgroup_list **qlist_prealloc = NULL;
	bool free_inherit = false;
	bool need_rescan = false;
	u32 level_size = 0;
	u64 nums;

	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	if (!prealloc)
		return -ENOMEM;

	/*
	 * There are only two callers of this function.
	 *
	 * One in create_subvol() in the ioctl context, which needs to hold
	 * the qgroup_ioctl_lock.
	 *
	 * The other one in create_pending_snapshot() where no other qgroup
	 * code can modify the fs as they all need to either start a new trans
	 * or hold a trans handle, thus we don't need to hold
	 * qgroup_ioctl_lock.
	 * This avoids a long and complex lock chain and makes lockdep happy.
	 */
	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
		committing = true;
	spin_unlock(&fs_info->trans_lock);

	if (!committing)
		mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!btrfs_qgroup_enabled(fs_info))
		goto out;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
		if (ret)
			goto out;
		free_inherit = true;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * Create a tracking group for the subvol itself.
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	/*
	 * Add the qgroup to all inherited groups.
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, objectid,
						       *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, *i_qgroups,
						       objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;

		qlist_prealloc = kcalloc(inherit->num_qgroups,
					 sizeof(struct btrfs_qgroup_list *),
					 GFP_NOFS);
		if (!qlist_prealloc) {
			ret = -ENOMEM;
			goto out;
		}
		for (int i = 0; i < inherit->num_qgroups; i++) {
			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
						    GFP_NOFS);
			if (!qlist_prealloc[i]) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
	prealloc = NULL;

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
	}
	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make sure
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		level_size = fs_info->nodesize;
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* Inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		if (*i_qgroups) {
			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
					      *i_qgroups);
			qlist_prealloc[i] = NULL;
			if (ret)
				goto unlock;
		}
		++i_qgroups;

		/*
		 * If we're doing a snapshot, and adding the snapshot to a new
		 * qgroup, the numbers are guaranteed to be incorrect.
		 */
		if (srcid)
			need_rescan = true;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;

		/* Manually tweaking numbers certainly needs a rescan */
		need_rescan = true;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		need_rescan = true;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
	if (!ret)
		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
out:
	if (!committing)
		mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (need_rescan)
		qgroup_mark_inconsistent(fs_info);
	if (qlist_prealloc) {
		for (int i = 0; i < inherit->num_qgroups; i++)
			kfree(qlist_prealloc[i]);
		kfree(qlist_prealloc);
	}
	if (free_inherit)
		kfree(inherit);
	kfree(prealloc);
	return ret;
}

static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
{
	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
		return false;

	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
		return false;

	return true;
}
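/*
 * Worked example for qgroup_check_limits() (invented numbers): with
 * max_rfer = 1M enforced, rfer = 800K already committed and 150K still held
 * in reservations (qgroup_rsv_total()), a new request for 100K evaluates
 * 150K + 800K + 100K = 1050K > 1M and is rejected, which qgroup_reserve()
 * below turns into -EDQUOT. Note that the check counts outstanding
 * reservations, so pending writes can hit the limit before anything
 * reaches disk.
 */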
static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
			  enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	LIST_HEAD(qgroup_list);

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
	    capable(CAP_SYS_RESOURCE))
		enforce = false;

	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup_iterator_add(&qgroup_list, qgroup);
	list_for_each_entry(qgroup, &qgroup_list, iterator) {
		struct btrfs_qgroup_list *glist;

		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qgroup->groups, next_group)
			qgroup_iterator_add(&qgroup_list, glist->group);
	}

	ret = 0;
	/*
	 * No limits exceeded, now record the reservation into all qgroups.
	 */
	list_for_each_entry(qgroup, &qgroup_list, iterator)
		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);

out:
	qgroup_iterator_clean(&qgroup_list);
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}

/*
 * Free @num_bytes of reserved space with @type for qgroup (normally the
 * level 0 qgroup).
 *
 * Will handle all higher level qgroups too.
 *
 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
 * This special case is only used for META_PERTRANS type.
 */
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	struct btrfs_qgroup *qgroup;
	LIST_HEAD(qgroup_list);

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
		WARN(1, "%s: Invalid type to free", __func__);
		return;
	}
	spin_lock(&fs_info->qgroup_lock);

	if (!fs_info->quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	if (num_bytes == (u64)-1)
		/*
		 * We're freeing all pertrans rsv, get the reserved value from
		 * the level 0 qgroup as the real num_bytes to free.
		 */
		num_bytes = qgroup->rsv.values[type];

	qgroup_iterator_add(&qgroup_list, qgroup);
	list_for_each_entry(qgroup, &qgroup_list, iterator) {
		struct btrfs_qgroup_list *glist;

		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			qgroup_iterator_add(&qgroup_list, glist->group);
		}
	}
out:
	qgroup_iterator_clean(&qgroup_list);
	spin_unlock(&fs_info->qgroup_lock);
}

/*
 * Check if the leaf is the last leaf, which means all node pointers
 * are at their last position.
 */
static bool is_last_leaf(struct btrfs_path *path)
{
	int i;

	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
			return false;
	}
	return true;
}
/*
 * Returns < 0 on error, 0 when more leaves are to be scanned.
 * Returns 1 when done.
 */
static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_key found;
	struct extent_buffer *scratch_leaf = NULL;
	u64 num_bytes;
	bool done;
	int slot;
	int ret;

	if (!btrfs_qgroup_full_accounting(fs_info))
		return 1;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	extent_root = btrfs_extent_root(fs_info,
				fs_info->qgroup_rescan_progress.objectid);
	ret = btrfs_search_slot_for_read(extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	btrfs_debug(fs_info,
		"current progress key (%llu %u %llu), search_slot ret %d",
		fs_info->qgroup_rescan_progress.objectid,
		fs_info->qgroup_rescan_progress.type,
		fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}
	done = is_last_leaf(path);

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!scratch_leaf) {
		ret = -ENOMEM;
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto out;
	}
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		struct btrfs_backref_walk_ctx ctx = { 0 };

		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->nodesize;
		else
			num_bytes = found.offset;

		ctx.bytenr = found.objectid;
		ctx.fs_info = fs_info;

		ret = btrfs_find_all_roots(&ctx, false);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, found.objectid,
						  num_bytes, NULL, ctx.roots);
		if (ret < 0)
			goto out;
	}
out:
	if (scratch_leaf)
		free_extent_buffer(scratch_leaf);

	if (done && !ret) {
		ret = 1;
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
	}
	return ret;
}

static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{
	if (btrfs_fs_closing(fs_info))
		return true;
	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
		return true;
	if (!btrfs_qgroup_enabled(fs_info))
		return true;
	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
		return true;
	return false;
}
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	int err = -ENOMEM;
	int ret = 0;
	bool stopped = false;
	bool did_leaf_rescans = false;

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		return;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	/*
	 * Rescan should only search for the commit root, and any later
	 * difference should be recorded by qgroup.
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	err = 0;
	while (!err && !(stopped = rescan_should_stop(fs_info))) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}

		err = qgroup_rescan_leaf(trans, path);
		did_leaf_rescans = true;

		if (err > 0)
			btrfs_commit_transaction(trans);
		else
			btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0 || stopped) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * Only update status, since the previous part has already updated the
	 * qgroup info, and only if we did any actual work. This also prevents
	 * race with a concurrent quota disable, which has already set
	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
	 * btrfs_quota_disable().
	 */
	if (did_leaf_rescans) {
		trans = btrfs_start_transaction(fs_info->quota_root, 1);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			btrfs_err(fs_info,
				  "fail to start transaction for status update: %d",
				  err);
		}
	} else {
		trans = NULL;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (!stopped ||
	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	if (trans) {
		ret = update_qgroup_status_item(trans);
		if (ret < 0) {
			err = ret;
			btrfs_err(fs_info, "fail to update qgroup status: %d",
				  err);
		}
	}
	fs_info->qgroup_rescan_running = false;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
	complete_all(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!trans)
		return;

	btrfs_end_transaction(trans);

	if (stopped) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
		btrfs_info(fs_info, "qgroup scan cancelled");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
		return -EINVAL;
	}

	if (!init_flags) {
		/* we're resuming qgroup rescan at mount time */
		if (!(fs_info->qgroup_flags &
		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup rescan is not queued");
			ret = -EINVAL;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		}

		if (ret)
			return ret;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
			btrfs_warn(fs_info,
				   "qgroup rescan is already in progress");
			ret = -EINPROGRESS;
		} else if (!(fs_info->qgroup_flags &
			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
			btrfs_warn(fs_info,
			"qgroup rescan init failed, qgroup is not enabled");
			ret = -EINVAL;
		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
			/* Quota disable is in progress */
			ret = -EBUSY;
		}

		if (ret) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return ret;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
	       sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_worker, NULL);
	return 0;
}

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
		qgroup_dirty(fs_info, qgroup);
	}
	spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_attach_transaction_barrier(fs_info->fs_root);
	if (IS_ERR(trans) && trans != ERR_PTR(-ENOENT)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	} else if (trans != ERR_PTR(-ENOENT)) {
		ret = btrfs_commit_transaction(trans);
		if (ret) {
			fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
			return ret;
		}
	}

	qgroup_rescan_zero_tracking(fs_info);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_rescan_running = true;
	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	return 0;
}

int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	running = fs_info->qgroup_rescan_running;
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (!running)
		return 0;

	if (interruptible)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);
	else
		wait_for_completion(&fs_info->qgroup_rescan_completion);

	return ret;
}

/*
 * This is only called from open_ctree() where we're still single threaded,
 * thus locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		mutex_lock(&fs_info->qgroup_rescan_lock);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
	}
}
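/*
 * Context for the helper below, with a small invented example: data
 * reservations are tracked as EXTENT_QGROUP_RESERVED ranges in the inode's
 * io_tree, and each reservation also records the ranges it newly set in
 * @reserved (a ulist of [start, end] pairs plus a bytes_changed total).
 * If @reserved recorded two ranges, [0, 128K-1] and [256K, 512K-1], then
 * qgroup_unreserve_range(inode, reserved, 256K, 256K) leaves the first
 * entry alone, clears EXTENT_QGROUP_RESERVED on the second and subtracts
 * its 256K from reserved->bytes_changed. Entries are dropped whole; the
 * rb-tree search only has to find the first entry that might overlap
 * [start, start + len).
 */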
static int qgroup_unreserve_range(struct btrfs_inode *inode,
                                  struct extent_changeset *reserved, u64 start,
                                  u64 len)
{
        struct rb_node *node;
        struct rb_node *next;
        struct ulist_node *entry;
        int ret = 0;

        node = reserved->range_changed.root.rb_node;
        if (!node)
                return 0;
        while (node) {
                entry = rb_entry(node, struct ulist_node, rb_node);
                if (entry->val < start)
                        node = node->rb_right;
                else
                        node = node->rb_left;
        }

        if (entry->val > start && rb_prev(&entry->rb_node))
                entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
                                 rb_node);

        rbtree_iterate_from_safe(node, next, &entry->rb_node) {
                u64 entry_start;
                u64 entry_end;
                u64 entry_len;
                int clear_ret;

                entry = rb_entry(node, struct ulist_node, rb_node);
                entry_start = entry->val;
                entry_end = entry->aux;
                entry_len = entry_end - entry_start + 1;

                if (entry_start >= start + len)
                        break;
                if (entry_start + entry_len <= start)
                        continue;
                /*
                 * Now the entry is in [start, start + len), revert the
                 * EXTENT_QGROUP_RESERVED bit.
                 */
                clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
                                              entry_end, EXTENT_QGROUP_RESERVED);
                if (!ret && clear_ret < 0)
                        ret = clear_ret;

                ulist_del(&reserved->range_changed, entry->val, entry->aux);
                if (likely(reserved->bytes_changed >= entry_len)) {
                        reserved->bytes_changed -= entry_len;
                } else {
                        WARN_ON(1);
                        reserved->bytes_changed = 0;
                }
        }

        return ret;
}

/*
 * Try to free some space for qgroup.
 *
 * For qgroup, there are only 3 ways to free qgroup space:
 * - Flush nodatacow write
 *   Any nodatacow write will free its reserved data space at
 *   run_delalloc_range().
 *   In theory, we should only flush nodatacow inodes, but that's not yet
 *   possible, so we have to flush the whole root.
 *
 * - Wait for ordered extents
 *   When ordered extents are finished, their reserved metadata is finally
 *   converted to per_trans status, which can be freed by a later transaction
 *   commit.
 *
 * - Commit transaction
 *   This would free the meta_per_trans space.
 *   In theory this shouldn't provide much space, but any extra qgroup space
 *   helps here.
 */
static int try_flush_qgroup(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        int ret;

        /* Can't hold an open transaction or we run the risk of deadlocking. */
        ASSERT(current->journal_info == NULL);
        if (WARN_ON(current->journal_info))
                return 0;

        /*
         * We don't want to run flush again and again, so if there is a running
         * one, we won't try to start a new flush, but exit directly.
         */
        if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
                wait_event(root->qgroup_flush_wait,
                        !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
                return 0;
        }

        ret = btrfs_start_delalloc_snapshot(root, true);
        if (ret < 0)
                goto out;
        btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                if (ret == -ENOENT)
                        ret = 0;
                goto out;
        }

        ret = btrfs_commit_transaction(trans);
out:
        clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
        wake_up(&root->qgroup_flush_wait);
        return ret;
}
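/*
 * Added commentary, not part of the original file: try_flush_qgroup() is the
 * slow path behind the -EDQUOT retry pattern used by the reservation helpers
 * below, which all follow the same shape (sketch only):
 *
 *        ret = reserve(...);
 *        if (ret <= 0 && ret != -EDQUOT)
 *                return ret;                // success or hard error
 *        ret = try_flush_qgroup(root);      // try to reclaim space
 *        if (ret < 0)
 *                return ret;
 *        return reserve(...);               // single retry, no loop
 *
 * See btrfs_qgroup_reserve_data() and __btrfs_qgroup_reserve_meta().
 */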
static int qgroup_reserve_data(struct btrfs_inode *inode,
                        struct extent_changeset **reserved_ret, u64 start,
                        u64 len)
{
        struct btrfs_root *root = inode->root;
        struct extent_changeset *reserved;
        bool new_reserved = false;
        u64 orig_reserved;
        u64 to_reserve;
        int ret;

        if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid) || len == 0)
                return 0;

        /* @reserved parameter is mandatory for qgroup */
        if (WARN_ON(!reserved_ret))
                return -EINVAL;
        if (!*reserved_ret) {
                new_reserved = true;
                *reserved_ret = extent_changeset_alloc();
                if (!*reserved_ret)
                        return -ENOMEM;
        }
        reserved = *reserved_ret;
        /* Record already reserved space */
        orig_reserved = reserved->bytes_changed;
        ret = set_record_extent_bits(&inode->io_tree, start,
                        start + len - 1, EXTENT_QGROUP_RESERVED, reserved);

        /* Newly reserved space */
        to_reserve = reserved->bytes_changed - orig_reserved;
        trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
                                        to_reserve, QGROUP_RESERVE);
        if (ret < 0)
                goto out;
        ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
        if (ret < 0)
                goto cleanup;

        return ret;

cleanup:
        qgroup_unreserve_range(inode, reserved, start, len);
out:
        if (new_reserved) {
                extent_changeset_free(reserved);
                *reserved_ret = NULL;
        }
        return ret;
}

/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from related qgroups or do nothing
 * if the range is already reserved.
 *
 * Return 0 for successful reservation
 * Return <0 for error (including -EDQUOT)
 *
 * NOTE: This function may sleep for memory allocation, dirty page flushing
 * and transaction commit, so the caller must not hold any dirty page locked.
 */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
                        struct extent_changeset **reserved_ret, u64 start,
                        u64 len)
{
        int ret;

        ret = qgroup_reserve_data(inode, reserved_ret, start, len);
        if (ret <= 0 && ret != -EDQUOT)
                return ret;

        ret = try_flush_qgroup(inode->root);
        if (ret < 0)
                return ret;
        return qgroup_reserve_data(inode, reserved_ret, start, len);
}
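/*
 * Added commentary, not part of the original file: double reservation is
 * avoided in qgroup_reserve_data() because set_record_extent_bits() only
 * accumulates bytes_changed for ranges that did not already carry
 * EXTENT_QGROUP_RESERVED.  @to_reserve is therefore the delta against
 * @orig_reserved rather than the full @len, and re-reserving an already
 * reserved range charges nothing.
 */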
/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
                        struct extent_changeset *reserved, u64 start, u64 len)
{
        struct btrfs_root *root = inode->root;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        struct extent_changeset changeset;
        int freed = 0;
        int ret;

        extent_changeset_init(&changeset);
        len = round_up(start + len, root->fs_info->sectorsize);
        start = round_down(start, root->fs_info->sectorsize);

        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
                u64 range_start = unode->val;
                /* unode->aux is the inclusive end */
                u64 range_len = unode->aux - range_start + 1;
                u64 free_start;
                u64 free_len;

                extent_changeset_release(&changeset);

                /* Only free range in range [start, start + len) */
                if (range_start >= start + len ||
                    range_start + range_len <= start)
                        continue;
                free_start = max(range_start, start);
                free_len = min(start + len, range_start + range_len) -
                           free_start;
                /*
                 * TODO: Also modify reserved->ranges_reserved to reflect
                 * the modification.
                 *
                 * However, as long as we free qgroup reserved space according
                 * to EXTENT_QGROUP_RESERVED, we won't double free, so there
                 * is no need to rush.
                 */
                ret = clear_record_extent_bits(&inode->io_tree, free_start,
                                               free_start + free_len - 1,
                                               EXTENT_QGROUP_RESERVED, &changeset);
                if (ret < 0)
                        goto out;
                freed += changeset.bytes_changed;
        }
        btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
                                  BTRFS_QGROUP_RSV_DATA);
        ret = freed;
out:
        extent_changeset_release(&changeset);
        return ret;
}

static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
                        struct extent_changeset *reserved, u64 start, u64 len,
                        int free)
{
        struct extent_changeset changeset;
        int trace_op = QGROUP_RELEASE;
        int ret;

        if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
                extent_changeset_init(&changeset);
                return clear_record_extent_bits(&inode->io_tree, start,
                                                start + len - 1,
                                                EXTENT_QGROUP_RESERVED, &changeset);
        }

        /* In release case, we shouldn't have @reserved */
        WARN_ON(!free && reserved);
        if (free && reserved)
                return qgroup_free_reserved_data(inode, reserved, start, len);
        extent_changeset_init(&changeset);
        ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
                                       EXTENT_QGROUP_RESERVED, &changeset);
        if (ret < 0)
                goto out;

        if (free)
                trace_op = QGROUP_FREE;
        trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
                                        changeset.bytes_changed, trace_op);
        if (free)
                btrfs_qgroup_free_refroot(inode->root->fs_info,
                                inode->root->root_key.objectid,
                                changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
        ret = changeset.bytes_changed;
out:
        extent_changeset_release(&changeset);
        return ret;
}
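/*
 * Added commentary, not part of the original file: the @free parameter above
 * distinguishes the two exported wrappers that follow.  Both clear
 * EXTENT_QGROUP_RESERVED on the range; only the free case also returns the
 * bytes to the qgroup counters via btrfs_qgroup_free_refroot().  In both
 * cases the number of bytes whose state actually changed is returned.
 */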
/*
 * Free a reserved space range from io_tree and related qgroups.
 *
 * Should be called when a range of pages get invalidated before reaching
 * disk, or for the error cleanup case.
 * If @reserved is given, only the reserved range in [@start, @start + @len)
 * will be freed.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
                           struct extent_changeset *reserved, u64 start, u64 len)
{
        return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
}

/*
 * Release a reserved space range from io_tree only.
 *
 * Should be called when a range of pages get written to disk and the
 * corresponding FILE_EXTENT item is inserted into the corresponding root.
 *
 * Since the new qgroup accounting framework will only update qgroup numbers
 * at commit_transaction() time, its reserved space shouldn't be freed from
 * related qgroups.
 *
 * But we should release the range from io_tree, to allow further writes to
 * be COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
{
        return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
}
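/*
 * Added commentary, not part of the original file: a rough sketch of the
 * data reservation life cycle from a hypothetical buffered write path
 * (error handling trimmed):
 *
 *        struct extent_changeset *reserved = NULL;
 *
 *        ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *        ...dirty pages, start writeback...
 *        if (error)
 *                btrfs_qgroup_free_data(inode, reserved, start, len);
 *        else    // after the file extent item reaches disk
 *                btrfs_qgroup_release_data(inode, start, len);
 *        extent_changeset_free(reserved);
 */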
static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
                              enum btrfs_qgroup_rsv_type type)
{
        if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
            type != BTRFS_QGROUP_RSV_META_PERTRANS)
                return;
        if (num_bytes == 0)
                return;

        spin_lock(&root->qgroup_meta_rsv_lock);
        if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
                root->qgroup_meta_rsv_prealloc += num_bytes;
        else
                root->qgroup_meta_rsv_pertrans += num_bytes;
        spin_unlock(&root->qgroup_meta_rsv_lock);
}

static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
                             enum btrfs_qgroup_rsv_type type)
{
        if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
            type != BTRFS_QGROUP_RSV_META_PERTRANS)
                return 0;
        if (num_bytes == 0)
                return 0;

        spin_lock(&root->qgroup_meta_rsv_lock);
        if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
                num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
                                  num_bytes);
                root->qgroup_meta_rsv_prealloc -= num_bytes;
        } else {
                num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
                                  num_bytes);
                root->qgroup_meta_rsv_pertrans -= num_bytes;
        }
        spin_unlock(&root->qgroup_meta_rsv_lock);
        return num_bytes;
}

int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
                              enum btrfs_qgroup_rsv_type type, bool enforce)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;

        if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid) || num_bytes == 0)
                return 0;

        BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
        trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
        ret = qgroup_reserve(root, num_bytes, enforce, type);
        if (ret < 0)
                return ret;
        /*
         * Record what we have reserved into root.
         *
         * This is to avoid a quota disabled->enabled underflow.  In that
         * case, we may try to free space we haven't reserved (since quota
         * was disabled), so record what we reserved into root and ensure a
         * later release won't underflow this number.
         */
        add_root_meta_rsv(root, num_bytes, type);
        return ret;
}

int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
                                enum btrfs_qgroup_rsv_type type, bool enforce,
                                bool noflush)
{
        int ret;

        ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
        if ((ret <= 0 && ret != -EDQUOT) || noflush)
                return ret;

        ret = try_flush_qgroup(root);
        if (ret < 0)
                return ret;
        return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
}
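/*
 * Added commentary, not part of the original file: metadata reservations
 * come in two flavors.  PERTRANS space is tied to the running transaction
 * and is dropped in bulk at commit time by
 * btrfs_qgroup_free_meta_all_pertrans() below, while PREALLOC space is
 * reserved ahead of a transaction and must either be freed explicitly or
 * converted to PERTRANS via btrfs_qgroup_convert_reserved_meta() once it is
 * actually used.  @noflush callers prefer a fast -EDQUOT over blocking in
 * try_flush_qgroup().
 */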
/*
 * Per-transaction meta reservation should be all freed at transaction commit
 * time.
 */
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid))
                return;

        /* TODO: Update trace point to handle such free */
        trace_qgroup_meta_free_all_pertrans(root);
        /* Special value -1 means to free all reserved space */
        btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
                                  BTRFS_QGROUP_RSV_META_PERTRANS);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
                              enum btrfs_qgroup_rsv_type type)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid))
                return;

        /*
         * Reservation for META_PREALLOC can happen before quota is enabled,
         * which can lead to underflow.
         * Here ensure we will only free what we really have reserved.
         */
        num_bytes = sub_root_meta_rsv(root, num_bytes, type);
        BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
        trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
        btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
                                  num_bytes, type);
}

static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
                                int num_bytes)
{
        struct btrfs_qgroup *qgroup;
        LIST_HEAD(qgroup_list);

        if (num_bytes == 0)
                return;
        if (!fs_info->quota_root)
                return;

        spin_lock(&fs_info->qgroup_lock);
        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        qgroup_iterator_add(&qgroup_list, qgroup);
        list_for_each_entry(qgroup, &qgroup_list, iterator) {
                struct btrfs_qgroup_list *glist;

                qgroup_rsv_release(fs_info, qgroup, num_bytes,
                                   BTRFS_QGROUP_RSV_META_PREALLOC);
                qgroup_rsv_add(fs_info, qgroup, num_bytes,
                               BTRFS_QGROUP_RSV_META_PERTRANS);

                list_for_each_entry(glist, &qgroup->groups, next_group)
                        qgroup_iterator_add(&qgroup_list, glist->group);
        }
out:
        qgroup_iterator_clean(&qgroup_list);
        spin_unlock(&fs_info->qgroup_lock);
}

/*
 * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
 *
 * This is called when a preallocated meta reservation needs to be used,
 * normally after a btrfs_join_transaction() call.
 */
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
            !is_fstree(root->root_key.objectid))
                return;
        /* Same as btrfs_qgroup_free_meta_prealloc() */
        num_bytes = sub_root_meta_rsv(root, num_bytes,
                                      BTRFS_QGROUP_RSV_META_PREALLOC);
        trace_qgroup_meta_convert(root, num_bytes);
        qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}
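/*
 * Added commentary, not part of the original file: a typical PREALLOC life
 * cycle, sketched with the helpers above (hypothetical caller, @bytes must
 * be nodesize-aligned per the BUG_ON in btrfs_qgroup_reserve_meta()):
 *
 *        ret = __btrfs_qgroup_reserve_meta(root, bytes,
 *                        BTRFS_QGROUP_RSV_META_PREALLOC, true, false);
 *        ...join a transaction and use the reserved metadata...
 *        btrfs_qgroup_convert_reserved_meta(root, bytes);  // now PERTRANS
 *        ...
 *        // the PERTRANS portion is then freed in bulk at commit time by
 *        // btrfs_qgroup_free_meta_all_pertrans()
 */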
/*
 * Check for leaked qgroup reserved space, normally at inode destruction
 * time.
 */
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
{
        struct extent_changeset changeset;
        struct ulist_node *unode;
        struct ulist_iterator iter;
        int ret;

        extent_changeset_init(&changeset);
        ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
                                       EXTENT_QGROUP_RESERVED, &changeset);

        WARN_ON(ret < 0);
        if (WARN_ON(changeset.bytes_changed)) {
                ULIST_ITER_INIT(&iter);
                while ((unode = ulist_next(&changeset.range_changed, &iter))) {
                        btrfs_warn(inode->root->fs_info,
                "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
                                btrfs_ino(inode), unode->val, unode->aux);
                }
                btrfs_qgroup_free_refroot(inode->root->fs_info,
                                inode->root->root_key.objectid,
                                changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
        }
        extent_changeset_release(&changeset);
}

void btrfs_qgroup_init_swapped_blocks(
        struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
        int i;

        spin_lock_init(&swapped_blocks->lock);
        for (i = 0; i < BTRFS_MAX_LEVEL; i++)
                swapped_blocks->blocks[i] = RB_ROOT;
        swapped_blocks->swapped = false;
}

/*
 * Delete all swapped block records of @root.
 * Every record here means we skipped a full subtree scan for qgroup.
 *
 * Gets called when committing one transaction.
 */
void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
{
        struct btrfs_qgroup_swapped_blocks *swapped_blocks;
        int i;

        swapped_blocks = &root->swapped_blocks;

        spin_lock(&swapped_blocks->lock);
        if (!swapped_blocks->swapped)
                goto out;
        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
                struct rb_root *cur_root = &swapped_blocks->blocks[i];
                struct btrfs_qgroup_swapped_block *entry;
                struct btrfs_qgroup_swapped_block *next;

                rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
                                                     node)
                        kfree(entry);
                swapped_blocks->blocks[i] = RB_ROOT;
        }
        swapped_blocks->swapped = false;
out:
        spin_unlock(&swapped_blocks->lock);
}

/*
 * Add a subtree root record into @subvol_root.
 *
 * @subvol_root:        tree root of the subvolume tree that got swapped
 * @bg:                 block group under balance
 * @subvol_parent/slot: pointer to the subtree root in the subvolume tree
 * @reloc_parent/slot:  pointer to the subtree root in the reloc tree
 *                      BOTH POINTERS ARE BEFORE TREE SWAP
 * @last_snapshot:      last snapshot generation of the subvolume tree
 */
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
                struct btrfs_root *subvol_root,
                struct btrfs_block_group *bg,
                struct extent_buffer *subvol_parent, int subvol_slot,
                struct extent_buffer *reloc_parent, int reloc_slot,
                u64 last_snapshot)
{
        struct btrfs_fs_info *fs_info = subvol_root->fs_info;
        struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
        struct btrfs_qgroup_swapped_block *block;
        struct rb_node **cur;
        struct rb_node *parent = NULL;
        int level = btrfs_header_level(subvol_parent) - 1;
        int ret = 0;

        if (!btrfs_qgroup_full_accounting(fs_info))
                return 0;

        if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
            btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
                btrfs_err_rl(fs_info,
                "%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
                        __func__,
                        btrfs_node_ptr_generation(subvol_parent, subvol_slot),
                        btrfs_node_ptr_generation(reloc_parent, reloc_slot));
                return -EUCLEAN;
        }

        block = kmalloc(sizeof(*block), GFP_NOFS);
        if (!block) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * @reloc_parent/slot is still before swap, while @block is going to
         * record the bytenr after swap, so we do the swap here.
         */
        block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
        block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
                                                             reloc_slot);
        block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
        block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
                                                            subvol_slot);
        block->last_snapshot = last_snapshot;
        block->level = level;

        /*
         * If we have bg == NULL, we're called from btrfs_recover_relocation()
         * and no one else can modify tree blocks, thus the qgroup numbers
         * will not change no matter the value of trace_leaf.
         */
        if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
                block->trace_leaf = true;
        else
                block->trace_leaf = false;
        btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);

        /* Insert @block into @blocks */
        spin_lock(&blocks->lock);
        cur = &blocks->blocks[level].rb_node;
        while (*cur) {
                struct btrfs_qgroup_swapped_block *entry;

                parent = *cur;
                entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
                                 node);

                if (entry->subvol_bytenr < block->subvol_bytenr) {
                        cur = &(*cur)->rb_left;
                } else if (entry->subvol_bytenr > block->subvol_bytenr) {
                        cur = &(*cur)->rb_right;
                } else {
                        if (entry->subvol_generation !=
                                        block->subvol_generation ||
                            entry->reloc_bytenr != block->reloc_bytenr ||
                            entry->reloc_generation !=
                                        block->reloc_generation) {
                                /*
                                 * Duplicate but mismatched entry found.
                                 * Shouldn't happen.
                                 *
                                 * Marking qgroup inconsistent should be enough
                                 * for end users.
                                 */
                                WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
                                ret = -EEXIST;
                        }
                        kfree(block);
                        goto out_unlock;
                }
        }
        rb_link_node(&block->node, parent, cur);
        rb_insert_color(&block->node, &blocks->blocks[level]);
        blocks->swapped = true;
out_unlock:
        spin_unlock(&blocks->lock);
out:
        if (ret < 0)
                qgroup_mark_inconsistent(fs_info);
        return ret;
}
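/*
 * Added commentary, not part of the original file: the records inserted
 * above are consumed by btrfs_qgroup_trace_subtree_after_cow() below.
 * Instead of tracing a whole subtree at balance time, the swap is only
 * recorded here, and the delayed subtree trace happens later, if and when
 * the subvolume tree block is COWed again, using the saved reloc tree
 * counterpart.
 */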
/*
 * Check if the tree block is a subtree root, and if so do the needed
 * delayed subtree trace for qgroup.
 *
 * This is called during btrfs_cow_block().
 */
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root,
                                         struct extent_buffer *subvol_eb)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_tree_parent_check check = { 0 };
        struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
        struct btrfs_qgroup_swapped_block *block;
        struct extent_buffer *reloc_eb = NULL;
        struct rb_node *node;
        bool found = false;
        bool swapped = false;
        int level = btrfs_header_level(subvol_eb);
        int ret = 0;
        int i;

        if (!btrfs_qgroup_full_accounting(fs_info))
                return 0;
        if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
                return 0;

        spin_lock(&blocks->lock);
        if (!blocks->swapped) {
                spin_unlock(&blocks->lock);
                return 0;
        }
        node = blocks->blocks[level].rb_node;

        while (node) {
                block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
                if (block->subvol_bytenr < subvol_eb->start) {
                        node = node->rb_left;
                } else if (block->subvol_bytenr > subvol_eb->start) {
                        node = node->rb_right;
                } else {
                        found = true;
                        break;
                }
        }
        if (!found) {
                spin_unlock(&blocks->lock);
                goto out;
        }
        /* Found one, remove it from @blocks first and update blocks->swapped */
        rb_erase(&block->node, &blocks->blocks[level]);
        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
                if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
                        swapped = true;
                        break;
                }
        }
        blocks->swapped = swapped;
        spin_unlock(&blocks->lock);

        check.level = block->level;
        check.transid = block->reloc_generation;
        check.has_first_key = true;
        memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));

        /* Read out reloc subtree root */
        reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
        if (IS_ERR(reloc_eb)) {
                ret = PTR_ERR(reloc_eb);
                reloc_eb = NULL;
                goto free_out;
        }
        if (!extent_buffer_uptodate(reloc_eb)) {
                ret = -EIO;
                goto free_out;
        }

        ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
                                        block->last_snapshot, block->trace_leaf);
free_out:
        kfree(block);
        free_extent_buffer(reloc_eb);
out:
        if (ret < 0) {
                btrfs_err_rl(fs_info,
                             "failed to account subtree at bytenr %llu: %d",
                             subvol_eb->start, ret);
                qgroup_mark_inconsistent(fs_info);
        }
        return ret;
}

void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
{
        struct btrfs_qgroup_extent_record *entry;
        struct btrfs_qgroup_extent_record *next;
        struct rb_root *root;

        root = &trans->delayed_refs.dirty_extent_root;
        rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
                ulist_free(entry->old_roots);
                kfree(entry);
        }
        *root = RB_ROOT;
}
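/*
 * Added commentary, not part of the original file: the destructor above
 * drops per-transaction qgroup extent records (dirty_extent_root) that will
 * never be accounted, e.g. when a transaction is cleaned up after an abort;
 * the old_roots ulist hanging off each record is freed along with the
 * record itself.
 */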
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
                              struct btrfs_squota_delta *delta)
{
        int ret;
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup *qg;
        LIST_HEAD(qgroup_list);
        u64 root = delta->root;
        u64 num_bytes = delta->num_bytes;
        const int sign = (delta->is_inc ? 1 : -1);

        if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
                return 0;

        if (!is_fstree(root))
                return 0;

        /* If the extent predates enabling quotas, don't count it. */
        if (delta->generation < fs_info->qgroup_enable_gen)
                return 0;

        spin_lock(&fs_info->qgroup_lock);
        qgroup = find_qgroup_rb(fs_info, root);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        }

        ret = 0;
        qgroup_iterator_add(&qgroup_list, qgroup);
        list_for_each_entry(qg, &qgroup_list, iterator) {
                struct btrfs_qgroup_list *glist;

                qg->excl += num_bytes * sign;
                qg->rfer += num_bytes * sign;
                qgroup_dirty(fs_info, qg);

                list_for_each_entry(glist, &qg->groups, next_group)
                        qgroup_iterator_add(&qgroup_list, glist->group);
        }
        qgroup_iterator_clean(&qgroup_list);

out:
        spin_unlock(&fs_info->qgroup_lock);
        if (!ret && delta->rsv_bytes)
                btrfs_qgroup_free_refroot(fs_info, root, delta->rsv_bytes,
                                          BTRFS_QGROUP_RSV_DATA);
        return ret;
}
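/*
 * Added commentary, not part of the original file: in simple quota mode the
 * accounting is a plain delta walk with no backref resolution.  For example,
 * committing a newly allocated 16K data extent in subvolume 256 would be
 * recorded roughly like this (hypothetical caller context):
 *
 *        struct btrfs_squota_delta delta = {
 *                .root = 256,
 *                .num_bytes = SZ_16K,
 *                .generation = trans->transid,
 *                .is_inc = true,
 *        };
 *
 *        ret = btrfs_record_squota_delta(fs_info, &delta);
 *
 * which bumps rfer and excl by the same amount for qgroup 0/256 and for
 * every higher-level qgroup it is a member of.
 */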