// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "tree-checker.h"

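/*
 * Qgroups can be in one of three modes:
 *
 * - DISABLED: quota is not enabled (BTRFS_FS_QUOTA_ENABLED is not set).
 * - SIMPLE:   simple quotas (squota), flagged by
 *             BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE.
 * - FULL:     the fully accounted, original qgroup mode.
 */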
enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
{
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return BTRFS_QGROUP_MODE_DISABLED;
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
		return BTRFS_QGROUP_MODE_SIMPLE;
	return BTRFS_QGROUP_MODE_FULL;
}

bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
}

bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
}

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

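/* Sum up the reserved space of all types for @qgroup. */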
static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

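/*
 * Add @num_bytes to the reservation of the given @type for @qgroup. As
 * noted above, the proper lock context is the caller's responsibility.
 */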
static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_btrfs_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     const struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 const struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

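/*
 * The old/new refcnt fields double as per-run markers: a refcnt smaller
 * than the current sequence number @seq means the qgroup has not been
 * touched in this accounting run yet, so it is first reset to @seq before
 * @mod is added. btrfs_qgroup_get_old_refcnt()/btrfs_qgroup_get_new_refcnt()
 * below then recover the effective count as refcnt - seq.
 */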
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node)
{
	const u64 *qgroupid = key;
	const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node);

	if (qgroup->qgroupid < *qgroupid)
		return -1;
	else if (qgroup->qgroupid > *qgroupid)
		return 1;

	return 0;
}

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *node;

	node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp);
	return rb_entry_safe(node, struct btrfs_qgroup, node);
}

static int btrfs_qgroup_qgroupid_cmp(struct rb_node *new, const struct rb_node *existing)
{
	const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node);

	return btrfs_qgroup_qgroupid_key_cmp(&new_qgroup->qgroupid, existing);
}

/*
 * Add a qgroup to the filesystem's qgroup tree.
 *
 * Must be called with qgroup_lock held and @prealloc preallocated.
 *
 * Ownership of @prealloc is transferred to this function, thus the caller
 * should no longer touch @prealloc.
 */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  struct btrfs_qgroup *prealloc,
					  u64 qgroupid)
{
	struct rb_node *node;

	/* Caller must have pre-allocated @prealloc. */
	ASSERT(prealloc);

	prealloc->qgroupid = qgroupid;
	node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp);
	if (node) {
		kfree(prealloc);
		return rb_entry(node, struct btrfs_qgroup, node);
	}

	INIT_LIST_HEAD(&prealloc->groups);
	INIT_LIST_HEAD(&prealloc->members);
	INIT_LIST_HEAD(&prealloc->dirty);
	INIT_LIST_HEAD(&prealloc->iterator);
	INIT_LIST_HEAD(&prealloc->nested_iterator);

	return prealloc;
}

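/*
 * Typical usage of the preallocation pattern above, as done in
 * btrfs_create_qgroup():
 *
 *	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
 *	if (!prealloc)
 *		return -ENOMEM;
 *	spin_lock(&fs_info->qgroup_lock);
 *	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
 *	spin_unlock(&fs_info->qgroup_lock);
 *	prealloc = NULL;
 *
 * After the call @prealloc has either been inserted or freed, so the caller
 * must not touch it again.
 */

/*
 * Tear down all relation list entries of @qgroup, both towards the parents
 * it is a member of and towards its own members, and drop it from the dirty
 * list. The qgroup structure itself is not freed here.
 */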
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/*
 * Add the relation specified by two qgroups.
 *
 * Must be called with qgroup_lock held; the ownership of @prealloc is
 * transferred to this function and the caller should not touch it anymore.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the qgroups is NULL
 *         <0       other errors
 */
static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
			     struct btrfs_qgroup *member,
			     struct btrfs_qgroup *parent)
{
	if (!member || !parent) {
		kfree(prealloc);
		return -ENOENT;
	}

	prealloc->group = parent;
	prealloc->member = member;
	list_add_tail(&prealloc->next_group, &member->groups);
	list_add_tail(&prealloc->next_member, &parent->members);

	return 0;
}

/*
 * Add relation specified by two qgroup ids.
 *
 * Must be called with qgroup_lock held.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the ids does not exist
 *         <0       other errors
 */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup_list *prealloc,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);

	return __add_relation_rb(prealloc, member, parent);
}

/* Must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

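/*
 * Mark qgroups inconsistent: set the INCONSISTENT status flag plus the
 * runtime flags to cancel a running rescan and skip further accounting.
 * Simple quotas never become inconsistent, so this is a no-op for them.
 * The reason is only logged on the first transition into the inconsistent
 * state.
 */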
__printf(2, 3)
static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
	const u64 old_flags = fs_info->qgroup_flags;

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		return;
	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
	if (!(old_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;

		btrfs_warn_rl(fs_info, "qgroup marked inconsistent, %pV", &vaf);
		va_end(args);
	}
}

static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf, int slot,
				   struct btrfs_qgroup_status_item *ptr)
{
	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
}

/*
 * The full config is read in one go; this is only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_root)
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
				qgroup_read_enable_gen(fs_info, l, slot, ptr);
			else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation)
				qgroup_mark_inconsistent(fs_info, "qgroup generation mismatch");
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY))
			qgroup_mark_inconsistent(fs_info, "inconsistent qgroup config");
		if (!qgroup) {
			struct btrfs_qgroup *prealloc;
			struct btrfs_root *tree_root = fs_info->tree_root;

			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
			if (!prealloc) {
				ret = -ENOMEM;
				goto out;
			}
			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
			/*
			 * If a qgroup exists for a subvolume ID, it is possible
			 * that subvolume has been deleted, in which case
			 * reusing that ID would lead to incorrect accounting.
			 *
			 * Ensure that we skip any such subvol ids.
			 *
			 * We don't need to lock because this is only called
			 * during mount before we start doing things like creating
			 * subvolumes.
			 */
			if (btrfs_is_fstree(qgroup->qgroupid) &&
			    qgroup->qgroupid > tree_root->free_objectid)
				/*
				 * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
				 * as it will get checked on the next call to
				 * btrfs_get_free_objectid.
				 */
				tree_root->free_objectid = qgroup->qgroupid + 1;
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		struct btrfs_qgroup_list *list = NULL;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		list = kzalloc(sizeof(*list), GFP_KERNEL);
		if (!list) {
			ret = -ENOMEM;
			goto out;
		}
		ret = add_relation_rb(fs_info, list, found_key.objectid,
				      found_key.offset);
		list = NULL;
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (ret >= 0) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	} else {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to take the
	 * qgroup lock. And we don't walk in post-order here, to provide a
	 * more user-friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
				   btrfs_qgroup_level(qgroup->qgroupid),
				   btrfs_qgroup_subvolid(qgroup->qgroupid),
				   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable(),
 * the first two of which are single-threaded paths.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	/*
	 * btrfs_quota_disable() can be called concurrently with
	 * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the
	 * lock.
	 */
	spin_lock(&fs_info->qgroup_lock);
	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
		spin_unlock(&fs_info->qgroup_lock);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
		spin_lock(&fs_info->qgroup_lock);
	}
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		return ret;

	if (ret > 0)
		return -ENOENT;

	return btrfs_del_item(trans, quota_root, path);
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	return 0;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		return ret;

	if (ret > 0)
		return -ENOENT;

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		return ret;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		return ret;

	if (ret > 0)
		return -ENOENT;

	ret = btrfs_del_item(trans, quota_root, path);

	return ret;
}

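/*
 * The following three helpers sync the in-memory state (limits, info and
 * status) back into the corresponding items of the quota tree. They return
 * -ENOENT if the corresponding item cannot be found.
 */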
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		return ret;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		return ret;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		return ret;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	return ret;
}

/*
 * Called from btrfs_quota_disable() with the qgroup_ioctl_lock mutex held
 * (the tree searches below may block, so this cannot run under qgroup_lock).
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			return ret;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete leaves one by one, since the whole tree is going to
		 * be deleted anyway.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			return ret;

		btrfs_release_path(path);
	}

	return 0;
}

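/*
 * Enable quotas. This creates the quota root with its status item and one
 * level-0 qgroup for each existing subvolume, then commits the transaction.
 * For full qgroups a rescan is kicked off to build the initial accounting;
 * simple quotas instead record the generation at which they were enabled.
 */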
int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_qgroup *prealloc = NULL;
	struct btrfs_trans_handle *trans = NULL;
	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
	int ret = 0;
	int slot;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to enable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
	 * and before setting BTRFS_FS_QUOTA_ENABLED.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info,
			  "qgroups are currently unsupported in extent tree v2");
		return -EINVAL;
	}

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
	 * avoid lock acquisition inversion problems (reported by lockdep) between
	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
	 * start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile. If so,
	 * just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
	 * that function returns 0 (success) when the sysfs entries already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items, one QGROUP_INFO and one QGROUP_LIMIT
	 * item per subvolume. However those are not currently reserved, as
	 * that would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (unlikely(!path)) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
	if (simple) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
	} else {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (unlikely(ret < 0)) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			/* We should not have a stray @prealloc pointer. */
			ASSERT(prealloc == NULL);
			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
			if (unlikely(!prealloc)) {
				ret = -ENOMEM;
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (unlikely(ret)) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
			prealloc = NULL;
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (unlikely(ret < 0)) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (unlikely(ret < 0)) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (unlikely(ret < 0)) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ASSERT(prealloc == NULL);
	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	if (!prealloc) {
		ret = -ENOMEM;
		goto out_free_path;
	}
	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
	prealloc = NULL;
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (unlikely(ret < 0)) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	fs_info->qgroup_enable_gen = trans->transid;

	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	/*
	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
	 * a deadlock with tasks concurrently doing other qgroup operations,
	 * such as adding/removing qgroups or adding/deleting qgroup relations,
	 * because all qgroup operations first start or join a transaction and
	 * then lock the qgroup_ioctl_lock mutex.
	 * We are safe from a concurrent task trying to enable quotas via this
	 * function, since we are serialized by fs_info->subvol_sem.
	 */
	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	/* Skip rescan for simple qgroups. */
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		goto out_free_path;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	} else {
		/*
		 * We have set both BTRFS_FS_QUOTA_ENABLED and
		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
		 * -EINPROGRESS. That can happen because someone started the
		 * rescan worker by calling quota rescan ioctl before we
		 * attempted to initialize the rescan worker. Failure due to
		 * quotas disabled in the meanwhile is not possible, because
		 * we are holding a write lock on fs_info->subvol_sem, which
		 * is also acquired when disabling quotas.
		 * Ignore such error, and any other error would need to undo
		 * everything we did in the transaction we just committed.
		 */
		ASSERT(ret == -EINPROGRESS);
		ret = 0;
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret)
		btrfs_sysfs_del_qgroups(fs_info);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	kfree(prealloc);
	return ret;
}

/*
 * It is possible to have outstanding ordered extents which reserved bytes
 * before we disabled. We need to fully flush delalloc, ordered extents, and a
 * commit to ensure that we don't leak such reservations, only to have them
 * come back if we re-enable.
 *
 * - enable simple quotas
 * - reserve space
 * - release it, store rsv_bytes in OE
 * - disable quotas
 * - enable simple quotas (qgroup rsv are all 0)
 * - OE finishes
 * - run delayed refs
 * - free rsv_bytes, resulting in miscounting or even underflow
 */
static int flush_reservations(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
	if (ret)
		return ret;
	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);

	return btrfs_commit_current_transaction(fs_info->tree_root);
}

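/*
 * Disable quotas. This waits for any running rescan to finish, flushes
 * outstanding reservations (see flush_reservations() above), frees the
 * in-memory qgroup config and deletes the quota tree from disk.
 */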
int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = NULL;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	/*
	 * We need to have subvol_sem write locked to prevent races with
	 * snapshot creation.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	/*
	 * Relocation will mess with backrefs, so make sure we have the
	 * cleaner_mutex held to protect us from relocate.
	 */
	lockdep_assert_held(&fs_info->cleaner_mutex);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
	 * to lock that mutex while holding a transaction handle and the rescan
	 * worker needs to commit a transaction.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * Request qgroup rescan worker to complete and wait for it. This wait
	 * must be done before transaction start for quota disable since it may
	 * deadlock with transaction by the qgroup rescan worker.
	 */
	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/*
	 * We have nothing held here and no trans handle, just return the error
	 * if there is one and set back the quota enabled bit since we didn't
	 * actually disable quotas.
	 */
	ret = flush_reservations(fs_info);
	if (ret) {
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		return ret;
	}

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	spin_lock(&fs_info->trans_lock);
	list_del(&quota_root->dirty_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_tree_lock(quota_root->node);
	btrfs_clear_buffer_dirty(trans, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
				    quota_root->node, 0, 1);

	if (ret < 0)
		btrfs_abort_transaction(trans, ret);

out:
	btrfs_put_root(quota_root);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

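/*
 * Put @qgroup on the list of qgroups whose on-disk items need updating at
 * commit time. Calling this more than once for the same qgroup is a no-op.
 */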
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

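/*
 * The iterator list heads embedded in each qgroup allow walking a qgroup
 * hierarchy without memory allocation: a qgroup that is already on the list
 * is skipped, so parents reachable through multiple paths are only visited
 * once.
 */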
static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
{
	if (!list_empty(&qgroup->iterator))
		return;

	list_add_tail(&qgroup->iterator, head);
}

static void qgroup_iterator_clean(struct list_head *head)
{
	while (!list_empty(head)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
		list_del_init(&qgroup->iterator);
	}
}

/*
 * The easy accounting: we're updating a qgroup relationship where the child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to qgroup reservation space, which should also be added
 * to/removed from the parent.
 * Otherwise, when the child releases reservation space, the parent would
 * underflow its reservation (for the relationship adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	LIST_HEAD(qgroup_list);
	u64 num_bytes = src->excl;
	u64 num_bytes_cmpr = src->excl_cmpr;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup_iterator_add(&qgroup_list, qgroup);
	list_for_each_entry(qgroup, &qgroup_list, iterator) {
		struct btrfs_qgroup_list *glist;

		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes_cmpr;

		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
		qgroup->excl += sign * num_bytes;
		qgroup->excl_cmpr += sign * num_bytes_cmpr;

		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup_dirty(fs_info, qgroup);

		/* Append parent qgroups to @qgroup_list. */
		list_for_each_entry(glist, &qgroup->groups, next_group)
			qgroup_iterator_add(&qgroup_list, glist->group);
	}
	ret = 0;
out:
	qgroup_iterator_clean(&qgroup_list);
	return ret;
}

/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 when a full rescan is needed
 * and the INCONSISTENT flag gets set.
 * Return <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   u64 src, u64 dst, int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
		if (ret < 0)
			goto out;
		ret = 0;
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

/*
 * Add relation between @src and @dst qgroup. The @prealloc is allocated by the
 * callers and transferred here (either used or freed on error).
 */
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
			      struct btrfs_qgroup_list *prealloc)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	ASSERT(prealloc);

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) {
		kfree(prealloc);
		return -EINVAL;
	}

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* First check if such a qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = __add_relation_rb(prealloc, member, parent);
	prealloc = NULL;
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	kfree(prealloc);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

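/*
 * Remove the relation between @src and @dst. Both the on-disk relation items
 * and the in-memory relation (if it exists) are deleted. Caller must hold
 * the qgroup_ioctl_lock.
 */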
static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	bool found = false;
	int ret = 0;
	int ret2;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * If the parent/member pair doesn't exist, then only try to delete
	 * the stale relation items.
	 */
	if (!member || !parent)
		goto delete_item;

	/* First check if such a qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	if (!ret || !ret2)
		ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

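/*
 * Create a new qgroup, both as an item in the quota tree and in the
 * in-memory rbtree, and add its sysfs entry. Returns -EEXIST if the qgroup
 * already exists.
 */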
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup *prealloc = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	if (!prealloc) {
		ret = -ENOMEM;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
	prealloc = NULL;

	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	kfree(prealloc);
	return ret;
}

/*
 * Return 0 if we can not delete the qgroup (not empty or has children etc).
 * Return >0 if we can delete the qgroup.
 * Return <0 for other errors during tree search.
 */
static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
{
	struct btrfs_key key;
	BTRFS_PATH_AUTO_FREE(path);

	/*
	 * Squota would never be inconsistent, but there can still be the case
	 * where a dropped subvolume still has qgroup numbers, and squota
	 * relies on such qgroups for future accounting.
	 *
	 * So for squota, do not allow dropping any non-zero qgroup.
	 */
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
	    (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
		return 0;

	/* For higher level qgroup, we can only delete it if it has no child. */
	if (btrfs_qgroup_level(qgroup->qgroupid)) {
		if (!list_empty(&qgroup->members))
			return 0;
		return 1;
	}

	/*
	 * For level-0 qgroups, we can only delete one if no subvolume exists
	 * for it.
	 * This means that even if a subvolume is unlinked but not yet fully
	 * dropped, we cannot delete the qgroup.
	 */
	key.objectid = qgroup->qgroupid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = -1ULL;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * The @ret from btrfs_find_root() exactly matches our definition for
	 * the return value, thus it can be returned directly.
	 */
	return btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	ret = can_delete_qgroup(fs_info, qgroup);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		ret = -EBUSY;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	/*
	 * Warn on reserved space. At this point the qgroup should have no
	 * child nor corresponding subvolume.
	 * Thus its reserved space should all be zero, no matter whether the
	 * qgroup is consistent or which mode is in use.
	 */
	if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
		DEBUG_WARN();
		btrfs_warn_rl(fs_info,
"to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
			      btrfs_qgroup_level(qgroup->qgroupid),
			      btrfs_qgroup_subvolid(qgroup->qgroupid),
			      qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
	}
	/*
	 * The same goes for the rfer/excl numbers, but only if our qgroup is
	 * consistent and in regular qgroup mode.
	 * For simple mode they are not as accurate, thus we can hit non-zero
	 * values very frequently.
	 */
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
		if (qgroup->rfer || qgroup->excl ||
		    qgroup->rfer_cmpr || qgroup->excl_cmpr) {
			DEBUG_WARN();
			qgroup_mark_inconsistent(fs_info,
				"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
				btrfs_qgroup_level(qgroup->qgroupid),
				btrfs_qgroup_subvolid(qgroup->qgroupid),
				qgroup->rfer, qgroup->rfer_cmpr,
				qgroup->excl, qgroup->excl_cmpr);
		}
	}
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * Remove the qgroup from sysfs now without holding the qgroup_lock
	 * spinlock, since the sysfs_remove_group() function needs to take
	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
	 */
	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
	kfree(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

1830 int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
1831 {
1832 	struct btrfs_trans_handle *trans;
1833 	int ret;
1834 
1835 	if (!btrfs_is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) ||
1836 	    !fs_info->quota_root)
1837 		return 0;
1838 
1839 	/*
1840 	 * Commit current transaction to make sure all the rfer/excl numbers
1841 	 * get updated.
1842 	 */
1843 	ret = btrfs_commit_current_transaction(fs_info->quota_root);
1844 	if (ret < 0)
1845 		return ret;
1846 
1847 	/* Start new trans to delete the qgroup info and limit items. */
1848 	trans = btrfs_start_transaction(fs_info->quota_root, 2);
1849 	if (IS_ERR(trans))
1850 		return PTR_ERR(trans);
1851 	ret = btrfs_remove_qgroup(trans, subvolid);
1852 	btrfs_end_transaction(trans);
1853 	/*
1854 	 * Either it's squota and the subvolume still has numbers needed for
1855 	 * future accounting, in which case we can not delete it and just
1856 	 * skip it.
1857 	 * Or the qgroup was already removed by a qgroup rescan. In both
1858 	 * cases it's safe to ignore the error.
1859 	 */
1860 	if (ret == -EBUSY || ret == -ENOENT)
1861 		ret = 0;
1862 	return ret;
1863 }
1864 
1865 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1866 		       struct btrfs_qgroup_limit *limit)
1867 {
1868 	struct btrfs_fs_info *fs_info = trans->fs_info;
1869 	struct btrfs_qgroup *qgroup;
1870 	int ret = 0;
1871 	/* Sometimes we want to clear the limit on this qgroup.
1872 	 * To meet this requirement, we treat -1 as a special value
1873 	 * which tells the kernel to clear the limit on this qgroup.
1874 	 */
1875 	const u64 CLEAR_VALUE = -1;
1876 
1877 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1878 	if (!fs_info->quota_root) {
1879 		ret = -ENOTCONN;
1880 		goto out;
1881 	}
1882 
1883 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1884 	if (!qgroup) {
1885 		ret = -ENOENT;
1886 		goto out;
1887 	}
1888 
1889 	spin_lock(&fs_info->qgroup_lock);
1890 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1891 		if (limit->max_rfer == CLEAR_VALUE) {
1892 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1893 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1894 			qgroup->max_rfer = 0;
1895 		} else {
1896 			qgroup->max_rfer = limit->max_rfer;
1897 		}
1898 	}
1899 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1900 		if (limit->max_excl == CLEAR_VALUE) {
1901 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1902 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1903 			qgroup->max_excl = 0;
1904 		} else {
1905 			qgroup->max_excl = limit->max_excl;
1906 		}
1907 	}
1908 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1909 		if (limit->rsv_rfer == CLEAR_VALUE) {
1910 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1911 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1912 			qgroup->rsv_rfer = 0;
1913 		} else {
1914 			qgroup->rsv_rfer = limit->rsv_rfer;
1915 		}
1916 	}
1917 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1918 		if (limit->rsv_excl == CLEAR_VALUE) {
1919 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1920 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1921 			qgroup->rsv_excl = 0;
1922 		} else {
1923 			qgroup->rsv_excl = limit->rsv_excl;
1924 		}
1925 	}
1926 	qgroup->lim_flags |= limit->flags;
1927 
1928 	spin_unlock(&fs_info->qgroup_lock);
1929 
1930 	ret = update_qgroup_limit_item(trans, qgroup);
1931 	if (ret)
1932 		qgroup_mark_inconsistent(fs_info, "qgroup item update error %d", ret);
1933 
1934 out:
1935 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1936 	return ret;
1937 }
1938 
1939 /*
1940  * Inform qgroup to trace one dirty extent, its info is recorded in @record.
1941  * So qgroup can account it at transaction committing time.
1942  *
1943  * No lock version: caller must hold the delayed ref lock and have allocated
1944  * the record, then call btrfs_qgroup_trace_extent_post() outside the lock.
1945  *
1946  * Return 0 for successful insertion.
1947  * Return >0 for existing record, caller can free @record safely.
1948  * Return <0 for insertion failure, caller can free @record safely.
1949  */
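/*
 * A minimal caller sketch, mirroring btrfs_qgroup_trace_extent() below
 * (@index here stands for bytenr >> fs_info->sectorsize_bits):
 *
 *	record = kzalloc(sizeof(*record), GFP_NOFS);
 *	if (!record)
 *		return -ENOMEM;
 *	if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS))
 *		return -ENOMEM;		(after freeing @record)
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs,
 *					       record, bytenr);
 *	if (ret) {	(>0 existing or <0 error: release and free)
 *		xa_release(&delayed_refs->dirty_extents, index);
 *		kfree(record);
 *	} else {	(0: inserted, do the backref walk outside the lock)
 *		ret = btrfs_qgroup_trace_extent_post(trans, record, bytenr);
 *	}
 */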
1950 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1951 				     struct btrfs_delayed_ref_root *delayed_refs,
1952 				     struct btrfs_qgroup_extent_record *record,
1953 				     u64 bytenr)
1954 {
1955 	struct btrfs_qgroup_extent_record *existing, *ret;
1956 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
1957 
1958 	if (!btrfs_qgroup_full_accounting(fs_info))
1959 		return 1;
1960 
1961 #if BITS_PER_LONG == 32
1962 	if (bytenr >= MAX_LFS_FILESIZE) {
1963 		btrfs_err_rl(fs_info,
1964 "qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
1965 			     bytenr);
1966 		btrfs_err_32bit_limit(fs_info);
1967 		return -EOVERFLOW;
1968 	}
1969 #endif
1970 
1971 	trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
1972 
1973 	xa_lock(&delayed_refs->dirty_extents);
1974 	existing = xa_load(&delayed_refs->dirty_extents, index);
1975 	if (existing) {
1976 		if (record->data_rsv && !existing->data_rsv) {
1977 			existing->data_rsv = record->data_rsv;
1978 			existing->data_rsv_refroot = record->data_rsv_refroot;
1979 		}
1980 		xa_unlock(&delayed_refs->dirty_extents);
1981 		return 1;
1982 	}
1983 
1984 	ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
1985 	xa_unlock(&delayed_refs->dirty_extents);
1986 	if (xa_is_err(ret)) {
1987 		qgroup_mark_inconsistent(fs_info, "xarray insert error: %d", xa_err(ret));
1988 		return xa_err(ret);
1989 	}
1990 
1991 	return 0;
1992 }
1993 
1994 /*
1995  * Post handler after qgroup_trace_extent_nolock().
1996  *
1997  * NOTE: Currently qgroup does the expensive backref walk at transaction
1998  * committing time with TRANS_STATE_COMMIT_DOING, which blocks incoming
1999  * new transactions.
2000  * This is designed to allow btrfs_find_all_roots() to get a correct
2001  * new_roots result.
2002  *
2003  * However for old_roots there is no need to do the backref walk at that
2004  * time, since we search commit roots for the walk and the result will
2005  * always be correct.
2006  *
2007  * Due to the lockless nature of btrfs_qgroup_trace_extent_nolock(), we
2008  * can't do the backref walk there. So we must call
2009  * btrfs_qgroup_trace_extent_post() after exiting the spinlock context.
2010  *
2011  * TODO: If we can prove btrfs_find_all_roots() gets correct results using
2012  * the current root, we can move all expensive backref walks out of
2013  * transaction committing; but not now, as qgroup accounting would be wrong again.
2014  */
2015 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
2016 				   struct btrfs_qgroup_extent_record *qrecord,
2017 				   u64 bytenr)
2018 {
2019 	struct btrfs_fs_info *fs_info = trans->fs_info;
2020 	struct btrfs_backref_walk_ctx ctx = {
2021 		.bytenr = bytenr,
2022 		.fs_info = fs_info,
2023 	};
2024 	int ret;
2025 
2026 	if (!btrfs_qgroup_full_accounting(fs_info))
2027 		return 0;
2028 	/*
2029 	 * We are always called in a context where we are already holding a
2030 	 * transaction handle. Often we are called when adding a data delayed
2031 	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
2032 	 * in which case we will be holding a write lock on extent buffer from a
2033 	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
2034 	 * acquire fs_info->commit_root_sem, because that is a higher level lock
2035 	 * that must be acquired before locking any extent buffers.
2036 	 *
2037 	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
2038 	 * but we can't pass it a non-NULL transaction handle, because otherwise
2039 	 * it would not use commit roots and would lock extent buffers, causing
2040 	 * a deadlock if it ends up trying to read lock the same extent buffer
2041 	 * that was previously write locked at btrfs_truncate_inode_items().
2042 	 *
2043 	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
2044 	 * explicitly tell it to not acquire the commit_root_sem - if we are
2045 	 * holding a transaction handle we don't need its protection.
2046 	 */
2047 	ASSERT(trans != NULL);
2048 
2049 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2050 		return 0;
2051 
2052 	ret = btrfs_find_all_roots(&ctx, true);
2053 	if (ret < 0) {
2054 		qgroup_mark_inconsistent(fs_info,
2055 				"error accounting new delayed refs extent: %d", ret);
2056 		return 0;
2057 	}
2058 
2059 	/*
2060 	 * Here we don't need to get the lock of
2061 	 * trans->transaction->delayed_refs, since inserted qrecord won't
2062 	 * be deleted, only qrecord->node may be modified (new qrecord insert).
2063 	 *
2064 	 * So modifying qrecord->old_roots is safe here.
2065 	 */
2066 	qrecord->old_roots = ctx.roots;
2067 	return 0;
2068 }
2069 
2070 /*
2071  * Inform qgroup to trace one dirty extent, specified by @bytenr and
2072  * @num_bytes.
2073  * So qgroup can account it at transaction commit time.
2074  *
2075  * Better encapsulated version, with memory allocation and backref walk for
2076  * commit roots.
2077  * So this can sleep.
2078  *
2079  * Return 0 if the operation is done.
2080  * Return <0 for error, like memory allocation failure or invalid parameter
2081  * (NULL trans)
2082  */
2083 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2084 			      u64 num_bytes)
2085 {
2086 	struct btrfs_fs_info *fs_info = trans->fs_info;
2087 	struct btrfs_qgroup_extent_record *record;
2088 	struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
2089 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2090 	int ret;
2091 
2092 	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
2093 		return 0;
2094 	record = kzalloc(sizeof(*record), GFP_NOFS);
2095 	if (!record)
2096 		return -ENOMEM;
2097 
2098 	if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
2099 		kfree(record);
2100 		return -ENOMEM;
2101 	}
2102 
2103 	record->num_bytes = num_bytes;
2104 
2105 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
2106 	if (ret) {
2107 		/* Clean up if insertion fails or item exists. */
2108 		xa_release(&delayed_refs->dirty_extents, index);
2109 		kfree(record);
2110 		return 0;
2111 	}
2112 	return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
2113 }
2114 
2115 /*
2116  * Inform qgroup to trace all the file extent items in a leaf.
2117  *
2118  * Return 0 for success
2119  * Return <0 for error (ENOMEM)
2120  */
2121 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
2122 				  struct extent_buffer *eb)
2123 {
2124 	struct btrfs_fs_info *fs_info = trans->fs_info;
2125 	int nr = btrfs_header_nritems(eb);
2126 	int i, extent_type, ret;
2127 	struct btrfs_key key;
2128 	struct btrfs_file_extent_item *fi;
2129 	u64 bytenr, num_bytes;
2130 
2131 	/* We can be called directly from walk_up_proc() */
2132 	if (!btrfs_qgroup_full_accounting(fs_info))
2133 		return 0;
2134 
2135 	for (i = 0; i < nr; i++) {
2136 		btrfs_item_key_to_cpu(eb, &key, i);
2137 
2138 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2139 			continue;
2140 
2141 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2142 		/* Filter out non qgroup-accountable extents. */
2143 		extent_type = btrfs_file_extent_type(eb, fi);
2144 
2145 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
2146 			continue;
2147 
2148 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
2149 		if (!bytenr)
2150 			continue;
2151 
2152 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
2153 
2154 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
2155 		if (ret)
2156 			return ret;
2157 	}
2158 	cond_resched();
2159 	return 0;
2160 }
2161 
2162 /*
2163  * Walk up the tree from the bottom, freeing leaves and any interior
2164  * nodes which have had all slots visited. If a node (leaf or
2165  * interior) is freed, the node above it will have its slot
2166  * incremented. The root node will never be freed.
2167  *
2168  * At the end of this function, we should have a path which has all
2169  * slots incremented to the next position for a search. If we need to
2170  * read a new node it will be NULL and the node above it will have the
2171  * correct slot selected for a later read.
2172  *
2173  * If we increment the root node's slot counter past the number of
2174  * elements, 1 is returned to signal completion of the search.
2175  */
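/*
 * A worked example of the walk above (illustrative numbers): with
 * root_level == 1, path->slots[1] == 2 and 3 items in the root node, the
 * call frees the level 0 leaf, bumps slots[1] to 3, and the final check
 * (3 >= nritems) returns 1 to signal the search is complete.
 */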
2176 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
2177 {
2178 	int level = 0;
2179 	int nr, slot;
2180 	struct extent_buffer *eb;
2181 
2182 	if (root_level == 0)
2183 		return 1;
2184 
2185 	while (level <= root_level) {
2186 		eb = path->nodes[level];
2187 		nr = btrfs_header_nritems(eb);
2188 		path->slots[level]++;
2189 		slot = path->slots[level];
2190 		if (slot >= nr || level == 0) {
2191 			/*
2192 			 * Don't free the root - we will detect this
2193 			 * condition after our loop and return a
2194 			 * positive value for caller to stop walking the tree.
2195 			 */
2196 			if (level != root_level) {
2197 				btrfs_tree_unlock_rw(eb, path->locks[level]);
2198 				path->locks[level] = 0;
2199 
2200 				free_extent_buffer(eb);
2201 				path->nodes[level] = NULL;
2202 				path->slots[level] = 0;
2203 			}
2204 		} else {
2205 			/*
2206 			 * We have a valid slot to walk back down
2207 			 * from. Stop here so caller can process these
2208 			 * new nodes.
2209 			 */
2210 			break;
2211 		}
2212 
2213 		level++;
2214 	}
2215 
2216 	eb = path->nodes[root_level];
2217 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
2218 		return 1;
2219 
2220 	return 0;
2221 }
2222 
2223 /*
2224  * Helper function to trace a subtree tree block swap.
2225  *
2226  * The swap will happen in highest tree block, but there may be a lot of
2227  * tree blocks involved.
2228  *
2229  * For example:
2230  *  OO = Old tree blocks
2231  *  NN = New tree blocks allocated during balance
2232  *
2233  *           File tree (257)                  Reloc tree for 257
2234  * L2              OO                                NN
2235  *               /    \                            /    \
2236  * L1          OO      OO (a)                    OO      NN (a)
2237  *            / \     / \                       / \     / \
2238  * L0       OO   OO OO   OO                   OO   OO NN   NN
2239  *                  (b)  (c)                          (b)  (c)
2240  *
2241  * When calling qgroup_trace_extent_swap(), we will pass:
2242  * @src_eb = OO(a)
2243  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2244  * @dst_level = 0
2245  * @root_level = 1
2246  *
2247  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2248  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2249  *
2250  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2251  *
2252  * 1) Tree search from @src_eb
2253  *    It acts as a simplified btrfs_search_slot().
2254  *    The key for search can be extracted from @dst_path->nodes[dst_level]
2255  *    (first key).
2256  *
2257  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2258  *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
2259  *    They should be marked during previous (@dst_level = 1) iteration.
2260  *
2261  * 3) Mark file extents in leaves dirty
2262  *    We don't have a good way to pick out new file extents only.
2263  *    So we still follow the old method by scanning all file extents in
2264  *    the leaf.
2265  *
2266  * This function can free us from keeping two paths, thus later we only need
2267  * to care about how to iterate all new tree blocks in reloc tree.
2268  */
2269 static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
2270 				    struct extent_buffer *src_eb,
2271 				    struct btrfs_path *dst_path,
2272 				    int dst_level, int root_level,
2273 				    bool trace_leaf)
2274 {
2275 	struct btrfs_key key;
2276 	BTRFS_PATH_AUTO_FREE(src_path);
2277 	struct btrfs_fs_info *fs_info = trans->fs_info;
2278 	u32 nodesize = fs_info->nodesize;
2279 	int cur_level = root_level;
2280 	int ret;
2281 
2282 	BUG_ON(dst_level > root_level);
2283 	/* Level mismatch */
2284 	if (btrfs_header_level(src_eb) != root_level)
2285 		return -EINVAL;
2286 
2287 	src_path = btrfs_alloc_path();
2288 	if (!src_path)
2289 		return -ENOMEM;
2290 
2291 	if (dst_level)
2292 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2293 	else
2294 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2295 
2296 	/* For src_path */
2297 	refcount_inc(&src_eb->refs);
2298 	src_path->nodes[root_level] = src_eb;
2299 	src_path->slots[root_level] = dst_path->slots[root_level];
2300 	src_path->locks[root_level] = 0;
2301 
2302 	/* A simplified version of btrfs_search_slot() */
2303 	while (cur_level >= dst_level) {
2304 		struct btrfs_key src_key;
2305 		struct btrfs_key dst_key;
2306 
2307 		if (src_path->nodes[cur_level] == NULL) {
2308 			struct extent_buffer *eb;
2309 			int parent_slot;
2310 
2311 			eb = src_path->nodes[cur_level + 1];
2312 			parent_slot = src_path->slots[cur_level + 1];
2313 
2314 			eb = btrfs_read_node_slot(eb, parent_slot);
2315 			if (IS_ERR(eb))
2316 				return PTR_ERR(eb);
2317 
2318 			src_path->nodes[cur_level] = eb;
2319 
2320 			btrfs_tree_read_lock(eb);
2321 			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2322 		}
2323 
2324 		src_path->slots[cur_level] = dst_path->slots[cur_level];
2325 		if (cur_level) {
2326 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2327 					&dst_key, dst_path->slots[cur_level]);
2328 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2329 					&src_key, src_path->slots[cur_level]);
2330 		} else {
2331 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2332 					&dst_key, dst_path->slots[cur_level]);
2333 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2334 					&src_key, src_path->slots[cur_level]);
2335 		}
2336 		/* Content mismatch, something went wrong */
2337 		if (btrfs_comp_cpu_keys(&dst_key, &src_key))
2338 			return -ENOENT;
2339 		cur_level--;
2340 	}
2341 
2342 	/*
2343 	 * Now both @dst_path and @src_path have been populated, record the tree
2344 	 * blocks for qgroup accounting.
2345 	 */
2346 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2347 					nodesize);
2348 	if (ret < 0)
2349 		return ret;
2350 	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2351 					nodesize);
2352 	if (ret < 0)
2353 		return ret;
2354 
2355 	/* Record leaf file extents */
2356 	if (dst_level == 0 && trace_leaf) {
2357 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2358 		if (ret < 0)
2359 			return ret;
2360 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2361 	}
2362 
2363 	return ret;
2364 }
2365 
2366 /*
2367  * Helper function to do recursive generation-aware depth-first search, to
2368  * locate all new tree blocks in a subtree of reloc tree.
2369  *
2370  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2371  *         reloc tree
2372  * L2         NN (a)
2373  *          /    \
2374  * L1    OO        NN (b)
2375  *      /  \      /  \
2376  * L0  OO  OO    OO  NN
2377  *               (c) (d)
2378  * If we pass:
2379  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2380  * @cur_level = 1
2381  * @root_level = 1
2382  *
2383  * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to
2384  * trace the above tree blocks along with their counterparts in the file tree.
2385  * During the search, old tree blocks like OO(c) will be skipped as the tree
2386  * block swap won't affect OO(c).
2387  */
2388 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
2389 					   struct extent_buffer *src_eb,
2390 					   struct btrfs_path *dst_path,
2391 					   int cur_level, int root_level,
2392 					   u64 last_snapshot, bool trace_leaf)
2393 {
2394 	struct btrfs_fs_info *fs_info = trans->fs_info;
2395 	struct extent_buffer *eb;
2396 	bool need_cleanup = false;
2397 	int ret = 0;
2398 	int i;
2399 
2400 	/* Level sanity check */
2401 	if (unlikely(cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2402 		     root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2403 		     root_level < cur_level)) {
2404 		btrfs_err_rl(fs_info,
2405 			"%s: bad levels, cur_level=%d root_level=%d",
2406 			__func__, cur_level, root_level);
2407 		return -EUCLEAN;
2408 	}
2409 
2410 	/* Read the tree block if needed */
2411 	if (dst_path->nodes[cur_level] == NULL) {
2412 		int parent_slot;
2413 		u64 child_gen;
2414 
2415 		/*
2416 		 * dst_path->nodes[root_level] must be initialized before
2417 		 * calling this function.
2418 		 */
2419 		if (unlikely(cur_level == root_level)) {
2420 			btrfs_err_rl(fs_info,
2421 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2422 				__func__, root_level, root_level, cur_level);
2423 			return -EUCLEAN;
2424 		}
2425 
2426 		/*
2427 		 * We need to get child blockptr/gen from parent before we can
2428 		 * read it.
2429 		 */
2430 		eb = dst_path->nodes[cur_level + 1];
2431 		parent_slot = dst_path->slots[cur_level + 1];
2432 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2433 
2434 		/* This node is old, no need to trace */
2435 		if (child_gen < last_snapshot)
2436 			goto out;
2437 
2438 		eb = btrfs_read_node_slot(eb, parent_slot);
2439 		if (IS_ERR(eb)) {
2440 			ret = PTR_ERR(eb);
2441 			goto out;
2442 		}
2443 
2444 		dst_path->nodes[cur_level] = eb;
2445 		dst_path->slots[cur_level] = 0;
2446 
2447 		btrfs_tree_read_lock(eb);
2448 		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2449 		need_cleanup = true;
2450 	}
2451 
2452 	/* Now record this tree block and its counter part for qgroups */
2453 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2454 				       root_level, trace_leaf);
2455 	if (ret < 0)
2456 		goto cleanup;
2457 
2458 	eb = dst_path->nodes[cur_level];
2459 
2460 	if (cur_level > 0) {
2461 		/* Iterate all child tree blocks */
2462 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2463 			/* Skip old tree blocks as they won't be swapped */
2464 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2465 				continue;
2466 			dst_path->slots[cur_level] = i;
2467 
2468 			/* Recursive call (at most 7 times) */
2469 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2470 					dst_path, cur_level - 1, root_level,
2471 					last_snapshot, trace_leaf);
2472 			if (ret < 0)
2473 				goto cleanup;
2474 		}
2475 	}
2476 
2477 cleanup:
2478 	if (need_cleanup) {
2479 		/* Clean up */
2480 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2481 				     dst_path->locks[cur_level]);
2482 		free_extent_buffer(dst_path->nodes[cur_level]);
2483 		dst_path->nodes[cur_level] = NULL;
2484 		dst_path->slots[cur_level] = 0;
2485 		dst_path->locks[cur_level] = 0;
2486 	}
2487 out:
2488 	return ret;
2489 }
2490 
2491 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2492 				struct extent_buffer *src_eb,
2493 				struct extent_buffer *dst_eb,
2494 				u64 last_snapshot, bool trace_leaf)
2495 {
2496 	struct btrfs_fs_info *fs_info = trans->fs_info;
2497 	struct btrfs_path *dst_path = NULL;
2498 	int level;
2499 	int ret;
2500 
2501 	if (!btrfs_qgroup_full_accounting(fs_info))
2502 		return 0;
2503 
2504 	/* Wrong parameter order */
2505 	if (unlikely(btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb))) {
2506 		btrfs_err_rl(fs_info,
2507 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2508 			     btrfs_header_generation(src_eb),
2509 			     btrfs_header_generation(dst_eb));
2510 		return -EUCLEAN;
2511 	}
2512 
2513 	if (unlikely(!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb))) {
2514 		ret = -EIO;
2515 		goto out;
2516 	}
2517 
2518 	level = btrfs_header_level(dst_eb);
2519 	dst_path = btrfs_alloc_path();
2520 	if (!dst_path) {
2521 		ret = -ENOMEM;
2522 		goto out;
2523 	}
2524 	/* For dst_path */
2525 	refcount_inc(&dst_eb->refs);
2526 	dst_path->nodes[level] = dst_eb;
2527 	dst_path->slots[level] = 0;
2528 	dst_path->locks[level] = 0;
2529 
2530 	/* Do the generation-aware depth-first search */
2531 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2532 					      level, last_snapshot, trace_leaf);
2533 	if (ret < 0)
2534 		goto out;
2535 	ret = 0;
2536 
2537 out:
2538 	btrfs_free_path(dst_path);
2539 	if (ret < 0)
2540 		qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
2541 	return ret;
2542 }
2543 
2544 /*
2545  * Inform qgroup to trace a whole subtree, including all its child tree
2546  * blocks and data.
2547  * The root tree block is specified by @root_eb.
2548  *
2549  * Normally used by relocation(tree block swap) and subvolume deletion.
2550  *
2551  * Return 0 for success
2552  * Return <0 for error(ENOMEM or tree search error)
2553  */
2554 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2555 			       struct extent_buffer *root_eb,
2556 			       u64 root_gen, int root_level)
2557 {
2558 	struct btrfs_fs_info *fs_info = trans->fs_info;
2559 	int ret = 0;
2560 	int level;
2561 	u8 drop_subptree_thres;
2562 	struct extent_buffer *eb = root_eb;
2563 	BTRFS_PATH_AUTO_FREE(path);
2564 
2565 	ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
2566 	ASSERT(root_eb != NULL);
2567 
2568 	if (!btrfs_qgroup_full_accounting(fs_info))
2569 		return 0;
2570 
2571 	spin_lock(&fs_info->qgroup_lock);
2572 	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2573 	spin_unlock(&fs_info->qgroup_lock);
2574 
2575 	/*
2576 	 * This function only gets called for snapshot drop. If we hit a high
2577 	 * level node here, it means we are going to change ownership for quite
2578 	 * a lot of extents, which will greatly slow down btrfs_commit_transaction().
2579 	 *
2580 	 * So if we find a high level node here, just skip the accounting and
2581 	 * mark qgroup inconsistent.
2582 	 */
2583 	if (root_level >= drop_subptree_thres) {
2584 		qgroup_mark_inconsistent(fs_info, "subtree level reached threshold");
2585 		return 0;
2586 	}
2587 
2588 	if (!extent_buffer_uptodate(root_eb)) {
2589 		struct btrfs_tree_parent_check check = {
2590 			.transid = root_gen,
2591 			.level = root_level
2592 		};
2593 
2594 		ret = btrfs_read_extent_buffer(root_eb, &check);
2595 		if (ret)
2596 			return ret;
2597 	}
2598 
2599 	if (root_level == 0) {
2600 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2601 		return ret;
2602 	}
2603 
2604 	path = btrfs_alloc_path();
2605 	if (!path)
2606 		return -ENOMEM;
2607 
2608 	/*
2609 	 * Walk down the tree.  Missing extent blocks are filled in as
2610 	 * we go. Metadata is accounted every time we read a new
2611 	 * extent block.
2612 	 *
2613 	 * When we reach a leaf, we account for file extent items in it,
2614 	 * walk back up the tree (adjusting slot pointers as we go)
2615 	 * and restart the search process.
2616 	 */
2617 	refcount_inc(&root_eb->refs);	/* For path */
2618 	path->nodes[root_level] = root_eb;
2619 	path->slots[root_level] = 0;
2620 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2621 walk_down:
2622 	level = root_level;
2623 	while (level >= 0) {
2624 		if (path->nodes[level] == NULL) {
2625 			int parent_slot;
2626 			u64 child_bytenr;
2627 
2628 			/*
2629 			 * We need to get child blockptr from parent before we
2630 			 * can read it.
2631 			 */
2632 			eb = path->nodes[level + 1];
2633 			parent_slot = path->slots[level + 1];
2634 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2635 
2636 			eb = btrfs_read_node_slot(eb, parent_slot);
2637 			if (IS_ERR(eb))
2638 				return PTR_ERR(eb);
2639 
2640 			path->nodes[level] = eb;
2641 			path->slots[level] = 0;
2642 
2643 			btrfs_tree_read_lock(eb);
2644 			path->locks[level] = BTRFS_READ_LOCK;
2645 
2646 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2647 							fs_info->nodesize);
2648 			if (ret)
2649 				return ret;
2650 		}
2651 
2652 		if (level == 0) {
2653 			ret = btrfs_qgroup_trace_leaf_items(trans,
2654 							    path->nodes[level]);
2655 			if (ret)
2656 				return ret;
2657 
2658 			/* Nonzero return here means we completed our search */
2659 			ret = adjust_slots_upwards(path, root_level);
2660 			if (ret)
2661 				break;
2662 
2663 			/* Restart search with new slots */
2664 			goto walk_down;
2665 		}
2666 
2667 		level--;
2668 	}
2669 
2670 	return 0;
2671 }
2672 
2673 static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
2674 {
2675 	if (!list_empty(&qgroup->nested_iterator))
2676 		return;
2677 
2678 	list_add_tail(&qgroup->nested_iterator, head);
2679 }
2680 
2681 static void qgroup_iterator_nested_clean(struct list_head *head)
2682 {
2683 	while (!list_empty(head)) {
2684 		struct btrfs_qgroup *qgroup;
2685 
2686 		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
2687 		list_del_init(&qgroup->nested_iterator);
2688 	}
2689 }
2690 
2691 #define UPDATE_NEW	0
2692 #define UPDATE_OLD	1
2693 /*
2694  * Walk all of the roots that point to the bytenr and adjust their refcnts.
2695  */
2696 static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2697 				 struct ulist *roots, struct list_head *qgroups,
2698 				 u64 seq, bool update_old)
2699 {
2700 	struct ulist_node *unode;
2701 	struct ulist_iterator uiter;
2702 	struct btrfs_qgroup *qg;
2703 
2704 	if (!roots)
2705 		return;
2706 	ULIST_ITER_INIT(&uiter);
2707 	while ((unode = ulist_next(roots, &uiter))) {
2708 		LIST_HEAD(tmp);
2709 
2710 		qg = find_qgroup_rb(fs_info, unode->val);
2711 		if (!qg)
2712 			continue;
2713 
2714 		qgroup_iterator_nested_add(qgroups, qg);
2715 		qgroup_iterator_add(&tmp, qg);
2716 		list_for_each_entry(qg, &tmp, iterator) {
2717 			struct btrfs_qgroup_list *glist;
2718 
2719 			if (update_old)
2720 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2721 			else
2722 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2723 
2724 			list_for_each_entry(glist, &qg->groups, next_group) {
2725 				qgroup_iterator_nested_add(qgroups, glist->group);
2726 				qgroup_iterator_add(&tmp, glist->group);
2727 			}
2728 		}
2729 		qgroup_iterator_clean(&tmp);
2730 	}
2731 }
2732 
2733 /*
2734  * Update qgroup rfer/excl counters.
2735  * Rfer update is easy, the code explains itself.
2736  *
2737  * Excl update is tricky, the update is split into 2 parts.
2738  * Part 1: Possible exclusive <-> sharing detect:
2739  *	|	A	|	!A	|
2740  *  -------------------------------------
2741  *  B	|	*	|	-	|
2742  *  -------------------------------------
2743  *  !B	|	+	|	**	|
2744  *  -------------------------------------
2745  *
2746  * Conditions:
2747  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2748  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2749  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2750  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2751  *
2752  * Results:
2753  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2754  * *: Definitely not changed.		**: Possible unchanged.
2755  *
2756  * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
2757  *
2758  * To make the logic clear, we first use condition A and B to split
2759  * combination into 4 results.
2760  *
2761  * Then, for results "+" and "-", check the old/new_roots == 0 case, as
2762  * in those results only one variant may be 0.
2763  *
2764  * Lastly, check result **: since both variants may be 0 there, split it
2765  * again (2x2).
2766  * But this time we don't need to consider other things; the code and
2767  * logic are easy to understand now.
2768  */
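/*
 * Worked example with made-up numbers: an extent of num_bytes is referenced
 * by roots A and B (nr_old_roots == 2) and root B drops its reference
 * (nr_new_roots == 1).  For A's qgroup (cur_old_count == 1,
 * cur_new_count == 1), rfer is unchanged and the shared -> exclusive
 * branch ("+" above) adds num_bytes to excl.  For B's qgroup
 * (cur_old_count == 1, cur_new_count == 0), rfer shrinks by num_bytes and
 * excl is untouched, since the extent was never exclusive to B.
 */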
2769 static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
2770 				   struct list_head *qgroups, u64 nr_old_roots,
2771 				   u64 nr_new_roots, u64 num_bytes, u64 seq)
2772 {
2773 	struct btrfs_qgroup *qg;
2774 
2775 	list_for_each_entry(qg, qgroups, nested_iterator) {
2776 		u64 cur_new_count, cur_old_count;
2777 		bool dirty = false;
2778 
2779 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2780 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2781 
2782 		trace_btrfs_qgroup_update_counters(fs_info, qg, cur_old_count,
2783 						   cur_new_count);
2784 
2785 		/* Rfer update part */
2786 		if (cur_old_count == 0 && cur_new_count > 0) {
2787 			qg->rfer += num_bytes;
2788 			qg->rfer_cmpr += num_bytes;
2789 			dirty = true;
2790 		}
2791 		if (cur_old_count > 0 && cur_new_count == 0) {
2792 			qg->rfer -= num_bytes;
2793 			qg->rfer_cmpr -= num_bytes;
2794 			dirty = true;
2795 		}
2796 
2797 		/* Excl update part */
2798 		/* Exclusive/none -> shared case */
2799 		if (cur_old_count == nr_old_roots &&
2800 		    cur_new_count < nr_new_roots) {
2801 			/* Exclusive -> shared */
2802 			if (cur_old_count != 0) {
2803 				qg->excl -= num_bytes;
2804 				qg->excl_cmpr -= num_bytes;
2805 				dirty = true;
2806 			}
2807 		}
2808 
2809 		/* Shared -> exclusive/none case */
2810 		if (cur_old_count < nr_old_roots &&
2811 		    cur_new_count == nr_new_roots) {
2812 			/* Shared->exclusive */
2813 			if (cur_new_count != 0) {
2814 				qg->excl += num_bytes;
2815 				qg->excl_cmpr += num_bytes;
2816 				dirty = true;
2817 			}
2818 		}
2819 
2820 		/* Exclusive/none -> exclusive/none case */
2821 		if (cur_old_count == nr_old_roots &&
2822 		    cur_new_count == nr_new_roots) {
2823 			if (cur_old_count == 0) {
2824 				/* None -> exclusive/none */
2825 
2826 				if (cur_new_count != 0) {
2827 					/* None -> exclusive */
2828 					qg->excl += num_bytes;
2829 					qg->excl_cmpr += num_bytes;
2830 					dirty = true;
2831 				}
2832 				/* None -> none, nothing changed */
2833 			} else {
2834 				/* Exclusive -> exclusive/none */
2835 
2836 				if (cur_new_count == 0) {
2837 					/* Exclusive -> none */
2838 					qg->excl -= num_bytes;
2839 					qg->excl_cmpr -= num_bytes;
2840 					dirty = true;
2841 				}
2842 				/* Exclusive -> exclusive, nothing changed */
2843 			}
2844 		}
2845 
2846 		if (dirty)
2847 			qgroup_dirty(fs_info, qg);
2848 	}
2849 }
2850 
2851 /*
2852  * Check if @roots could potentially be a list of fs tree roots
2853  *
2854  * Return 0 for definitely not a fs/subvol tree roots ulist
2855  * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2856  *          one as well)
2857  */
2858 static int maybe_fs_roots(struct ulist *roots)
2859 {
2860 	struct ulist_node *unode;
2861 	struct ulist_iterator uiter;
2862 
2863 	/* Empty one, still possible for fs roots */
2864 	if (!roots || roots->nnodes == 0)
2865 		return 1;
2866 
2867 	ULIST_ITER_INIT(&uiter);
2868 	unode = ulist_next(roots, &uiter);
2869 	if (!unode)
2870 		return 1;
2871 
2872 	/*
2873 	 * If it contains fs tree roots, then it must belong to fs/subvol
2874 	 * trees.
2875 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2876 	 */
2877 	return btrfs_is_fstree(unode->val);
2878 }
2879 
2880 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2881 				u64 num_bytes, struct ulist *old_roots,
2882 				struct ulist *new_roots)
2883 {
2884 	struct btrfs_fs_info *fs_info = trans->fs_info;
2885 	LIST_HEAD(qgroups);
2886 	u64 seq;
2887 	u64 nr_new_roots = 0;
2888 	u64 nr_old_roots = 0;
2889 	int ret = 0;
2890 
2891 	/*
2892 	 * If quotas get disabled meanwhile, the resources need to be freed and
2893 	 * we can't just exit here.
2894 	 */
2895 	if (!btrfs_qgroup_full_accounting(fs_info) ||
2896 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2897 		goto out_free;
2898 
2899 	if (new_roots) {
2900 		if (!maybe_fs_roots(new_roots))
2901 			goto out_free;
2902 		nr_new_roots = new_roots->nnodes;
2903 	}
2904 	if (old_roots) {
2905 		if (!maybe_fs_roots(old_roots))
2906 			goto out_free;
2907 		nr_old_roots = old_roots->nnodes;
2908 	}
2909 
2910 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2911 	if (nr_old_roots == 0 && nr_new_roots == 0)
2912 		goto out_free;
2913 
2914 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2915 					num_bytes, nr_old_roots, nr_new_roots);
2916 
2917 	mutex_lock(&fs_info->qgroup_rescan_lock);
2918 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2919 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2920 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2921 			ret = 0;
2922 			goto out_free;
2923 		}
2924 	}
2925 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2926 
2927 	spin_lock(&fs_info->qgroup_lock);
2928 	seq = fs_info->qgroup_seq;
2929 
2930 	/* Update old refcnts using old_roots */
2931 	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
2932 
2933 	/* Update new refcnts using new_roots */
2934 	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
2935 
2936 	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
2937 			       num_bytes, seq);
2938 
2939 	/*
2940 	 * We're done using the iterator, release all its qgroups while holding
2941 	 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
2942 	 * and trigger use-after-free accesses to qgroups.
2943 	 */
2944 	qgroup_iterator_nested_clean(&qgroups);
2945 
2946 	/*
2947 	 * Bump qgroup_seq to avoid seq overlap
2948 	 */
2949 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2950 	spin_unlock(&fs_info->qgroup_lock);
2951 out_free:
2952 	ulist_free(old_roots);
2953 	ulist_free(new_roots);
2954 	return ret;
2955 }
2956 
2957 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2958 {
2959 	struct btrfs_fs_info *fs_info = trans->fs_info;
2960 	struct btrfs_qgroup_extent_record *record;
2961 	struct btrfs_delayed_ref_root *delayed_refs;
2962 	struct ulist *new_roots = NULL;
2963 	unsigned long index;
2964 	u64 num_dirty_extents = 0;
2965 	u64 qgroup_to_skip;
2966 	int ret = 0;
2967 
2968 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
2969 		return 0;
2970 
2971 	delayed_refs = &trans->transaction->delayed_refs;
2972 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
2973 	xa_for_each(&delayed_refs->dirty_extents, index, record) {
2974 		const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits);
2975 
2976 		num_dirty_extents++;
2977 		trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
2978 
2979 		if (!ret && !(fs_info->qgroup_flags &
2980 			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
2981 			struct btrfs_backref_walk_ctx ctx = { 0 };
2982 
2983 			ctx.bytenr = bytenr;
2984 			ctx.fs_info = fs_info;
2985 
2986 			/*
2987 			 * Old roots should be searched when inserting qgroup
2988 			 * extent record.
2989 			 *
2990 			 * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
2991 			 * we may have some records inserted during
2992 			 * NO_ACCOUNTING (thus no old_roots populated), but
2993 			 * later we start rescan, which clears NO_ACCOUNTING,
2994 			 * leaving some inserted records without old_roots
2995 			 * populated.
2996 			 *
2997 			 * Those cases are rare and should not cause too much
2998 			 * time spent during commit_transaction().
2999 			 */
3000 			if (!record->old_roots) {
3001 				/* Search commit root to find old_roots */
3002 				ret = btrfs_find_all_roots(&ctx, false);
3003 				if (ret < 0)
3004 					goto cleanup;
3005 				record->old_roots = ctx.roots;
3006 				ctx.roots = NULL;
3007 			}
3008 
3009 			/*
3010 			 * Use BTRFS_SEQ_LAST as time_seq to do special search,
3011 			 * which doesn't lock the tree or delayed_refs and searches
3012 			 * the current root. It's safe inside commit_transaction().
3013 			 */
3014 			ctx.trans = trans;
3015 			ctx.time_seq = BTRFS_SEQ_LAST;
3016 			ret = btrfs_find_all_roots(&ctx, false);
3017 			if (ret < 0)
3018 				goto cleanup;
3019 			new_roots = ctx.roots;
3020 			if (qgroup_to_skip) {
3021 				ulist_del(new_roots, qgroup_to_skip, 0);
3022 				ulist_del(record->old_roots, qgroup_to_skip,
3023 					  0);
3024 			}
3025 			ret = btrfs_qgroup_account_extent(trans, bytenr,
3026 							  record->num_bytes,
3027 							  record->old_roots,
3028 							  new_roots);
3029 			record->old_roots = NULL;
3030 			new_roots = NULL;
3031 		}
3032 		/* Free the reserved data space */
3033 		btrfs_qgroup_free_refroot(fs_info,
3034 				record->data_rsv_refroot,
3035 				record->data_rsv,
3036 				BTRFS_QGROUP_RSV_DATA);
3037 cleanup:
3038 		ulist_free(record->old_roots);
3039 		ulist_free(new_roots);
3040 		new_roots = NULL;
3041 		xa_erase(&delayed_refs->dirty_extents, index);
3042 		kfree(record);
3044 	}
3045 	trace_btrfs_qgroup_num_dirty_extents(fs_info, trans->transid, num_dirty_extents);
3046 	return ret;
3047 }
3048 
3049 /*
3050  * Writes all changed qgroups to disk.
3051  * Called by the transaction commit path and the qgroup assign ioctl.
3052  */
3053 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
3054 {
3055 	struct btrfs_fs_info *fs_info = trans->fs_info;
3056 	int ret = 0;
3057 
3058 	/*
3059 	 * In case we are called from the qgroup assign ioctl, assert that we
3060 	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
3061 	 * disable operation (ioctl) and access a freed quota root.
3062 	 */
3063 	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
3064 		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
3065 
3066 	if (!fs_info->quota_root)
3067 		return ret;
3068 
3069 	spin_lock(&fs_info->qgroup_lock);
3070 	while (!list_empty(&fs_info->dirty_qgroups)) {
3071 		struct btrfs_qgroup *qgroup;
3072 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
3073 					  struct btrfs_qgroup, dirty);
3074 		list_del_init(&qgroup->dirty);
3075 		spin_unlock(&fs_info->qgroup_lock);
3076 		ret = update_qgroup_info_item(trans, qgroup);
3077 		if (ret)
3078 			qgroup_mark_inconsistent(fs_info,
3079 						 "qgroup info item update error %d", ret);
3080 		ret = update_qgroup_limit_item(trans, qgroup);
3081 		if (ret)
3082 			qgroup_mark_inconsistent(fs_info,
3083 						 "qgroup limit item update error %d", ret);
3084 		spin_lock(&fs_info->qgroup_lock);
3085 	}
3086 	if (btrfs_qgroup_enabled(fs_info))
3087 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
3088 	else
3089 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
3090 	spin_unlock(&fs_info->qgroup_lock);
3091 
3092 	ret = update_qgroup_status_item(trans);
3093 	if (ret)
3094 		qgroup_mark_inconsistent(fs_info,
3095 					 "qgroup status item update error %d", ret);
3096 
3097 	return ret;
3098 }
3099 
3100 int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
3101 			       struct btrfs_qgroup_inherit *inherit,
3102 			       size_t size)
3103 {
3104 	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
3105 		return -EOPNOTSUPP;
3106 	if (size < sizeof(*inherit) || size > PAGE_SIZE)
3107 		return -EINVAL;
3108 
3109 	/*
3110 	 * In the past we allowed btrfs_qgroup_inherit to specify to copy
3111 	 * rfer/excl numbers directly from other qgroups.  This behavior has
3112 	 * been disabled in userspace for a very long time, but here we should
3113 	 * also disable it in the kernel, as this behavior is known to mark qgroup
3114 	 * inconsistent, and a rescan would wipe out the changes anyway.
3115 	 *
3116 	 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies.
3117 	 */
3118 	if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
3119 		return -EINVAL;
3120 
3121 	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
3122 		return -EINVAL;
3123 
3124 	/*
3125 	 * Skip the inherit source qgroups check if qgroup is not enabled.
3126 	 * Qgroup can still be enabled later, causing problems, but in that case
3127 	 * btrfs_qgroup_inherit() would just ignore those invalid ones.
3128 	 */
3129 	if (!btrfs_qgroup_enabled(fs_info))
3130 		return 0;
3131 
3132 	/*
3133 	 * Now check all the remaining qgroups, they should all:
3134 	 *
3135 	 * - Exist
3136 	 * - Be higher level qgroups.
3137 	 */
3138 	for (int i = 0; i < inherit->num_qgroups; i++) {
3139 		struct btrfs_qgroup *qgroup;
3140 		u64 qgroupid = inherit->qgroups[i];
3141 
3142 		if (btrfs_qgroup_level(qgroupid) == 0)
3143 			return -EINVAL;
3144 
3145 		spin_lock(&fs_info->qgroup_lock);
3146 		qgroup = find_qgroup_rb(fs_info, qgroupid);
3147 		if (!qgroup) {
3148 			spin_unlock(&fs_info->qgroup_lock);
3149 			return -ENOENT;
3150 		}
3151 		spin_unlock(&fs_info->qgroup_lock);
3152 	}
3153 	return 0;
3154 }
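/*
 * For reference, a minimal @inherit passing the above checks would be laid
 * out as below (illustrative sketch only; qgroup 1/100 is a made-up ID, and
 * the shift mirrors btrfs_qgroup_level()):
 *
 *	struct btrfs_qgroup_inherit *in;
 *
 *	in = kzalloc(struct_size(in, qgroups, 1), GFP_KERNEL);
 *	in->num_qgroups = 1;
 *	in->qgroups[0] = (1ULL << 48) | 100;	(qgroup 1/100)
 */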
3155 
3156 static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
3157 			       u64 inode_rootid,
3158 			       struct btrfs_qgroup_inherit **inherit)
3159 {
3160 	int i = 0;
3161 	u64 num_qgroups = 0;
3162 	struct btrfs_qgroup *inode_qg;
3163 	struct btrfs_qgroup_list *qg_list;
3164 	struct btrfs_qgroup_inherit *res;
3165 	size_t struct_sz;
3166 	u64 *qgids;
3167 
3168 	if (*inherit)
3169 		return -EEXIST;
3170 
3171 	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
3172 	if (!inode_qg)
3173 		return -ENOENT;
3174 
3175 	num_qgroups = list_count_nodes(&inode_qg->groups);
3176 
3177 	if (!num_qgroups)
3178 		return 0;
3179 
3180 	struct_sz = struct_size(res, qgroups, num_qgroups);
3181 	if (struct_sz == SIZE_MAX)
3182 		return -ERANGE;
3183 
3184 	res = kzalloc(struct_sz, GFP_NOFS);
3185 	if (!res)
3186 		return -ENOMEM;
3187 	res->num_qgroups = num_qgroups;
3188 	qgids = res->qgroups;
3189 
3190 	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
3191 		qgids[i++] = qg_list->group->qgroupid;
3192 
3193 	*inherit = res;
3194 	return 0;
3195 }
3196 
3197 /*
3198  * Check if we can skip rescan when inheriting qgroups.  If @src has a single
3199  * @parent, and that @parent owns all its bytes exclusively, we can skip
3200  * the full rescan by just adding nodesize to the @parent's excl/rfer.
3201  *
3202  * Return <0 for fatal errors (like srcid/parentid has no qgroup).
3203  * Return 0 if a quick inherit is done.
3204  * Return >0 if a quick inherit is not possible, and a full rescan is needed.
3205  */
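/*
 * Illustrative case (made-up IDs): snapshotting a subvolume whose qgroup
 * 0/257 is a member of 1/100 only, with 1/100 also passed via @inherit.
 * If 1/100 has excl == rfer, everything it references is exclusively
 * owned, so the snapshot only adds its new root node; bumping excl/rfer
 * by nodesize keeps the numbers exact without a rescan.
 */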
3206 static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
3207 					 u64 srcid, u64 parentid)
3208 {
3209 	struct btrfs_qgroup *src;
3210 	struct btrfs_qgroup *parent;
3211 	struct btrfs_qgroup_list *list;
3212 	int nr_parents = 0;
3213 
3214 	src = find_qgroup_rb(fs_info, srcid);
3215 	if (!src)
3216 		return -ENOENT;
3217 	parent = find_qgroup_rb(fs_info, parentid);
3218 	if (!parent)
3219 		return -ENOENT;
3220 
3221 	/*
3222 	 * Source has no parent qgroup, but our new qgroup would have one.
3223 	 * Qgroup numbers would become inconsistent.
3224 	 */
3225 	if (list_empty(&src->groups))
3226 		return 1;
3227 
3228 	list_for_each_entry(list, &src->groups, next_group) {
3229 		/* The parent is not the same, quick update is not possible. */
3230 		if (list->group->qgroupid != parentid)
3231 			return 1;
3232 		nr_parents++;
3233 		/*
3234 		 * More than one parent qgroup, we can't be sure about accounting
3235 		 * consistency.
3236 		 */
3237 		if (nr_parents > 1)
3238 			return 1;
3239 	}
3240 
3241 	/*
3242 	 * The parent does not exclusively own all its bytes.  We're not sure
3243 	 * if the source has any bytes not fully owned by the parent.
3244 	 */
3245 	if (parent->excl != parent->rfer)
3246 		return 1;
3247 
3248 	parent->excl += fs_info->nodesize;
3249 	parent->rfer += fs_info->nodesize;
3250 	return 0;
3251 }
3252 
3253 /*
3254  * Copy the accounting information between qgroups. This is necessary
3255  * when a snapshot or a subvolume is created. Throwing an error will
3256  * cause a transaction abort so we take extra care here to only error
3257  * when a readonly fs is a reasonable outcome.
3258  */
3259 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
3260 			 u64 objectid, u64 inode_rootid,
3261 			 struct btrfs_qgroup_inherit *inherit)
3262 {
3263 	int ret = 0;
3264 	u64 *i_qgroups;
3265 	bool committing = false;
3266 	struct btrfs_fs_info *fs_info = trans->fs_info;
3267 	struct btrfs_root *quota_root;
3268 	struct btrfs_qgroup *srcgroup;
3269 	struct btrfs_qgroup *dstgroup;
3270 	struct btrfs_qgroup *prealloc;
3271 	struct btrfs_qgroup_list **qlist_prealloc = NULL;
3272 	bool free_inherit = false;
3273 	bool need_rescan = false;
3274 	u32 level_size = 0;
3275 	u64 nums;
3276 
3277 	if (!btrfs_qgroup_enabled(fs_info))
3278 		return 0;
3279 
3280 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
3281 	if (!prealloc)
3282 		return -ENOMEM;
3283 
3284 	/*
3285 	 * There are only two callers of this function.
3286 	 *
3287 	 * One in create_subvol() in the ioctl context, which needs to hold
3288 	 * the qgroup_ioctl_lock.
3289 	 *
3290 	 * The other one in create_pending_snapshot() where no other qgroup
3291 	 * code can modify the fs as they all need to either start a new trans
3292 	 * or hold a trans handle, thus we don't need to hold the
3293 	 * qgroup_ioctl_lock.
3294 	 * This avoids a long and complex lock chain and makes lockdep happy.
3295 	 */
3296 	spin_lock(&fs_info->trans_lock);
3297 	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
3298 		committing = true;
3299 	spin_unlock(&fs_info->trans_lock);
3300 
3301 	if (!committing)
3302 		mutex_lock(&fs_info->qgroup_ioctl_lock);
3303 
3304 	quota_root = fs_info->quota_root;
3305 	if (!quota_root) {
3306 		ret = -EINVAL;
3307 		goto out;
3308 	}
3309 
3310 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
3311 		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
3312 		if (ret)
3313 			goto out;
3314 		free_inherit = true;
3315 	}
3316 
3317 	if (inherit) {
3318 		i_qgroups = (u64 *)(inherit + 1);
3319 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
3320 		       2 * inherit->num_excl_copies;
3321 		for (int i = 0; i < nums; i++) {
3322 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
3323 
3324 			/*
3325 			 * Zero out invalid groups so we can ignore
3326 			 * them later.
3327 			 */
3328 			if (!srcgroup ||
3329 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
3330 				*i_qgroups = 0ULL;
3331 
3332 			++i_qgroups;
3333 		}
3334 	}
3335 
3336 	/*
3337 	 * create a tracking group for the subvol itself
3338 	 */
3339 	ret = add_qgroup_item(trans, quota_root, objectid);
3340 	if (ret)
3341 		goto out;
3342 
3343 	/*
3344 	 * add qgroup to all inherited groups
3345 	 */
3346 	if (inherit) {
3347 		i_qgroups = (u64 *)(inherit + 1);
3348 		for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
3349 			if (*i_qgroups == 0)
3350 				continue;
3351 			ret = add_qgroup_relation_item(trans, objectid,
3352 						       *i_qgroups);
3353 			if (ret && ret != -EEXIST)
3354 				goto out;
3355 			ret = add_qgroup_relation_item(trans, *i_qgroups,
3356 						       objectid);
3357 			if (ret && ret != -EEXIST)
3358 				goto out;
3359 		}
3360 		ret = 0;
3361 
3362 		qlist_prealloc = kcalloc(inherit->num_qgroups,
3363 					 sizeof(struct btrfs_qgroup_list *),
3364 					 GFP_NOFS);
3365 		if (!qlist_prealloc) {
3366 			ret = -ENOMEM;
3367 			goto out;
3368 		}
3369 		for (int i = 0; i < inherit->num_qgroups; i++) {
3370 			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
3371 						    GFP_NOFS);
3372 			if (!qlist_prealloc[i]) {
3373 				ret = -ENOMEM;
3374 				goto out;
3375 			}
3376 		}
3377 	}
3378 
3379 	spin_lock(&fs_info->qgroup_lock);
3380 
3381 	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
3382 	prealloc = NULL;
3383 
3384 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
3385 		dstgroup->lim_flags = inherit->lim.flags;
3386 		dstgroup->max_rfer = inherit->lim.max_rfer;
3387 		dstgroup->max_excl = inherit->lim.max_excl;
3388 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
3389 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
3390 
3391 		qgroup_dirty(fs_info, dstgroup);
3392 	}
3393 
3394 	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
3395 		srcgroup = find_qgroup_rb(fs_info, srcid);
3396 		if (!srcgroup)
3397 			goto unlock;
3398 
3399 		/*
3400 		 * We call inherit after we clone the root in order to make sure
3401 		 * our counts don't go crazy, so at this point the only
3402 		 * difference between the two roots should be the root node.
3403 		 */
3404 		level_size = fs_info->nodesize;
3405 		dstgroup->rfer = srcgroup->rfer;
3406 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
3407 		dstgroup->excl = level_size;
3408 		dstgroup->excl_cmpr = level_size;
3409 		srcgroup->excl = level_size;
3410 		srcgroup->excl_cmpr = level_size;
3411 
3412 		/* inherit the limit info */
3413 		dstgroup->lim_flags = srcgroup->lim_flags;
3414 		dstgroup->max_rfer = srcgroup->max_rfer;
3415 		dstgroup->max_excl = srcgroup->max_excl;
3416 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3417 		dstgroup->rsv_excl = srcgroup->rsv_excl;
3418 
3419 		qgroup_dirty(fs_info, dstgroup);
3420 		qgroup_dirty(fs_info, srcgroup);
3421 
3422 		/*
3423 		 * If the source qgroup has parent but the new one doesn't,
3424 		 * we need a full rescan.
3425 		 */
3426 		if (!inherit && !list_empty(&srcgroup->groups))
3427 			need_rescan = true;
3428 	}
3429 
3430 	if (!inherit)
3431 		goto unlock;
3432 
3433 	i_qgroups = (u64 *)(inherit + 1);
3434 	for (int i = 0; i < inherit->num_qgroups; i++) {
3435 		if (*i_qgroups) {
3436 			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
3437 					      *i_qgroups);
3438 			qlist_prealloc[i] = NULL;
3439 			if (ret)
3440 				goto unlock;
3441 		}
3442 		if (srcid) {
3443 			/* Check if we can do a quick inherit. */
3444 			ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups);
3445 			if (ret < 0)
3446 				goto unlock;
3447 			if (ret > 0)
3448 				need_rescan = true;
3449 			ret = 0;
3450 		}
3451 		++i_qgroups;
3452 	}
3453 
3454 	for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
3455 		struct btrfs_qgroup *src;
3456 		struct btrfs_qgroup *dst;
3457 
3458 		if (!i_qgroups[0] || !i_qgroups[1])
3459 			continue;
3460 
3461 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3462 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3463 
3464 		if (!src || !dst) {
3465 			ret = -EINVAL;
3466 			goto unlock;
3467 		}
3468 
3469 		dst->rfer = src->rfer - level_size;
3470 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3471 
3472 		/* Manually tweaking numbers certainly needs a rescan */
3473 		need_rescan = true;
3474 	}
3475 	for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
3476 		struct btrfs_qgroup *src;
3477 		struct btrfs_qgroup *dst;
3478 
3479 		if (!i_qgroups[0] || !i_qgroups[1])
3480 			continue;
3481 
3482 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3483 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3484 
3485 		if (!src || !dst) {
3486 			ret = -EINVAL;
3487 			goto unlock;
3488 		}
3489 
3490 		dst->excl = src->excl + level_size;
3491 		dst->excl_cmpr = src->excl_cmpr + level_size;
3492 		need_rescan = true;
3493 	}
3494 
3495 unlock:
3496 	spin_unlock(&fs_info->qgroup_lock);
3497 	if (!ret)
3498 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3499 out:
3500 	if (!committing)
3501 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3502 	if (need_rescan)
3503 		qgroup_mark_inconsistent(fs_info, "qgroup inherit needs a rescan");
3504 	if (qlist_prealloc) {
3505 		for (int i = 0; i < inherit->num_qgroups; i++)
3506 			kfree(qlist_prealloc[i]);
3507 		kfree(qlist_prealloc);
3508 	}
3509 	if (free_inherit)
3510 		kfree(inherit);
3511 	kfree(prealloc);
3512 	return ret;
3513 }
3514 
3515 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3516 {
3517 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3518 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3519 		return false;
3520 
3521 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3522 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3523 		return false;
3524 
3525 	return true;
3526 }
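/*
 * E.g. with illustrative numbers: max_rfer == 1 MiB, rfer == 768 KiB and
 * 128 KiB already reserved.  A further 256 KiB request gives
 * 128K + 768K + 256K == 1152 KiB > 1024 KiB, so the check fails and an
 * enforcing qgroup_reserve() below returns -EDQUOT.
 */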
3527 
3528 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3529 			  enum btrfs_qgroup_rsv_type type)
3530 {
3531 	struct btrfs_qgroup *qgroup;
3532 	struct btrfs_fs_info *fs_info = root->fs_info;
3533 	u64 ref_root = btrfs_root_id(root);
3534 	int ret = 0;
3535 	LIST_HEAD(qgroup_list);
3536 
3537 	if (!btrfs_is_fstree(ref_root))
3538 		return 0;
3539 
3540 	if (num_bytes == 0)
3541 		return 0;
3542 
3543 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3544 	    capable(CAP_SYS_RESOURCE))
3545 		enforce = false;
3546 
3547 	spin_lock(&fs_info->qgroup_lock);
3548 	if (!fs_info->quota_root)
3549 		goto out;
3550 
3551 	qgroup = find_qgroup_rb(fs_info, ref_root);
3552 	if (!qgroup)
3553 		goto out;
3554 
3555 	qgroup_iterator_add(&qgroup_list, qgroup);
3556 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3557 		struct btrfs_qgroup_list *glist;
3558 
3559 		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3560 			ret = -EDQUOT;
3561 			goto out;
3562 		}
3563 
3564 		list_for_each_entry(glist, &qgroup->groups, next_group)
3565 			qgroup_iterator_add(&qgroup_list, glist->group);
3566 	}
3567 
3568 	ret = 0;
3569 	/*
3570 	 * No limits exceeded, now record the reservation into all qgroups.
3571 	 */
3572 	list_for_each_entry(qgroup, &qgroup_list, iterator)
3573 		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
3574 
3575 out:
3576 	qgroup_iterator_clean(&qgroup_list);
3577 	spin_unlock(&fs_info->qgroup_lock);
3578 	return ret;
3579 }
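/*
 * The list walk above is the common ancestor-walk pattern in this file:
 * seed the list with one qgroup, then, while iterating, append every parent
 * found via ->groups so that it is visited as well.  A minimal sketch of
 * the same pattern (using the helpers defined earlier in this file):
 *
 *	LIST_HEAD(qgroup_list);
 *
 *	qgroup_iterator_add(&qgroup_list, qgroup);
 *	list_for_each_entry(qgroup, &qgroup_list, iterator) {
 *		struct btrfs_qgroup_list *glist;
 *
 *		(visit @qgroup here)
 *
 *		list_for_each_entry(glist, &qgroup->groups, next_group)
 *			qgroup_iterator_add(&qgroup_list, glist->group);
 *	}
 *	qgroup_iterator_clean(&qgroup_list);
 *
 * qgroup_iterator_add() skips qgroups that are already linked, so shared
 * ancestors are visited only once.
 */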
3580 
3581 /*
3582  * Free @num_bytes of reserved space of @type for a qgroup (normally a
3583  * level 0 qgroup).
3584  *
3585  * Will handle all higher level qgroups too.
3586  *
3587  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3588  * This special case is only used for META_PERTRANS type.
3589  */
3590 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3591 			       u64 ref_root, u64 num_bytes,
3592 			       enum btrfs_qgroup_rsv_type type)
3593 {
3594 	struct btrfs_qgroup *qgroup;
3595 	LIST_HEAD(qgroup_list);
3596 
3597 	if (!btrfs_is_fstree(ref_root))
3598 		return;
3599 
3600 	if (num_bytes == 0)
3601 		return;
3602 
3603 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3604 		WARN(1, "%s: Invalid type to free", __func__);
3605 		return;
3606 	}
3607 	spin_lock(&fs_info->qgroup_lock);
3608 
3609 	if (!fs_info->quota_root)
3610 		goto out;
3611 
3612 	qgroup = find_qgroup_rb(fs_info, ref_root);
3613 	if (!qgroup)
3614 		goto out;
3615 
3616 	if (num_bytes == (u64)-1)
3617 		/*
3618 		 * We're freeing all pertrans rsv, get reserved value from
3619 		 * We're freeing all pertrans rsv, get the reserved value from
3620 		 * the level 0 qgroup as the real num_bytes to free.
3621 		num_bytes = qgroup->rsv.values[type];
3622 
3623 	qgroup_iterator_add(&qgroup_list, qgroup);
3624 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3625 		struct btrfs_qgroup_list *glist;
3626 
3627 		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
3628 		list_for_each_entry(glist, &qgroup->groups, next_group) {
3629 			qgroup_iterator_add(&qgroup_list, glist->group);
3630 		}
3631 	}
3632 out:
3633 	qgroup_iterator_clean(&qgroup_list);
3634 	spin_unlock(&fs_info->qgroup_lock);
3635 }
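/*
 * Example call for the special case documented above (hypothetical values):
 * drop every remaining META_PERTRANS byte of subvolume 256 at transaction
 * commit time:
 *
 *	btrfs_qgroup_free_refroot(fs_info, 256, (u64)-1,
 *				  BTRFS_QGROUP_RSV_META_PERTRANS);
 *
 * This is exactly what btrfs_qgroup_free_meta_all_pertrans() below does.
 */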
3636 
3637 /*
3638  * Check if the leaf is the last leaf, i.e. all node pointers along the
3639  * path are at their last position.
3640  */
3641 static bool is_last_leaf(struct btrfs_path *path)
3642 {
3643 	int i;
3644 
3645 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3646 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3647 			return false;
3648 	}
3649 	return true;
3650 }
3651 
3652 /*
3653  * returns < 0 on error, 0 when more leaves are to be scanned.
3654  * returns 1 when done.
3655  */
3656 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3657 			      struct btrfs_path *path)
3658 {
3659 	struct btrfs_fs_info *fs_info = trans->fs_info;
3660 	struct btrfs_root *extent_root;
3661 	struct btrfs_key found;
3662 	struct extent_buffer *scratch_leaf = NULL;
3663 	u64 num_bytes;
3664 	bool done;
3665 	int slot;
3666 	int ret;
3667 
3668 	if (!btrfs_qgroup_full_accounting(fs_info))
3669 		return 1;
3670 
3671 	mutex_lock(&fs_info->qgroup_rescan_lock);
3672 	extent_root = btrfs_extent_root(fs_info,
3673 				fs_info->qgroup_rescan_progress.objectid);
3674 	ret = btrfs_search_slot_for_read(extent_root,
3675 					 &fs_info->qgroup_rescan_progress,
3676 					 path, 1, 0);
3677 
3678 	btrfs_debug(fs_info,
3679 		    "current progress key " BTRFS_KEY_FMT ", search_slot ret %d",
3680 		    BTRFS_KEY_FMT_VALUE(&fs_info->qgroup_rescan_progress), ret);
3681 
3682 	if (ret) {
3683 		/*
3684 		 * The rescan is about to end, we will not be scanning any
3685 		 * further blocks. We cannot unset the RESCAN flag here, because
3686 		 * we want to commit the transaction if everything went well.
3687 		 * To make the live accounting work in this phase, we set our
3688 		 * scan progress pointer such that every real extent objectid
3689 		 * will be smaller.
3690 		 */
3691 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3692 		btrfs_release_path(path);
3693 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3694 		return ret;
3695 	}
3696 	done = is_last_leaf(path);
3697 
3698 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3699 			      btrfs_header_nritems(path->nodes[0]) - 1);
3700 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3701 
3702 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3703 	if (!scratch_leaf) {
3704 		ret = -ENOMEM;
3705 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3706 		goto out;
3707 	}
3708 	slot = path->slots[0];
3709 	btrfs_release_path(path);
3710 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3711 
3712 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3713 		struct btrfs_backref_walk_ctx ctx = { 0 };
3714 
3715 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3716 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3717 		    found.type != BTRFS_METADATA_ITEM_KEY)
3718 			continue;
3719 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3720 			num_bytes = fs_info->nodesize;
3721 		else
3722 			num_bytes = found.offset;
3723 
3724 		ctx.bytenr = found.objectid;
3725 		ctx.fs_info = fs_info;
3726 
3727 		ret = btrfs_find_all_roots(&ctx, false);
3728 		if (ret < 0)
3729 			goto out;
3730 		/* For rescan, just pass old_roots as NULL */
3731 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3732 						  num_bytes, NULL, ctx.roots);
3733 		if (ret < 0)
3734 			goto out;
3735 	}
3736 out:
3737 	if (scratch_leaf)
3738 		free_extent_buffer(scratch_leaf);
3739 
3740 	if (done && !ret) {
3741 		ret = 1;
3742 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3743 	}
3744 	return ret;
3745 }
3746 
3747 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3748 {
3749 	if (btrfs_fs_closing(fs_info))
3750 		return true;
3751 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
3752 		return true;
3753 	if (!btrfs_qgroup_enabled(fs_info))
3754 		return true;
3755 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3756 		return true;
3757 	return false;
3758 }
3759 
3760 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3761 {
3762 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3763 						     qgroup_rescan_work);
3764 	struct btrfs_path *path;
3765 	struct btrfs_trans_handle *trans = NULL;
3766 	int ret = 0;
3767 	bool stopped = false;
3768 	bool did_leaf_rescans = false;
3769 
3770 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3771 		return;
3772 
3773 	path = btrfs_alloc_path();
3774 	if (!path) {
3775 		ret = -ENOMEM;
3776 		goto out;
3777 	}
3778 	/*
3779 	 * The rescan should only search the commit root, any later difference
3780 	 * is recorded by the qgroup accounting.
3781 	 */
3782 	path->search_commit_root = true;
3783 	path->skip_locking = true;
3784 
3785 	while (!ret && !(stopped = rescan_should_stop(fs_info))) {
3786 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3787 		if (IS_ERR(trans)) {
3788 			ret = PTR_ERR(trans);
3789 			break;
3790 		}
3791 
3792 		ret = qgroup_rescan_leaf(trans, path);
3793 		did_leaf_rescans = true;
3794 
3795 		if (ret > 0)
3796 			btrfs_commit_transaction(trans);
3797 		else
3798 			btrfs_end_transaction(trans);
3799 	}
3800 
3801 out:
3802 	btrfs_free_path(path);
3803 
3804 	mutex_lock(&fs_info->qgroup_rescan_lock);
3805 	if (ret > 0 &&
3806 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3807 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3808 	} else if (ret < 0 || stopped) {
3809 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3810 	}
3811 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3812 
3813 	/*
3814 	 * Only update status, since the previous part has already updated the
3815 	 * qgroup info, and only if we did any actual work. This also prevents
3816 	 * race with a concurrent quota disable, which has already set
3817 	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3818 	 * btrfs_quota_disable().
3819 	 */
3820 	if (did_leaf_rescans) {
3821 		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3822 		if (IS_ERR(trans)) {
3823 			ret = PTR_ERR(trans);
3824 			trans = NULL;
3825 			btrfs_err(fs_info,
3826 				  "fail to start transaction for status update: %d",
3827 				  ret);
3828 		}
3829 	} else {
3830 		trans = NULL;
3831 	}
3832 
3833 	mutex_lock(&fs_info->qgroup_rescan_lock);
3834 	if (!stopped ||
3835 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3836 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3837 	if (trans) {
3838 		int ret2 = update_qgroup_status_item(trans);
3839 
3840 		if (ret2 < 0) {
3841 			ret = ret2;
3842 			btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
3843 		}
3844 	}
3845 	fs_info->qgroup_rescan_running = false;
3846 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3847 	complete_all(&fs_info->qgroup_rescan_completion);
3848 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3849 
3850 	if (!trans)
3851 		return;
3852 
3853 	btrfs_end_transaction(trans);
3854 
3855 	if (stopped) {
3856 		btrfs_info(fs_info, "qgroup scan paused");
3857 	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3858 		btrfs_info(fs_info, "qgroup scan cancelled");
3859 	} else if (ret >= 0) {
3860 		btrfs_info(fs_info, "qgroup scan completed%s",
3861 			ret > 0 ? " (inconsistency flag cleared)" : "");
3862 	} else {
3863 		btrfs_err(fs_info, "qgroup scan failed with %d", ret);
3864 	}
3865 }
3866 
3867 /*
3868  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3869  * memory required for the rescan context.
3870  */
3871 static int
3872 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3873 		   int init_flags)
3874 {
3875 	int ret = 0;
3876 
3877 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3878 		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
3879 		return -EINVAL;
3880 	}
3881 
3882 	if (!init_flags) {
3883 		/* we're resuming qgroup rescan at mount time */
3884 		if (!(fs_info->qgroup_flags &
3885 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3886 			btrfs_debug(fs_info,
3887 			"qgroup rescan init failed, qgroup rescan is not queued");
3888 			ret = -EINVAL;
3889 		} else if (!(fs_info->qgroup_flags &
3890 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3891 			btrfs_debug(fs_info,
3892 			"qgroup rescan init failed, qgroup is not enabled");
3893 			ret = -ENOTCONN;
3894 		}
3895 
3896 		if (ret)
3897 			return ret;
3898 	}
3899 
3900 	mutex_lock(&fs_info->qgroup_rescan_lock);
3901 
3902 	if (init_flags) {
3903 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3904 			ret = -EINPROGRESS;
3905 		} else if (!(fs_info->qgroup_flags &
3906 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3907 			btrfs_debug(fs_info,
3908 			"qgroup rescan init failed, qgroup is not enabled");
3909 			ret = -ENOTCONN;
3910 		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
3911 			/* Quota disable is in progress */
3912 			ret = -EBUSY;
3913 		}
3914 
3915 		if (ret) {
3916 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3917 			return ret;
3918 		}
3919 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3920 	}
3921 
3922 	memset(&fs_info->qgroup_rescan_progress, 0,
3923 		sizeof(fs_info->qgroup_rescan_progress));
3924 	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
3925 				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
3926 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3927 	init_completion(&fs_info->qgroup_rescan_completion);
3928 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3929 
3930 	btrfs_init_work(&fs_info->qgroup_rescan_work,
3931 			btrfs_qgroup_rescan_worker, NULL);
3932 	return 0;
3933 }
3934 
3935 static void
3936 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3937 {
3938 	struct rb_node *n;
3939 	struct btrfs_qgroup *qgroup;
3940 
3941 	spin_lock(&fs_info->qgroup_lock);
3942 	/* clear all current qgroup tracking information */
3943 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3944 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
3945 		qgroup->rfer = 0;
3946 		qgroup->rfer_cmpr = 0;
3947 		qgroup->excl = 0;
3948 		qgroup->excl_cmpr = 0;
3949 		qgroup_dirty(fs_info, qgroup);
3950 	}
3951 	spin_unlock(&fs_info->qgroup_lock);
3952 }
3953 
3954 int
3955 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3956 {
3957 	int ret = 0;
3958 
3959 	ret = qgroup_rescan_init(fs_info, 0, 1);
3960 	if (ret)
3961 		return ret;
3962 
3963 	/*
3964 	 * We have set the rescan_progress to 0, which means no more
3965 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
3966 	 * However, btrfs_qgroup_account_ref may be right after its call
3967 	 * However, btrfs_qgroup_account_ref may already be past its call
3968 	 * to btrfs_find_all_roots, in which case it would still do the
3969 	 * accounting.
3970 	 * ensure we run all delayed refs and only after that, we are
3971 	 * going to clear all tracking information for a clean start.
3972 	 */
3973 
3974 	ret = btrfs_commit_current_transaction(fs_info->fs_root);
3975 	if (ret) {
3976 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3977 		return ret;
3978 	}
3979 
3980 	qgroup_rescan_zero_tracking(fs_info);
3981 
3982 	mutex_lock(&fs_info->qgroup_rescan_lock);
3983 	/*
3984 	 * The rescan worker is only for full accounting qgroups, check if it's
3985 	 * enabled as it is pointless to queue it otherwise. A concurrent quota
3986 	 * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED.
3987 	 */
3988 	if (btrfs_qgroup_full_accounting(fs_info)) {
3989 		fs_info->qgroup_rescan_running = true;
3990 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
3991 				 &fs_info->qgroup_rescan_work);
3992 	} else {
3993 		ret = -ENOTCONN;
3994 	}
3995 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3996 
3997 	return ret;
3998 }
3999 
4000 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
4001 				     bool interruptible)
4002 {
4003 	int running;
4004 	int ret = 0;
4005 
4006 	mutex_lock(&fs_info->qgroup_rescan_lock);
4007 	running = fs_info->qgroup_rescan_running;
4008 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4009 
4010 	if (!running)
4011 		return 0;
4012 
4013 	if (interruptible)
4014 		ret = wait_for_completion_interruptible(
4015 					&fs_info->qgroup_rescan_completion);
4016 	else
4017 		wait_for_completion(&fs_info->qgroup_rescan_completion);
4018 
4019 	return ret;
4020 }
4021 
4022 /*
4023  * This is only called from open_ctree() where we're still single threaded,
4024  * thus locking is omitted here.
4025  */
4026 void
4027 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
4028 {
4029 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4030 		mutex_lock(&fs_info->qgroup_rescan_lock);
4031 		fs_info->qgroup_rescan_running = true;
4032 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4033 				 &fs_info->qgroup_rescan_work);
4034 		mutex_unlock(&fs_info->qgroup_rescan_lock);
4035 	}
4036 }
4037 
4038 #define rbtree_iterate_from_safe(node, next, start)				\
4039        for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
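/*
 * Usage sketch for the macro above (hypothetical iteration): because @next
 * is sampled before the loop body runs, the body may safely erase @node
 * from the tree:
 *
 *	struct rb_node *node;
 *	struct rb_node *next;
 *
 *	rbtree_iterate_from_safe(node, next, rb_first(root)) {
 *		if (should_drop(node))
 *			rb_erase(node, root);
 *	}
 *
 * (should_drop() is a made-up predicate for illustration.)
 */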
4040 
4041 static int qgroup_unreserve_range(struct btrfs_inode *inode,
4042 				  struct extent_changeset *reserved, u64 start,
4043 				  u64 len)
4044 {
4045 	struct rb_node *node;
4046 	struct rb_node *next;
4047 	struct ulist_node *entry;
4048 	int ret = 0;
4049 
4050 	node = reserved->range_changed.root.rb_node;
4051 	if (!node)
4052 		return 0;
4053 	while (node) {
4054 		entry = rb_entry(node, struct ulist_node, rb_node);
4055 		if (entry->val < start)
4056 			node = node->rb_right;
4057 		else
4058 			node = node->rb_left;
4059 	}
4060 
4061 	if (entry->val > start && rb_prev(&entry->rb_node))
4062 		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
4063 				 rb_node);
4064 
4065 	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
4066 		u64 entry_start;
4067 		u64 entry_end;
4068 		u64 entry_len;
4069 		int clear_ret;
4070 
4071 		entry = rb_entry(node, struct ulist_node, rb_node);
4072 		entry_start = entry->val;
4073 		entry_end = entry->aux;
4074 		entry_len = entry_end - entry_start + 1;
4075 
4076 		if (entry_start >= start + len)
4077 			break;
4078 		if (entry_start + entry_len <= start)
4079 			continue;
4080 		/*
4081 		 * Now the entry intersects [start, start + len), clear the
4082 		 * EXTENT_QGROUP_RESERVED bit.
4083 		 */
4084 		clear_ret = btrfs_clear_extent_bit(&inode->io_tree, entry_start, entry_end,
4085 						   EXTENT_QGROUP_RESERVED, NULL);
4086 		if (!ret && clear_ret < 0)
4087 			ret = clear_ret;
4088 
4089 		ulist_del(&reserved->range_changed, entry->val, entry->aux);
4090 		if (likely(reserved->bytes_changed >= entry_len)) {
4091 			reserved->bytes_changed -= entry_len;
4092 		} else {
4093 			WARN_ON(1);
4094 			reserved->bytes_changed = 0;
4095 		}
4096 	}
4097 
4098 	return ret;
4099 }
4100 
4101 /*
4102  * Try to free some space for qgroup.
4103  *
4104  * For qgroup, there are only 3 ways to free qgroup space:
4105  * - Flush nodatacow write
4106  *   Any nodatacow write will free its reserved data space at run_delalloc_range().
4107  *   In theory, we should only flush nodatacow inodes, but it's not yet
4108  *   possible, so we need to flush the whole root.
4109  *
4110  * - Wait for ordered extents
4111  *   When ordered extents are finished, their reserved metadata is finally
4112  *   When ordered extents are finished, their reserved metadata is finally
4113  *   converted to per_trans status, which can be freed by a later
4114  *   transaction commit.
4115  *
4116  * - Commit transaction
4117  *   This would free the meta_per_trans space.
4118  *   In theory this shouldn't provide much space, but any extra qgroup
4119  *   space is better than none.
4120 static int try_flush_qgroup(struct btrfs_root *root)
4121 {
4122 	int ret;
4123 
4124 	/* Can't hold an open transaction or we run the risk of deadlocking. */
4125 	ASSERT(current->journal_info == NULL);
4126 	if (WARN_ON(current->journal_info))
4127 		return 0;
4128 
4129 	/*
4130 	 * We don't want to run flush again and again, so if there is a running
4131 	 * one, we won't try to start a new flush, but exit directly.
4132 	 */
4133 	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
4134 		wait_event(root->qgroup_flush_wait,
4135 			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
4136 		return 0;
4137 	}
4138 
4139 	ret = btrfs_start_delalloc_snapshot(root, true);
4140 	if (ret < 0)
4141 		goto out;
4142 	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
4143 
4144 	/*
4145 	 * After waiting for ordered extents run delayed iputs in order to free
4146 	 * space from unlinked files before committing the current transaction,
4147 	 * as ordered extents may have been holding the last reference of an
4148 	 * inode and they add a delayed iput when they complete.
4149 	 */
4150 	btrfs_run_delayed_iputs(root->fs_info);
4151 	btrfs_wait_on_delayed_iputs(root->fs_info);
4152 
4153 	ret = btrfs_commit_current_transaction(root);
4154 out:
4155 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
4156 	wake_up(&root->qgroup_flush_wait);
4157 	return ret;
4158 }
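/*
 * Note on the BTRFS_ROOT_QGROUP_FLUSHING bit used above: it implements a
 * single-flusher gate.  The first task to set the bit performs the flush,
 * while every other task blocks in wait_event() until clear_bit() and
 * wake_up() run, then retries its reservation.  Waiters thus piggyback on
 * the flush they waited for instead of issuing their own.
 */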
4159 
4160 static int qgroup_reserve_data(struct btrfs_inode *inode,
4161 			struct extent_changeset **reserved_ret, u64 start,
4162 			u64 len)
4163 {
4164 	struct btrfs_root *root = inode->root;
4165 	struct extent_changeset *reserved;
4166 	bool new_reserved = false;
4167 	u64 orig_reserved;
4168 	u64 to_reserve;
4169 	int ret;
4170 
4171 	if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4172 	    !btrfs_is_fstree(btrfs_root_id(root)) || len == 0)
4173 		return 0;
4174 
4175 	/* @reserved parameter is mandatory for qgroup */
4176 	if (WARN_ON(!reserved_ret))
4177 		return -EINVAL;
4178 	if (!*reserved_ret) {
4179 		new_reserved = true;
4180 		*reserved_ret = extent_changeset_alloc();
4181 		if (!*reserved_ret)
4182 			return -ENOMEM;
4183 	}
4184 	reserved = *reserved_ret;
4185 	/* Record already reserved space */
4186 	orig_reserved = reserved->bytes_changed;
4187 	ret = btrfs_set_record_extent_bits(&inode->io_tree, start,
4188 					   start + len - 1, EXTENT_QGROUP_RESERVED,
4189 					   reserved);
4190 
4191 	/* Newly reserved space */
4192 	to_reserve = reserved->bytes_changed - orig_reserved;
4193 	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
4194 					to_reserve, QGROUP_RESERVE);
4195 	if (ret < 0)
4196 		goto out;
4197 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
4198 	if (ret < 0)
4199 		goto cleanup;
4200 
4201 	return ret;
4202 
4203 cleanup:
4204 	qgroup_unreserve_range(inode, reserved, start, len);
4205 out:
4206 	if (new_reserved) {
4207 		extent_changeset_free(reserved);
4208 		*reserved_ret = NULL;
4209 	}
4210 	return ret;
4211 }
4212 
4213 /*
4214  * Reserve qgroup space for range [start, start + len).
4215  *
4216  * This function will either reserve space from related qgroups or do nothing
4217  * if the range is already reserved.
4218  *
4219  * Return 0 for successful reservation
4220  * Return <0 for error (including -EDQUOT)
4221  *
4222  * NOTE: This function may sleep for memory allocation, dirty page flushing and
4223  *	 commit transaction. So caller should not hold any dirty page locked.
4224  */
4225 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
4226 			struct extent_changeset **reserved_ret, u64 start,
4227 			u64 len)
4228 {
4229 	int ret;
4230 
4231 	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
4232 	if (ret <= 0 && ret != -EDQUOT)
4233 		return ret;
4234 
4235 	ret = try_flush_qgroup(inode->root);
4236 	if (ret < 0)
4237 		return ret;
4238 	return qgroup_reserve_data(inode, reserved_ret, start, len);
4239 }
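/*
 * Sketch of a typical caller (hypothetical names; real callers live in the
 * data reservation paths):
 *
 *	struct extent_changeset *reserved = NULL;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, pos, count);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_write(inode, pos, count);
 *	if (ret < 0)
 *		btrfs_qgroup_free_data(inode, reserved, pos, count, NULL);
 *	extent_changeset_free(reserved);
 *
 * On -EDQUOT this wrapper has already done one flush-and-retry round
 * before returning the error.
 */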
4240 
4241 /* Free ranges specified by @reserved, normally in error path */
4242 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
4243 				     struct extent_changeset *reserved,
4244 				     u64 start, u64 len, u64 *freed_ret)
4245 {
4246 	struct btrfs_root *root = inode->root;
4247 	struct ulist_node *unode;
4248 	struct ulist_iterator uiter;
4249 	struct extent_changeset changeset;
4250 	u64 freed = 0;
4251 	int ret;
4252 
4253 	extent_changeset_init(&changeset);
4254 	len = round_up(start + len, root->fs_info->sectorsize);
4255 	start = round_down(start, root->fs_info->sectorsize);
4256 
4257 	ULIST_ITER_INIT(&uiter);
4258 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
4259 		u64 range_start = unode->val;
4260 		/* unode->aux is the inclusive end */
4261 		u64 range_len = unode->aux - range_start + 1;
4262 		u64 free_start;
4263 		u64 free_len;
4264 
4265 		extent_changeset_release(&changeset);
4266 
4267 		/* Only free ranges overlapping [start, start + len) */
4268 		if (range_start >= start + len ||
4269 		    range_start + range_len <= start)
4270 			continue;
4271 		free_start = max(range_start, start);
4272 		free_len = min(start + len, range_start + range_len) -
4273 			   free_start;
4274 		/*
4275 		 * TODO: Also modify reserved->ranges_reserved to reflect
4276 		 * the modification.
4277 		 *
4278 		 * However, as long as we free qgroup reserved space according
4279 		 * to EXTENT_QGROUP_RESERVED, we won't double free.
4280 		 * So there is no need to rush.
4281 		 */
4282 		ret = btrfs_clear_record_extent_bits(&inode->io_tree, free_start,
4283 						     free_start + free_len - 1,
4284 						     EXTENT_QGROUP_RESERVED,
4285 						     &changeset);
4286 		if (ret < 0)
4287 			goto out;
4288 		freed += changeset.bytes_changed;
4289 	}
4290 	btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
4291 				  BTRFS_QGROUP_RSV_DATA);
4292 	if (freed_ret)
4293 		*freed_ret = freed;
4294 	ret = 0;
4295 out:
4296 	extent_changeset_release(&changeset);
4297 	return ret;
4298 }
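/*
 * Worked example of the clamping above (illustrative numbers): a recorded
 * range [0, 64K) combined with a free request of start = 16K, len = 16K
 * yields
 *
 *	free_start = max(0, 16K)         = 16K
 *	free_len   = min(32K, 64K) - 16K = 16K
 *
 * so only the overlap of the recorded range with [start, start + len) is
 * cleared and accounted as freed.
 */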
4299 
4300 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
4301 			struct extent_changeset *reserved, u64 start, u64 len,
4302 			u64 *released, int free)
4303 {
4304 	struct extent_changeset changeset;
4305 	int trace_op = QGROUP_RELEASE;
4306 	int ret;
4307 
4308 	if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
4309 		return btrfs_clear_record_extent_bits(&inode->io_tree, start,
4310 						      start + len - 1,
4311 						      EXTENT_QGROUP_RESERVED, NULL);
4312 	}
4313 
4314 	/* In release case, we shouldn't have @reserved */
4315 	WARN_ON(!free && reserved);
4316 	if (free && reserved)
4317 		return qgroup_free_reserved_data(inode, reserved, start, len, released);
4318 	extent_changeset_init(&changeset);
4319 	ret = btrfs_clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
4320 					     EXTENT_QGROUP_RESERVED, &changeset);
4321 	if (ret < 0)
4322 		goto out;
4323 
4324 	if (free)
4325 		trace_op = QGROUP_FREE;
4326 	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
4327 					changeset.bytes_changed, trace_op);
4328 	if (free)
4329 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4330 				btrfs_root_id(inode->root),
4331 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4332 	if (released)
4333 		*released = changeset.bytes_changed;
4334 out:
4335 	extent_changeset_release(&changeset);
4336 	return ret;
4337 }
4338 
4339 /*
4340  * Free a reserved space range from io_tree and related qgroups
4341  *
4342  * Should be called when a range of pages gets invalidated before reaching
4343  * disk, or for the error cleanup case.
4344  * If @reserved is given, only the reserved ranges in [@start, @start + @len)
4345  * will be freed.
4346  *
4347  * For data written to disk, use btrfs_qgroup_release_data().
4348  *
4349  * NOTE: This function may sleep for memory allocation.
4350  */
4351 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
4352 			   struct extent_changeset *reserved,
4353 			   u64 start, u64 len, u64 *freed)
4354 {
4355 	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
4356 }
4357 
4358 /*
4359  * Release a reserved space range from io_tree only.
4360  *
4361  * Should be called when a range of pages gets written to disk and the
4362  * corresponding FILE_EXTENT item is inserted into the corresponding root.
4363  *
4364  * Since the qgroup accounting framework only updates qgroup numbers at
4365  * commit_transaction() time, the reserved space shouldn't be freed from
4366  * the related qgroups yet.
4367  *
4368  * But we should release the range from the io_tree, to allow further
4369  * writes to be COWed.
4370  *
4371  * NOTE: This function may sleep for memory allocation.
4372  */
4373 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
4374 {
4375 	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
4376 }
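/*
 * To summarize the two wrappers above (hypothetical call sites):
 *
 * - Ordered extent finished, file extent item inserted:
 *
 *	btrfs_qgroup_release_data(inode, start, len, &released);
 *
 *   The io_tree bit is cleared but the qgroup reservation is kept until
 *   the numbers are accounted at transaction commit.
 *
 * - Write failed or pages invalidated before reaching disk:
 *
 *	btrfs_qgroup_free_data(inode, reserved, start, len, &freed);
 *
 *   The io_tree bit is cleared and the reservation is returned to the
 *   qgroups immediately.
 */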
4377 
4378 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4379 			      enum btrfs_qgroup_rsv_type type)
4380 {
4381 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4382 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4383 		return;
4384 	if (num_bytes == 0)
4385 		return;
4386 
4387 	spin_lock(&root->qgroup_meta_rsv_lock);
4388 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
4389 		root->qgroup_meta_rsv_prealloc += num_bytes;
4390 	else
4391 		root->qgroup_meta_rsv_pertrans += num_bytes;
4392 	spin_unlock(&root->qgroup_meta_rsv_lock);
4393 }
4394 
4395 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4396 			     enum btrfs_qgroup_rsv_type type)
4397 {
4398 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4399 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4400 		return 0;
4401 	if (num_bytes == 0)
4402 		return 0;
4403 
4404 	spin_lock(&root->qgroup_meta_rsv_lock);
4405 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
4406 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
4407 				  num_bytes);
4408 		root->qgroup_meta_rsv_prealloc -= num_bytes;
4409 	} else {
4410 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
4411 				  num_bytes);
4412 		root->qgroup_meta_rsv_pertrans -= num_bytes;
4413 	}
4414 	spin_unlock(&root->qgroup_meta_rsv_lock);
4415 	return num_bytes;
4416 }
4417 
4418 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4419 			      enum btrfs_qgroup_rsv_type type, bool enforce)
4420 {
4421 	struct btrfs_fs_info *fs_info = root->fs_info;
4422 	int ret;
4423 
4424 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4425 	    !btrfs_is_fstree(btrfs_root_id(root)) || num_bytes == 0)
4426 		return 0;
4427 
4428 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4429 	trace_btrfs_qgroup_meta_reserve(root, (s64)num_bytes, type);
4430 	ret = qgroup_reserve(root, num_bytes, enforce, type);
4431 	if (ret < 0)
4432 		return ret;
4433 	/*
4434 	 * Record what we have reserved into the root.
4435 	 *
4436 	 * This is to avoid an underflow across a quota disable -> enable cycle:
4437 	 * in that case, we may try to free space we haven't reserved (since
4438 	 * quota was disabled), so record what we reserved into the root and
4439 	 * ensure a later release won't underflow this number.
4440 	 */
4441 	add_root_meta_rsv(root, num_bytes, type);
4442 	return ret;
4443 }
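/*
 * Example of the underflow the per-root counters prevent (illustrative
 * timeline):
 *
 *	1) quota disabled, reserve 1M:	qgroup_reserve() path is skipped
 *					entirely, the root records nothing
 *	2) quota gets enabled
 *	3) free the 1M:			sub_root_meta_rsv() clamps the value
 *					to what the root recorded, so 0 bytes
 *					are freed from the qgroups
 *
 * Without the clamp, step 3 would subtract 1M that was never added.
 */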
4444 
4445 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4446 				enum btrfs_qgroup_rsv_type type, bool enforce,
4447 				bool noflush)
4448 {
4449 	int ret;
4450 
4451 	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4452 	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4453 		return ret;
4454 
4455 	ret = try_flush_qgroup(root);
4456 	if (ret < 0)
4457 		return ret;
4458 	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4459 }
4460 
4461 /*
4462  * Per-transaction meta reservation should be all freed at transaction commit
4463  * time
4464  */
4465 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4466 {
4467 	struct btrfs_fs_info *fs_info = root->fs_info;
4468 
4469 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4470 	    !btrfs_is_fstree(btrfs_root_id(root)))
4471 		return;
4472 
4473 	/* TODO: Update trace point to handle such free */
4474 	trace_btrfs_qgroup_meta_free_all_pertrans(root);
4475 	/* Special value -1 means to free all reserved space */
4476 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
4477 				  BTRFS_QGROUP_RSV_META_PERTRANS);
4478 }
4479 
4480 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4481 			      enum btrfs_qgroup_rsv_type type)
4482 {
4483 	struct btrfs_fs_info *fs_info = root->fs_info;
4484 
4485 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4486 	    !btrfs_is_fstree(btrfs_root_id(root)))
4487 		return;
4488 
4489 	/*
4490 	 * A reservation for META_PREALLOC can happen before quota is enabled,
4491 	 * which can lead to underflow.
4492 	 * Here we ensure we will only free what we really have reserved.
4493 	 */
4494 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4495 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4496 	trace_btrfs_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4497 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
4498 }
4499 
4500 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4501 				int num_bytes)
4502 {
4503 	struct btrfs_qgroup *qgroup;
4504 	LIST_HEAD(qgroup_list);
4505 
4506 	if (num_bytes == 0)
4507 		return;
4508 	if (!fs_info->quota_root)
4509 		return;
4510 
4511 	spin_lock(&fs_info->qgroup_lock);
4512 	qgroup = find_qgroup_rb(fs_info, ref_root);
4513 	if (!qgroup)
4514 		goto out;
4515 
4516 	qgroup_iterator_add(&qgroup_list, qgroup);
4517 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
4518 		struct btrfs_qgroup_list *glist;
4519 
4520 		qgroup_rsv_release(fs_info, qgroup, num_bytes,
4521 				BTRFS_QGROUP_RSV_META_PREALLOC);
4522 		if (!sb_rdonly(fs_info->sb))
4523 			qgroup_rsv_add(fs_info, qgroup, num_bytes,
4524 				       BTRFS_QGROUP_RSV_META_PERTRANS);
4525 
4526 		list_for_each_entry(glist, &qgroup->groups, next_group)
4527 			qgroup_iterator_add(&qgroup_list, glist->group);
4528 	}
4529 out:
4530 	qgroup_iterator_clean(&qgroup_list);
4531 	spin_unlock(&fs_info->qgroup_lock);
4532 }
4533 
4534 /*
4535  * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
4536  *
4537  * This is called when preallocated meta reservation needs to be used.
4538  * Normally after btrfs_join_transaction() call.
4539  */
4540 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4541 {
4542 	struct btrfs_fs_info *fs_info = root->fs_info;
4543 
4544 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4545 	    !btrfs_is_fstree(btrfs_root_id(root)))
4546 		return;
4547 	/* Same as btrfs_qgroup_free_meta_prealloc() */
4548 	num_bytes = sub_root_meta_rsv(root, num_bytes,
4549 				      BTRFS_QGROUP_RSV_META_PREALLOC);
4550 	trace_btrfs_qgroup_meta_convert(root, num_bytes);
4551 	qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
4552 	if (!sb_rdonly(fs_info->sb))
4553 		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
4554 }
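/*
 * Sketch of the intended flow (hypothetical caller): metadata space is
 * reserved as PREALLOC before joining a transaction and converted once the
 * bytes are known to be used within the current transaction:
 *
 *	btrfs_qgroup_reserve_meta(root, nbytes,
 *				  BTRFS_QGROUP_RSV_META_PREALLOC, true);
 *	trans = btrfs_join_transaction(root);
 *	(... use the reserved metadata ...)
 *	btrfs_qgroup_convert_reserved_meta(root, nbytes);
 *
 * After the conversion the bytes are released automatically at commit time
 * via btrfs_qgroup_free_meta_all_pertrans().
 */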
4555 
4556 /*
4557  * Check for leaked qgroup reserved space, normally at inode destruction
4558  * time.
4559  */
4560 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4561 {
4562 	struct extent_changeset changeset;
4563 	struct ulist_node *unode;
4564 	struct ulist_iterator iter;
4565 	int ret;
4566 
4567 	extent_changeset_init(&changeset);
4568 	ret = btrfs_clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4569 					     EXTENT_QGROUP_RESERVED, &changeset);
4570 
4571 	WARN_ON(ret < 0);
4572 	if (WARN_ON(changeset.bytes_changed)) {
4573 		ULIST_ITER_INIT(&iter);
4574 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4575 			btrfs_warn(inode->root->fs_info,
4576 		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4577 				btrfs_ino(inode), unode->val, unode->aux);
4578 		}
4579 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4580 				btrfs_root_id(inode->root),
4581 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4582 
4583 	}
4584 	extent_changeset_release(&changeset);
4585 }
4586 
4587 void btrfs_qgroup_init_swapped_blocks(
4588 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4589 {
4590 	int i;
4591 
4592 	spin_lock_init(&swapped_blocks->lock);
4593 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4594 		swapped_blocks->blocks[i] = RB_ROOT;
4595 	swapped_blocks->swapped = false;
4596 }
4597 
4598 /*
4599  * Delete all swapped block records of @root.
4600  * Every record here means we skipped a full subtree scan for qgroup.
4601  *
4602  * Gets called when committing a transaction.
4603  */
4604 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4605 {
4606 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4607 	int i;
4608 
4609 	swapped_blocks = &root->swapped_blocks;
4610 
4611 	spin_lock(&swapped_blocks->lock);
4612 	if (!swapped_blocks->swapped)
4613 		goto out;
4614 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4615 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4616 		struct btrfs_qgroup_swapped_block *entry;
4617 		struct btrfs_qgroup_swapped_block *next;
4618 
4619 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4620 						     node)
4621 			kfree(entry);
4622 		swapped_blocks->blocks[i] = RB_ROOT;
4623 	}
4624 	swapped_blocks->swapped = false;
4625 out:
4626 	spin_unlock(&swapped_blocks->lock);
4627 }
4628 
4629 static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node)
4630 {
4631 	const u64 *bytenr = key;
4632 	const struct btrfs_qgroup_swapped_block *block = rb_entry(node,
4633 					  struct btrfs_qgroup_swapped_block, node);
4634 
4635 	if (block->subvol_bytenr < *bytenr)
4636 		return -1;
4637 	else if (block->subvol_bytenr > *bytenr)
4638 		return 1;
4639 
4640 	return 0;
4641 }
4642 
4643 static int qgroup_swapped_block_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
4644 {
4645 	const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new,
4646 					      struct btrfs_qgroup_swapped_block, node);
4647 
4648 	return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing);
4649 }
4650 
4651 /*
4652  * Add subtree roots record into @subvol_root.
4653  *
4654  * @subvol_root:	tree root of the subvolume tree that got swapped
4655  * @bg:			block group under balance
4656  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4657  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4658  *			BOTH POINTERS ARE BEFORE TREE SWAP
4659  * @last_snapshot:	last snapshot generation of the subvolume tree
4660  */
4661 int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
4662 		struct btrfs_block_group *bg,
4663 		struct extent_buffer *subvol_parent, int subvol_slot,
4664 		struct extent_buffer *reloc_parent, int reloc_slot,
4665 		u64 last_snapshot)
4666 {
4667 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4668 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4669 	struct btrfs_qgroup_swapped_block *block;
4670 	struct rb_node *node;
4671 	int level = btrfs_header_level(subvol_parent) - 1;
4672 	int ret = 0;
4673 
4674 	if (!btrfs_qgroup_full_accounting(fs_info))
4675 		return 0;
4676 
4677 	if (unlikely(btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4678 		     btrfs_node_ptr_generation(reloc_parent, reloc_slot))) {
4679 		btrfs_err_rl(fs_info,
4680 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4681 			__func__,
4682 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4683 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4684 		return -EUCLEAN;
4685 	}
4686 
4687 	block = kmalloc(sizeof(*block), GFP_NOFS);
4688 	if (!block) {
4689 		ret = -ENOMEM;
4690 		goto out;
4691 	}
4692 
4693 	/*
4694 	 * @reloc_parent/slot is still before swap, while @block is going to
4695 	 * record the bytenr after swap, so we do the swap here.
4696 	 */
4697 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4698 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4699 							     reloc_slot);
4700 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4701 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4702 							    subvol_slot);
4703 	block->last_snapshot = last_snapshot;
4704 	block->level = level;
4705 
4706 	/*
4707 	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
4708 	 * where no one else can modify tree blocks, thus the qgroup numbers
4709 	 * will not change no matter the value of trace_leaf.
4710 	 */
4711 	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4712 		block->trace_leaf = true;
4713 	else
4714 		block->trace_leaf = false;
4715 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4716 
4717 	/* Insert @block into @blocks */
4718 	spin_lock(&blocks->lock);
4719 	node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp);
4720 	if (node) {
4721 		struct btrfs_qgroup_swapped_block *entry;
4722 
4723 		entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4724 
4725 		if (entry->subvol_generation != block->subvol_generation ||
4726 		    entry->reloc_bytenr != block->reloc_bytenr ||
4727 		    entry->reloc_generation != block->reloc_generation) {
4728 			/*
4729 			 * Duplicate but mismatched entry found.  This shouldn't happen.
4730 			 * Marking qgroup inconsistent should be enough for end
4731 			 * users.
4732 			 */
4733 			DEBUG_WARN("duplicated but mismatched entry found");
4734 			ret = -EEXIST;
4735 		}
4736 		kfree(block);
4737 		goto out_unlock;
4738 	}
4739 	blocks->swapped = true;
4740 out_unlock:
4741 	spin_unlock(&blocks->lock);
4742 out:
4743 	if (ret < 0)
4744 		qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
4745 	return ret;
4746 }
4747 
4748 /*
4749  * Check if the tree block is a subtree root, and if so do the needed
4750  * delayed subtree trace for qgroup.
4751  *
4752  * This is called during btrfs_cow_block().
4753  */
4754 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4755 					 struct btrfs_root *root,
4756 					 struct extent_buffer *subvol_eb)
4757 {
4758 	struct btrfs_fs_info *fs_info = root->fs_info;
4759 	struct btrfs_tree_parent_check check = { 0 };
4760 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4761 	struct btrfs_qgroup_swapped_block AUTO_KFREE(block);
4762 	struct extent_buffer *reloc_eb = NULL;
4763 	struct rb_node *node;
4764 	bool swapped = false;
4765 	int level = btrfs_header_level(subvol_eb);
4766 	int ret = 0;
4767 	int i;
4768 
4769 	if (!btrfs_qgroup_full_accounting(fs_info))
4770 		return 0;
4771 	if (!btrfs_is_fstree(btrfs_root_id(root)) || !root->reloc_root)
4772 		return 0;
4773 
4774 	spin_lock(&blocks->lock);
4775 	if (!blocks->swapped) {
4776 		spin_unlock(&blocks->lock);
4777 		return 0;
4778 	}
4779 	node = rb_find(&subvol_eb->start, &blocks->blocks[level],
4780 			qgroup_swapped_block_bytenr_key_cmp);
4781 	if (!node) {
4782 		spin_unlock(&blocks->lock);
4783 		goto out;
4784 	}
4785 	block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4786 
4787 	/* Found one, remove it from @blocks first and update blocks->swapped */
4788 	rb_erase(&block->node, &blocks->blocks[level]);
4789 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4790 		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4791 			swapped = true;
4792 			break;
4793 		}
4794 	}
4795 	blocks->swapped = swapped;
4796 	spin_unlock(&blocks->lock);
4797 
4798 	check.level = block->level;
4799 	check.transid = block->reloc_generation;
4800 	check.has_first_key = true;
4801 	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4802 
4803 	/* Read out reloc subtree root */
4804 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4805 	if (IS_ERR(reloc_eb)) {
4806 		ret = PTR_ERR(reloc_eb);
4807 		reloc_eb = NULL;
4808 		goto free_out;
4809 	}
4810 	if (unlikely(!extent_buffer_uptodate(reloc_eb))) {
4811 		ret = -EIO;
4812 		goto free_out;
4813 	}
4814 
4815 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4816 			block->last_snapshot, block->trace_leaf);
4817 free_out:
4818 	free_extent_buffer(reloc_eb);
4819 out:
4820 	if (ret < 0) {
4821 		qgroup_mark_inconsistent(fs_info,
4822 				"failed to account subtree at bytenr %llu: %d",
4823 				subvol_eb->start, ret);
4824 	}
4825 	return ret;
4826 }
4827 
4828 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4829 {
4830 	struct btrfs_qgroup_extent_record *entry;
4831 	unsigned long index;
4832 
4833 	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
4834 		ulist_free(entry->old_roots);
4835 		kfree(entry);
4836 	}
4837 	xa_destroy(&trans->delayed_refs.dirty_extents);
4838 }
4839 
4840 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
4841 			      const struct btrfs_squota_delta *delta)
4842 {
4843 	int ret;
4844 	struct btrfs_qgroup *qgroup;
4845 	struct btrfs_qgroup *qg;
4846 	LIST_HEAD(qgroup_list);
4847 	u64 root = delta->root;
4848 	u64 num_bytes = delta->num_bytes;
4849 	const int sign = (delta->is_inc ? 1 : -1);
4850 
4851 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4852 		return 0;
4853 
4854 	if (!btrfs_is_fstree(root))
4855 		return 0;
4856 
4857 	/* If the extent predates enabling quotas, don't count it. */
4858 	if (delta->generation < fs_info->qgroup_enable_gen)
4859 		return 0;
4860 
4861 	spin_lock(&fs_info->qgroup_lock);
4862 	qgroup = find_qgroup_rb(fs_info, root);
4863 	if (!qgroup) {
4864 		ret = -ENOENT;
4865 		goto out;
4866 	}
4867 
4868 	ret = 0;
4869 	qgroup_iterator_add(&qgroup_list, qgroup);
4870 	list_for_each_entry(qg, &qgroup_list, iterator) {
4871 		struct btrfs_qgroup_list *glist;
4872 
4873 		qg->excl += num_bytes * sign;
4874 		qg->rfer += num_bytes * sign;
4875 		qgroup_dirty(fs_info, qg);
4876 
4877 		list_for_each_entry(glist, &qg->groups, next_group)
4878 			qgroup_iterator_add(&qgroup_list, glist->group);
4879 	}
4880 	qgroup_iterator_clean(&qgroup_list);
4881 
4882 out:
4883 	spin_unlock(&fs_info->qgroup_lock);
4884 	return ret;
4885 }
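/*
 * Worked example for the delta handling above (illustrative): a 16K extent
 * allocated (is_inc == true) by subvolume 256 adds 16K to both rfer and
 * excl of qgroup 0/256 and of any parent such as 1/100; freeing the extent
 * applies the same delta with sign == -1.  Simple mode never distinguishes
 * shared from exclusive extents, hence rfer and excl always move together.
 */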
4886