// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "tree-checker.h"

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}
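
/*
 * Example (illustrative only, numbers made up): if a qgroup has 4K of data
 * reservation left and a caller asks qgroup_rsv_release() to release 8K, the
 * counter is clamped to 0 instead of wrapping around, and with
 * CONFIG_BTRFS_DEBUG a rate-limited underflow warning is printed.  The clamp
 * keeps the accounting sane even if a release gets double-counted somewhere.
 */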

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

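/*
 * The refcnt helpers above use a per-accounting-run sequence number as an
 * implicit zero: a counter below @seq has not been touched in the current
 * run, so the updaters first snap it to @seq and then add @mod, and the
 * getters report counter - seq.
 *
 * Worked example (illustrative only): with seq = 100, updating a fresh
 * qgroup with mod = 1 sets the counter to 101, and the getter returns
 * 101 - 100 = 1.  A later run using seq = 200 sees the stale value
 * 101 < 200 and treats it as 0 again, so no reset pass over all qgroups is
 * needed between runs.
 */
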
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

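/*
 * Each btrfs_qgroup_list instance links one (member, group) pair into two
 * lists at once: member->groups via next_group and group->members via
 * next_member.  Sketch (illustrative only): after relating qgroup 0/257 to
 * 1/100, walking 0/257->groups yields 1/100 and walking 1/100->members
 * yields 0/257, both through the same allocation.
 */
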
static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}

/*
 * Add relation specified by two qgroups.
 *
 * Must be called with qgroup_lock held.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the qgroups is NULL
 *         <0       other errors
 */
static int __add_relation_rb(struct btrfs_qgroup *member, struct btrfs_qgroup *parent)
{
	struct btrfs_qgroup_list *list;

	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/*
 * Add relation specified by two qgroup ids.
 *
 * Must be called with qgroup_lock held.
 *
 * Return: 0        on success
 *         -ENOENT  if one of the ids does not exist
 *         <0       other errors
 */
static int add_relation_rb(struct btrfs_fs_info *fs_info, u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);

	return __add_relation_rb(member, parent);
}

/* Must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
{
	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
}

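/*
 * Sketch of the on-disk layout that btrfs_read_qgroup_config() walks
 * (illustrative only, key values made up): all status/info/limit items live
 * in the quota tree with objectid 0, so pass 1 starting at key (0, 0, 0)
 * visits them in key order, e.g.
 *
 *	(0, BTRFS_QGROUP_STATUS_KEY, 0)
 *	(0, BTRFS_QGROUP_INFO_KEY, 5)
 *	(0, BTRFS_QGROUP_INFO_KEY, 257)
 *	(0, BTRFS_QGROUP_LIMIT_KEY, 5)
 *	(0, BTRFS_QGROUP_LIMIT_KEY, 257)
 *
 * while pass 2 walks the (member, BTRFS_QGROUP_RELATION_KEY, parent) items,
 * which are stored in both directions and deduplicated by skipping keys with
 * objectid > offset.
 */
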
/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				qgroup_mark_inconsistent(fs_info);
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			qgroup_mark_inconsistent(fs_info);
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

/*
 * Called in close_ctree() when quota is still enabled.  This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock.  And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
				   btrfs_qgroup_level(qgroup->qgroupid),
				   btrfs_qgroup_subvolid(qgroup->qgroupid),
				   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable().
 * The first two are single-threaded paths, and for the third one we have
 * already set quota_root to NULL with qgroup_lock held, so it is safe to
 * clean up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *ulist = NULL;
	int ret = 0;
	int slot;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to enable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
	 * and before setting BTRFS_FS_QUOTA_ENABLED.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info,
			  "qgroups are currently unsupported in extent tree v2");
		return -EINVAL;
	}

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
	 * avoid lock acquisition inversion problems (reported by lockdep) between
	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
	 * start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile. If so,
	 * just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
	 * that function returns 0 (success) when the sysfs entries already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist;
	ulist = NULL;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	/*
	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
	 * a deadlock with tasks concurrently doing other qgroup operations,
	 * such as adding/removing qgroups or adding/deleting qgroup relations,
	 * because all qgroup operations first start or join a transaction and
	 * then lock the qgroup_ioctl_lock mutex.
	 * We are safe from a concurrent task trying to enable quotas by calling
	 * this function, since we are serialized by fs_info->subvol_sem.
	 */
	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	} else {
		/*
		 * We have set both BTRFS_FS_QUOTA_ENABLED and
		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
		 * -EINPROGRESS. That can happen because someone started the
		 * rescan worker by calling quota rescan ioctl before we
		 * attempted to initialize the rescan worker. Failure due to
		 * quotas disabled in the meanwhile is not possible, because
		 * we are holding a write lock on fs_info->subvol_sem, which
		 * is also acquired when disabling quotas.
		 * Ignore such error, and any other error would need to undo
		 * everything we did in the transaction we just committed.
		 */
		ASSERT(ret == -EINPROGRESS);
		ret = 0;
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	ulist_free(ulist);
	return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to disable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
	 * to lock that mutex while holding a transaction handle and the rescan
	 * worker needs to commit a transaction.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * Request qgroup rescan worker to complete and wait for it. This wait
	 * must be done before transaction start for quota disable since it may
	 * deadlock with transaction by the qgroup rescan worker.
	 */
	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clear_buffer_dirty(trans, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
			      quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);

	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting: we are updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to the qgroup reservation space, which should also be
 * added to/removed from the parent; otherwise, when the child releases
 * reservation space, the parent would underflow its reservation (in the
 * relation-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
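
/*
 * Worked example for __qgroup_excl_accounting() (illustrative only, numbers
 * made up): if qgroup 0/257 has rfer == excl == 1M and is assigned to parent
 * 1/100 (sign == 1), then 1M is added to the rfer/excl counters of 1/100 and
 * of every higher-level qgroup that contains 1/100, and 0/257's per-type
 * reservations are added to each of them as well.  Removing the relation
 * (sign == -1) subtracts the same amounts again.
 */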
/*
 * Quick path for updating qgroups with only exclusive refs.
 *
 * In that case, just updating all parents is enough; otherwise we need to
 * do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, >0 if a full rescan is needed (the
 * INCONSISTENT flag gets set in that case).
 * Return <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	unsigned int nofs_flag;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* First check whether such a qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = __add_relation_rb(member, parent);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
				 u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	bool found = false;
	unsigned int nofs_flag;
	int ret = 0;
	int ret2;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	/*
	 * If the parent/member pair doesn't exist, only try to delete the
	 * stale relation items.
	 */
	if (!member || !parent)
		goto delete_item;

	/* First check whether such a qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			found = true;
			break;
		}
	}

delete_item:
	ret = del_qgroup_relation_item(trans, src, dst);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	ret2 = del_qgroup_relation_item(trans, dst, src);
	if (ret2 < 0 && ret2 != -ENOENT)
		goto out;

	/* At least one deletion succeeded, return 0 */
	if (!ret || !ret2)
		ret = 0;

	if (found) {
		spin_lock(&fs_info->qgroup_lock);
		del_relation_rb(fs_info, src, dst);
		ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
		spin_unlock(&fs_info->qgroup_lock);
	}
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	/* Check if there are no children of this qgroup */
	if (!list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, qgroupid);
	if (ret && ret != -ENOENT)
		goto out;

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	/*
	 * Remove the qgroup from sysfs now without holding the qgroup_lock
	 * spinlock, since the sysfs_remove_group() function needs to take
	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
	 */
	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
	kfree(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/*
	 * Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat -1 as a special value which
	 * tells the kernel to clear the limit on this qgroup.
	 */
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, qgroup);
	if (ret) {
		qgroup_mark_inconsistent(fs_info);
		btrfs_info(fs_info, "unable to update quota limit for %llu",
		       qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
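
/*
 * Usage sketch (illustrative only): to cap qgroup 0/257 at 1GiB of
 * referenced space, a caller fills in a struct btrfs_qgroup_limit with
 * flags = BTRFS_QGROUP_LIMIT_MAX_RFER and max_rfer = SZ_1G before calling
 * btrfs_limit_qgroup(); passing max_rfer = (u64)-1 with the same flag set
 * clears that limit again, per the CLEAR_VALUE handling above.
 */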

int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs,
				struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	lockdep_assert_held(&delayed_refs->lock);
	trace_btrfs_qgroup_trace_extent(fs_info, record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
		} else {
			if (record->data_rsv && !entry->data_rsv) {
				entry->data_rsv = record->data_rsv;
				entry->data_rsv_refroot =
					record->data_rsv_refroot;
			}
			return 1;
		}
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return 0;
}
1795 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
1796 				   struct btrfs_qgroup_extent_record *qrecord)
1797 {
1798 	struct btrfs_backref_walk_ctx ctx = { 0 };
1799 	int ret;
1800 
1801 	/*
1802 	 * We are always called in a context where we are already holding a
1803 	 * transaction handle. Often we are called when adding a data delayed
1804 	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
1805 	 * in which case we will be holding a write lock on extent buffer from a
1806 	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
1807 	 * acquire fs_info->commit_root_sem, because that is a higher level lock
1808 	 * that must be acquired before locking any extent buffers.
1809 	 *
1810 	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
1811 	 * but we can't pass it a non-NULL transaction handle, because otherwise
1812 	 * it would not use commit roots and would lock extent buffers, causing
1813 	 * a deadlock if it ends up trying to read lock the same extent buffer
1814 	 * that was previously write locked at btrfs_truncate_inode_items().
1815 	 *
1816 	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
1817 	 * explicitly tell it to not acquire the commit_root_sem - if we are
1818 	 * holding a transaction handle we don't need its protection.
1819 	 */
1820 	ASSERT(trans != NULL);
1821 
1822 	if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
1823 		return 0;
1824 
1825 	ctx.bytenr = qrecord->bytenr;
1826 	ctx.fs_info = trans->fs_info;
1827 
1828 	ret = btrfs_find_all_roots(&ctx, true);
1829 	if (ret < 0) {
1830 		qgroup_mark_inconsistent(trans->fs_info);
1831 		btrfs_warn(trans->fs_info,
1832 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
1833 			ret);
1834 		return 0;
1835 	}
1836 
1837 	/*
1838 	 * Here we don't need to get the lock of
1839 	 * trans->transaction->delayed_refs, since inserted qrecord won't
1840 	 * be deleted, only qrecord->node may be modified (new qrecord insert)
1841 	 *
1842 	 * So modifying qrecord->old_roots is safe here
1843 	 */
1844 	qrecord->old_roots = ctx.roots;
1845 	return 0;
1846 }
1847 
1848 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
1849 			      u64 num_bytes)
1850 {
1851 	struct btrfs_fs_info *fs_info = trans->fs_info;
1852 	struct btrfs_qgroup_extent_record *record;
1853 	struct btrfs_delayed_ref_root *delayed_refs;
1854 	int ret;
1855 
1856 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
1857 	    || bytenr == 0 || num_bytes == 0)
1858 		return 0;
1859 	record = kzalloc(sizeof(*record), GFP_NOFS);
1860 	if (!record)
1861 		return -ENOMEM;
1862 
1863 	delayed_refs = &trans->transaction->delayed_refs;
1864 	record->bytenr = bytenr;
1865 	record->num_bytes = num_bytes;
1866 	record->old_roots = NULL;
1867 
1868 	spin_lock(&delayed_refs->lock);
1869 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
1870 	spin_unlock(&delayed_refs->lock);
1871 	if (ret > 0) {
1872 		kfree(record);
1873 		return 0;
1874 	}
1875 	return btrfs_qgroup_trace_extent_post(trans, record);
1876 }
1877 
1878 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
1879 				  struct extent_buffer *eb)
1880 {
1881 	struct btrfs_fs_info *fs_info = trans->fs_info;
1882 	int nr = btrfs_header_nritems(eb);
1883 	int i, extent_type, ret;
1884 	struct btrfs_key key;
1885 	struct btrfs_file_extent_item *fi;
1886 	u64 bytenr, num_bytes;
1887 
1888 	/* We can be called directly from walk_up_proc() */
1889 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1890 		return 0;
1891 
1892 	for (i = 0; i < nr; i++) {
1893 		btrfs_item_key_to_cpu(eb, &key, i);
1894 
1895 		if (key.type != BTRFS_EXTENT_DATA_KEY)
1896 			continue;
1897 
1898 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1899 		/* Filter out non-qgroup-accountable extents. */
1900 		extent_type = btrfs_file_extent_type(eb, fi);
1901 
1902 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1903 			continue;
1904 
1905 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1906 		if (!bytenr)
1907 			continue;
1908 
1909 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1910 
1911 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
1912 		if (ret)
1913 			return ret;
1914 	}
1915 	cond_resched();
1916 	return 0;
1917 }
1918 
1919 /*
1920  * Walk up the tree from the bottom, freeing leaves and any interior
1921  * nodes which have had all slots visited. If a node (leaf or
1922  * interior) is freed, the node above it will have its slot
1923  * incremented. The root node will never be freed.
1924  *
1925  * At the end of this function, we should have a path which has all
1926  * slots incremented to the next position for a search. If we need to
1927  * read a new node it will be NULL and the node above it will have the
1928  * correct slot selected for a later read.
1929  *
1930  * If we increment the root node's slot counter past the number of
1931  * elements, 1 is returned to signal completion of the search.
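 *
 * Worked example (a sketch with made-up counts): with root_level == 1 and
 * 3 items in the root node, after the caller finishes a leaf, level 0
 * always frees the finished leaf (the level == 0 test below), then
 * slots[1] is bumped to 1, which is a valid slot, so we break and the
 * caller reads the child at slot 1 as the new nodes[0]. Once slots[1]
 * reaches 3, nothing is freed at the root (level == root_level) and the
 * final check returns 1.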
1932  */
1933 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
1934 {
1935 	int level = 0;
1936 	int nr, slot;
1937 	struct extent_buffer *eb;
1938 
1939 	if (root_level == 0)
1940 		return 1;
1941 
1942 	while (level <= root_level) {
1943 		eb = path->nodes[level];
1944 		nr = btrfs_header_nritems(eb);
1945 		path->slots[level]++;
1946 		slot = path->slots[level];
1947 		if (slot >= nr || level == 0) {
1948 			/*
1949 			 * Don't free the root -  we will detect this
1950 			 * condition after our loop and return a
1951 			 * positive value for caller to stop walking the tree.
1952 			 */
1953 			if (level != root_level) {
1954 				btrfs_tree_unlock_rw(eb, path->locks[level]);
1955 				path->locks[level] = 0;
1956 
1957 				free_extent_buffer(eb);
1958 				path->nodes[level] = NULL;
1959 				path->slots[level] = 0;
1960 			}
1961 		} else {
1962 			/*
1963 			 * We have a valid slot to walk back down
1964 			 * from. Stop here so caller can process these
1965 			 * new nodes.
1966 			 */
1967 			break;
1968 		}
1969 
1970 		level++;
1971 	}
1972 
1973 	eb = path->nodes[root_level];
1974 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
1975 		return 1;
1976 
1977 	return 0;
1978 }
1979 
1980 /*
1981  * Helper function to trace a subtree tree block swap.
1982  *
1983  * The swap will happen in highest tree block, but there may be a lot of
1984  * tree blocks involved.
1985  *
1986  * For example:
1987  *  OO = Old tree blocks
1988  *  NN = New tree blocks allocated during balance
1989  *
1990  *           File tree (257)                  Reloc tree for 257
1991  * L2              OO                                NN
1992  *               /    \                            /    \
1993  * L1          OO      OO (a)                    OO      NN (a)
1994  *            / \     / \                       / \     / \
1995  * L0       OO   OO OO   OO                   OO   OO NN   NN
1996  *                  (b)  (c)                          (b)  (c)
1997  *
1998  * When calling qgroup_trace_extent_swap(), we will pass:
1999  * @src_eb = OO(a)
2000  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2001  * @dst_level = 0
2002  * @root_level = 1
2003  *
2004  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2005  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2006  *
2007  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2008  *
2009  * 1) Tree search from @src_eb
2010  *    It should act as a simplified btrfs_search_slot().
2011  *    The key for the search can be extracted from @dst_path->nodes[dst_level]
2012  *    (first key).
2013  *
2014  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2015  *    NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
2016  *    They should be marked during previous (@dst_level = 1) iteration.
2017  *
2018  * 3) Mark file extents in leaves dirty
2019  *    We don't have a good way to pick out new file extents only.
2020  *    So we still follow the old method of scanning all file extents in
2021  *    the leaf.
2022  *
2023  * This function frees us from keeping two paths, thus later we only need
2024  * to care about how to iterate all new tree blocks in the reloc tree.
2025  */
2026 static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
2027 				    struct extent_buffer *src_eb,
2028 				    struct btrfs_path *dst_path,
2029 				    int dst_level, int root_level,
2030 				    bool trace_leaf)
2031 {
2032 	struct btrfs_key key;
2033 	struct btrfs_path *src_path;
2034 	struct btrfs_fs_info *fs_info = trans->fs_info;
2035 	u32 nodesize = fs_info->nodesize;
2036 	int cur_level = root_level;
2037 	int ret;
2038 
2039 	BUG_ON(dst_level > root_level);
2040 	/* Level mismatch */
2041 	if (btrfs_header_level(src_eb) != root_level)
2042 		return -EINVAL;
2043 
2044 	src_path = btrfs_alloc_path();
2045 	if (!src_path) {
2046 		ret = -ENOMEM;
2047 		goto out;
2048 	}
2049 
2050 	if (dst_level)
2051 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2052 	else
2053 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2054 
2055 	/* For src_path */
2056 	atomic_inc(&src_eb->refs);
2057 	src_path->nodes[root_level] = src_eb;
2058 	src_path->slots[root_level] = dst_path->slots[root_level];
2059 	src_path->locks[root_level] = 0;
2060 
2061 	/* A simplified version of btrfs_search_slot() */
2062 	while (cur_level >= dst_level) {
2063 		struct btrfs_key src_key;
2064 		struct btrfs_key dst_key;
2065 
2066 		if (src_path->nodes[cur_level] == NULL) {
2067 			struct extent_buffer *eb;
2068 			int parent_slot;
2069 
2070 			eb = src_path->nodes[cur_level + 1];
2071 			parent_slot = src_path->slots[cur_level + 1];
2072 
2073 			eb = btrfs_read_node_slot(eb, parent_slot);
2074 			if (IS_ERR(eb)) {
2075 				ret = PTR_ERR(eb);
2076 				goto out;
2077 			}
2078 
2079 			src_path->nodes[cur_level] = eb;
2080 
2081 			btrfs_tree_read_lock(eb);
2082 			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2083 		}
2084 
2085 		src_path->slots[cur_level] = dst_path->slots[cur_level];
2086 		if (cur_level) {
2087 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2088 					&dst_key, dst_path->slots[cur_level]);
2089 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2090 					&src_key, src_path->slots[cur_level]);
2091 		} else {
2092 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2093 					&dst_key, dst_path->slots[cur_level]);
2094 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2095 					&src_key, src_path->slots[cur_level]);
2096 		}
2097 		/* Content mismatch, something went wrong */
2098 		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2099 			ret = -ENOENT;
2100 			goto out;
2101 		}
2102 		cur_level--;
2103 	}
2104 
2105 	/*
2106 	 * Now both @dst_path and @src_path have been populated, record the tree
2107 	 * blocks for qgroup accounting.
2108 	 */
2109 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2110 					nodesize);
2111 	if (ret < 0)
2112 		goto out;
2113 	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2114 					nodesize);
2115 	if (ret < 0)
2116 		goto out;
2117 
2118 	/* Record leaf file extents */
2119 	if (dst_level == 0 && trace_leaf) {
2120 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2121 		if (ret < 0)
2122 			goto out;
2123 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2124 	}
2125 out:
2126 	btrfs_free_path(src_path);
2127 	return ret;
2128 }
2129 
2130 /*
2131  * Helper function to do recursive generation-aware depth-first search, to
2132  * locate all new tree blocks in a subtree of reloc tree.
2133  *
2134  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2135  *         reloc tree
2136  * L2         NN (a)
2137  *          /    \
2138  * L1    OO        NN (b)
2139  *      /  \      /  \
2140  * L0  OO  OO    OO  NN
2141  *               (c) (d)
2142  * If we pass:
2143  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2144  * @cur_level = 1
2145  * @root_level = 1
2146  *
2147  * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to
2148  * trace the above tree blocks along with their counterparts in the file tree.
2149  * During the search, old tree blocks like OO(c) will be skipped, as the tree
2150  * block swap won't affect OO(c).
2151  */
2152 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
2153 					   struct extent_buffer *src_eb,
2154 					   struct btrfs_path *dst_path,
2155 					   int cur_level, int root_level,
2156 					   u64 last_snapshot, bool trace_leaf)
2157 {
2158 	struct btrfs_fs_info *fs_info = trans->fs_info;
2159 	struct extent_buffer *eb;
2160 	bool need_cleanup = false;
2161 	int ret = 0;
2162 	int i;
2163 
2164 	/* Level sanity check */
2165 	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2166 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2167 	    root_level < cur_level) {
2168 		btrfs_err_rl(fs_info,
2169 			"%s: bad levels, cur_level=%d root_level=%d",
2170 			__func__, cur_level, root_level);
2171 		return -EUCLEAN;
2172 	}
2173 
2174 	/* Read the tree block if needed */
2175 	if (dst_path->nodes[cur_level] == NULL) {
2176 		int parent_slot;
2177 		u64 child_gen;
2178 
2179 		/*
2180 		 * dst_path->nodes[root_level] must be initialized before
2181 		 * calling this function.
2182 		 */
2183 		if (cur_level == root_level) {
2184 			btrfs_err_rl(fs_info,
2185 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2186 				__func__, root_level, root_level, cur_level);
2187 			return -EUCLEAN;
2188 		}
2189 
2190 		/*
2191 		 * We need to get child blockptr/gen from parent before we can
2192 		 * read it.
2193 		 */
2194 		eb = dst_path->nodes[cur_level + 1];
2195 		parent_slot = dst_path->slots[cur_level + 1];
2196 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2197 
2198 		/* This node is old, no need to trace */
2199 		if (child_gen < last_snapshot)
2200 			goto out;
2201 
2202 		eb = btrfs_read_node_slot(eb, parent_slot);
2203 		if (IS_ERR(eb)) {
2204 			ret = PTR_ERR(eb);
2205 			goto out;
2206 		}
2207 
2208 		dst_path->nodes[cur_level] = eb;
2209 		dst_path->slots[cur_level] = 0;
2210 
2211 		btrfs_tree_read_lock(eb);
2212 		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2213 		need_cleanup = true;
2214 	}
2215 
2216 	/* Now record this tree block and its counterpart for qgroups */
2217 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2218 				       root_level, trace_leaf);
2219 	if (ret < 0)
2220 		goto cleanup;
2221 
2222 	eb = dst_path->nodes[cur_level];
2223 
2224 	if (cur_level > 0) {
2225 		/* Iterate all child tree blocks */
2226 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2227 			/* Skip old tree blocks as they won't be swapped */
2228 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2229 				continue;
2230 			dst_path->slots[cur_level] = i;
2231 
2232 			/* Recursive call (at most 7 times) */
2233 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2234 					dst_path, cur_level - 1, root_level,
2235 					last_snapshot, trace_leaf);
2236 			if (ret < 0)
2237 				goto cleanup;
2238 		}
2239 	}
2240 
2241 cleanup:
2242 	if (need_cleanup) {
2243 		/* Clean up */
2244 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2245 				     dst_path->locks[cur_level]);
2246 		free_extent_buffer(dst_path->nodes[cur_level]);
2247 		dst_path->nodes[cur_level] = NULL;
2248 		dst_path->slots[cur_level] = 0;
2249 		dst_path->locks[cur_level] = 0;
2250 	}
2251 out:
2252 	return ret;
2253 }
2254 
2255 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2256 				struct extent_buffer *src_eb,
2257 				struct extent_buffer *dst_eb,
2258 				u64 last_snapshot, bool trace_leaf)
2259 {
2260 	struct btrfs_fs_info *fs_info = trans->fs_info;
2261 	struct btrfs_path *dst_path = NULL;
2262 	int level;
2263 	int ret;
2264 
2265 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2266 		return 0;
2267 
2268 	/* Wrong parameter order */
2269 	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2270 		btrfs_err_rl(fs_info,
2271 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2272 			     btrfs_header_generation(src_eb),
2273 			     btrfs_header_generation(dst_eb));
2274 		return -EUCLEAN;
2275 	}
2276 
2277 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2278 		ret = -EIO;
2279 		goto out;
2280 	}
2281 
2282 	level = btrfs_header_level(dst_eb);
2283 	dst_path = btrfs_alloc_path();
2284 	if (!dst_path) {
2285 		ret = -ENOMEM;
2286 		goto out;
2287 	}
2288 	/* For dst_path */
2289 	atomic_inc(&dst_eb->refs);
2290 	dst_path->nodes[level] = dst_eb;
2291 	dst_path->slots[level] = 0;
2292 	dst_path->locks[level] = 0;
2293 
2294 	/* Do the generation-aware depth-first search */
2295 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2296 					      level, last_snapshot, trace_leaf);
2297 	if (ret < 0)
2298 		goto out;
2299 	ret = 0;
2300 
2301 out:
2302 	btrfs_free_path(dst_path);
2303 	if (ret < 0)
2304 		qgroup_mark_inconsistent(fs_info);
2305 	return ret;
2306 }
2307 
2308 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2309 			       struct extent_buffer *root_eb,
2310 			       u64 root_gen, int root_level)
2311 {
2312 	struct btrfs_fs_info *fs_info = trans->fs_info;
2313 	int ret = 0;
2314 	int level;
2315 	u8 drop_subptree_thres;
2316 	struct extent_buffer *eb = root_eb;
2317 	struct btrfs_path *path = NULL;
2318 
2319 	BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
2320 	BUG_ON(root_eb == NULL);
2321 
2322 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2323 		return 0;
2324 
2325 	spin_lock(&fs_info->qgroup_lock);
2326 	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2327 	spin_unlock(&fs_info->qgroup_lock);
2328 
2329 	/*
2330 	 * This function only gets called for snapshot drop; if we hit a high
2331 	 * node here, it means we are going to change ownership for quite a lot
2332 	 * of extents, which will greatly slow down btrfs_commit_transaction().
2333 	 *
2334 	 * So if we find a high node here, we just skip the accounting and
2335 	 * mark the qgroup inconsistent.
2336 	 */
2337 	if (root_level >= drop_subptree_thres) {
2338 		qgroup_mark_inconsistent(fs_info);
2339 		return 0;
2340 	}
2341 
2342 	if (!extent_buffer_uptodate(root_eb)) {
2343 		struct btrfs_tree_parent_check check = {
2344 			.has_first_key = false,
2345 			.transid = root_gen,
2346 			.level = root_level
2347 		};
2348 
2349 		ret = btrfs_read_extent_buffer(root_eb, &check);
2350 		if (ret)
2351 			goto out;
2352 	}
2353 
2354 	if (root_level == 0) {
2355 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2356 		goto out;
2357 	}
2358 
2359 	path = btrfs_alloc_path();
2360 	if (!path)
2361 		return -ENOMEM;
2362 
2363 	/*
2364 	 * Walk down the tree.  Missing extent blocks are filled in as
2365 	 * we go. Metadata is accounted every time we read a new
2366 	 * extent block.
2367 	 *
2368 	 * When we reach a leaf, we account for file extent items in it,
2369 	 * walk back up the tree (adjusting slot pointers as we go)
2370 	 * and restart the search process.
2371 	 */
2372 	atomic_inc(&root_eb->refs);	/* For path */
2373 	path->nodes[root_level] = root_eb;
2374 	path->slots[root_level] = 0;
2375 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2376 walk_down:
2377 	level = root_level;
2378 	while (level >= 0) {
2379 		if (path->nodes[level] == NULL) {
2380 			int parent_slot;
2381 			u64 child_bytenr;
2382 
2383 			/*
2384 			 * We need to get child blockptr from parent before we
2385 			 * can read it.
2386 			 */
2387 			eb = path->nodes[level + 1];
2388 			parent_slot = path->slots[level + 1];
2389 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2390 
2391 			eb = btrfs_read_node_slot(eb, parent_slot);
2392 			if (IS_ERR(eb)) {
2393 				ret = PTR_ERR(eb);
2394 				goto out;
2395 			}
2396 
2397 			path->nodes[level] = eb;
2398 			path->slots[level] = 0;
2399 
2400 			btrfs_tree_read_lock(eb);
2401 			path->locks[level] = BTRFS_READ_LOCK;
2402 
2403 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2404 							fs_info->nodesize);
2405 			if (ret)
2406 				goto out;
2407 		}
2408 
2409 		if (level == 0) {
2410 			ret = btrfs_qgroup_trace_leaf_items(trans,
2411 							    path->nodes[level]);
2412 			if (ret)
2413 				goto out;
2414 
2415 			/* Nonzero return here means we completed our search */
2416 			ret = adjust_slots_upwards(path, root_level);
2417 			if (ret)
2418 				break;
2419 
2420 			/* Restart search with new slots */
2421 			goto walk_down;
2422 		}
2423 
2424 		level--;
2425 	}
2426 
2427 	ret = 0;
2428 out:
2429 	btrfs_free_path(path);
2430 
2431 	return ret;
2432 }
2433 
2434 #define UPDATE_NEW	0
2435 #define UPDATE_OLD	1
2436 /*
2437  * Walk all of the roots that point to the bytenr and adjust their refcnts.
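 *
 * Worked example (hypothetical hierarchy): assume qgroup 1/100 contains
 * both 0/257 and 0/258, and @roots == {257, 258}. For root 257 we bump
 * the refcnt of 0/257 and then walk its parent list, bumping 1/100 too;
 * for root 258 we bump 0/258 and bump 1/100 a second time. After the
 * walk the seq-relative counts are 0/257 == 1, 0/258 == 1, 1/100 == 2,
 * i.e. a qgroup's refcnt is the number of roots that reach it.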
2438  */
2439 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2440 				struct ulist *roots, struct ulist *tmp,
2441 				struct ulist *qgroups, u64 seq, int update_old)
2442 {
2443 	struct ulist_node *unode;
2444 	struct ulist_iterator uiter;
2445 	struct ulist_node *tmp_unode;
2446 	struct ulist_iterator tmp_uiter;
2447 	struct btrfs_qgroup *qg;
2448 	int ret = 0;
2449 
2450 	if (!roots)
2451 		return 0;
2452 	ULIST_ITER_INIT(&uiter);
2453 	while ((unode = ulist_next(roots, &uiter))) {
2454 		qg = find_qgroup_rb(fs_info, unode->val);
2455 		if (!qg)
2456 			continue;
2457 
2458 		ulist_reinit(tmp);
2459 		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
2460 				GFP_ATOMIC);
2461 		if (ret < 0)
2462 			return ret;
2463 		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
2464 		if (ret < 0)
2465 			return ret;
2466 		ULIST_ITER_INIT(&tmp_uiter);
2467 		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
2468 			struct btrfs_qgroup_list *glist;
2469 
2470 			qg = unode_aux_to_qgroup(tmp_unode);
2471 			if (update_old)
2472 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2473 			else
2474 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2475 			list_for_each_entry(glist, &qg->groups, next_group) {
2476 				ret = ulist_add(qgroups, glist->group->qgroupid,
2477 						qgroup_to_aux(glist->group),
2478 						GFP_ATOMIC);
2479 				if (ret < 0)
2480 					return ret;
2481 				ret = ulist_add(tmp, glist->group->qgroupid,
2482 						qgroup_to_aux(glist->group),
2483 						GFP_ATOMIC);
2484 				if (ret < 0)
2485 					return ret;
2486 			}
2487 		}
2488 	}
2489 	return 0;
2490 }
2491 
2492 /*
2493  * Update qgroup rfer/excl counters.
2494  * Rfer update is easy, the code explains itself.
2495  *
2496  * Excl update is tricky, the update is split into 2 parts.
2497  * Part 1: Possible exclusive <-> sharing detect:
2498  *	|	A	|	!A	|
2499  *  -------------------------------------
2500  *  B	|	*	|	-	|
2501  *  -------------------------------------
2502  *  !B	|	+	|	**	|
2503  *  -------------------------------------
2504  *
2505  * Conditions:
2506  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2507  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2508  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2509  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2510  *
2511  * Results:
2512  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2513  * *: Definitely not changed.		**: Possible unchanged.
2514  *
2515  * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case.
2516  *
2517  * To make the logic clear, we first use conditions A and B to split the
2518  * combinations into 4 results.
2519  *
2520  * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
2521  * them only one variant may be 0.
2522  *
2523  * Lastly, check result **; since there are 2 variants that may be 0, split
2524  * them again (2x2).
2525  * But this time we don't need to consider other things; the code and logic
2526  * are easy to understand now.
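 *
 * Worked example (hypothetical numbers): an extent of num_bytes was shared
 * by roots {257, 258} and after the operation only root 257 references it,
 * so nr_old_roots == 2 and nr_new_roots == 1.
 *
 * For qgroup 0/257: cur_old_count == 1 < nr_old_roots (A) and
 * cur_new_count == 1 == nr_new_roots (!B), i.e. the "+" cell: shared ->
 * exclusive, so excl += num_bytes while rfer is unchanged.
 *
 * For qgroup 0/258: cur_old_count == 1 (A) and cur_new_count == 0 (B),
 * i.e. the "*" cell, so excl is unchanged; but since cur_new_count == 0
 * the rfer part subtracts num_bytes.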
2527  */
2528 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
2529 				  struct ulist *qgroups,
2530 				  u64 nr_old_roots,
2531 				  u64 nr_new_roots,
2532 				  u64 num_bytes, u64 seq)
2533 {
2534 	struct ulist_node *unode;
2535 	struct ulist_iterator uiter;
2536 	struct btrfs_qgroup *qg;
2537 	u64 cur_new_count, cur_old_count;
2538 
2539 	ULIST_ITER_INIT(&uiter);
2540 	while ((unode = ulist_next(qgroups, &uiter))) {
2541 		bool dirty = false;
2542 
2543 		qg = unode_aux_to_qgroup(unode);
2544 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2545 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2546 
2547 		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2548 					     cur_new_count);
2549 
2550 		/* Rfer update part */
2551 		if (cur_old_count == 0 && cur_new_count > 0) {
2552 			qg->rfer += num_bytes;
2553 			qg->rfer_cmpr += num_bytes;
2554 			dirty = true;
2555 		}
2556 		if (cur_old_count > 0 && cur_new_count == 0) {
2557 			qg->rfer -= num_bytes;
2558 			qg->rfer_cmpr -= num_bytes;
2559 			dirty = true;
2560 		}
2561 
2562 		/* Excl update part */
2563 		/* Exclusive/none -> shared case */
2564 		if (cur_old_count == nr_old_roots &&
2565 		    cur_new_count < nr_new_roots) {
2566 			/* Exclusive -> shared */
2567 			if (cur_old_count != 0) {
2568 				qg->excl -= num_bytes;
2569 				qg->excl_cmpr -= num_bytes;
2570 				dirty = true;
2571 			}
2572 		}
2573 
2574 		/* Shared -> exclusive/none case */
2575 		if (cur_old_count < nr_old_roots &&
2576 		    cur_new_count == nr_new_roots) {
2577 			/* Shared->exclusive */
2578 			if (cur_new_count != 0) {
2579 				qg->excl += num_bytes;
2580 				qg->excl_cmpr += num_bytes;
2581 				dirty = true;
2582 			}
2583 		}
2584 
2585 		/* Exclusive/none -> exclusive/none case */
2586 		if (cur_old_count == nr_old_roots &&
2587 		    cur_new_count == nr_new_roots) {
2588 			if (cur_old_count == 0) {
2589 				/* None -> exclusive/none */
2590 
2591 				if (cur_new_count != 0) {
2592 					/* None -> exclusive */
2593 					qg->excl += num_bytes;
2594 					qg->excl_cmpr += num_bytes;
2595 					dirty = true;
2596 				}
2597 				/* None -> none, nothing changed */
2598 			} else {
2599 				/* Exclusive -> exclusive/none */
2600 
2601 				if (cur_new_count == 0) {
2602 					/* Exclusive -> none */
2603 					qg->excl -= num_bytes;
2604 					qg->excl_cmpr -= num_bytes;
2605 					dirty = true;
2606 				}
2607 				/* Exclusive -> exclusive, nothing changed */
2608 			}
2609 		}
2610 
2611 		if (dirty)
2612 			qgroup_dirty(fs_info, qg);
2613 	}
2614 	return 0;
2615 }
2616 
2617 /*
2618  * Check if @roots could potentially be a list of fs tree roots
2619  *
2620  * Return 0 if it is definitely not a ulist of fs/subvol tree roots
2621  * Return 1 if there may be fs/subvol tree roots in the list (considering an empty
2622  *          one as well)
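 *
 * Example (illustrative IDs): {257, 258} are possible fs tree roots, so we
 * return 1; a list containing the extent tree objectid (2) is definitely
 * not shared with fs/subvol trees, so we return 0.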
2623  */
2624 static int maybe_fs_roots(struct ulist *roots)
2625 {
2626 	struct ulist_node *unode;
2627 	struct ulist_iterator uiter;
2628 
2629 	/* Empty one, still possible for fs roots */
2630 	if (!roots || roots->nnodes == 0)
2631 		return 1;
2632 
2633 	ULIST_ITER_INIT(&uiter);
2634 	unode = ulist_next(roots, &uiter);
2635 	if (!unode)
2636 		return 1;
2637 
2638 	/*
2639 	 * If it contains fs tree roots, then it must belong to fs/subvol
2640 	 * trees.
2641 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2642 	 */
2643 	return is_fstree(unode->val);
2644 }
2645 
2646 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2647 				u64 num_bytes, struct ulist *old_roots,
2648 				struct ulist *new_roots)
2649 {
2650 	struct btrfs_fs_info *fs_info = trans->fs_info;
2651 	struct ulist *qgroups = NULL;
2652 	struct ulist *tmp = NULL;
2653 	u64 seq;
2654 	u64 nr_new_roots = 0;
2655 	u64 nr_old_roots = 0;
2656 	int ret = 0;
2657 
2658 	/*
2659 	 * If quotas get disabled meanwhile, the resources need to be freed and
2660 	 * we can't just exit here.
2661 	 */
2662 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2663 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2664 		goto out_free;
2665 
2666 	if (new_roots) {
2667 		if (!maybe_fs_roots(new_roots))
2668 			goto out_free;
2669 		nr_new_roots = new_roots->nnodes;
2670 	}
2671 	if (old_roots) {
2672 		if (!maybe_fs_roots(old_roots))
2673 			goto out_free;
2674 		nr_old_roots = old_roots->nnodes;
2675 	}
2676 
2677 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2678 	if (nr_old_roots == 0 && nr_new_roots == 0)
2679 		goto out_free;
2680 
2681 	BUG_ON(!fs_info->quota_root);
2682 
2683 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2684 					num_bytes, nr_old_roots, nr_new_roots);
2685 
2686 	qgroups = ulist_alloc(GFP_NOFS);
2687 	if (!qgroups) {
2688 		ret = -ENOMEM;
2689 		goto out_free;
2690 	}
2691 	tmp = ulist_alloc(GFP_NOFS);
2692 	if (!tmp) {
2693 		ret = -ENOMEM;
2694 		goto out_free;
2695 	}
2696 
2697 	mutex_lock(&fs_info->qgroup_rescan_lock);
2698 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2699 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2700 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2701 			ret = 0;
2702 			goto out_free;
2703 		}
2704 	}
2705 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2706 
2707 	spin_lock(&fs_info->qgroup_lock);
2708 	seq = fs_info->qgroup_seq;
2709 
2710 	/* Update old refcnts using old_roots */
2711 	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2712 				   UPDATE_OLD);
2713 	if (ret < 0)
2714 		goto out;
2715 
2716 	/* Update new refcnts using new_roots */
2717 	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2718 				   UPDATE_NEW);
2719 	if (ret < 0)
2720 		goto out;
2721 
2722 	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2723 			       num_bytes, seq);
2724 
2725 	/*
2726 	 * Bump qgroup_seq to avoid seq overlap; the refcnts above are relative to @seq.
2727 	 */
2728 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2729 out:
2730 	spin_unlock(&fs_info->qgroup_lock);
2731 out_free:
2732 	ulist_free(tmp);
2733 	ulist_free(qgroups);
2734 	ulist_free(old_roots);
2735 	ulist_free(new_roots);
2736 	return ret;
2737 }
2738 
2739 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2740 {
2741 	struct btrfs_fs_info *fs_info = trans->fs_info;
2742 	struct btrfs_qgroup_extent_record *record;
2743 	struct btrfs_delayed_ref_root *delayed_refs;
2744 	struct ulist *new_roots = NULL;
2745 	struct rb_node *node;
2746 	u64 num_dirty_extents = 0;
2747 	u64 qgroup_to_skip;
2748 	int ret = 0;
2749 
2750 	delayed_refs = &trans->transaction->delayed_refs;
2751 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
2752 	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2753 		record = rb_entry(node, struct btrfs_qgroup_extent_record,
2754 				  node);
2755 
2756 		num_dirty_extents++;
2757 		trace_btrfs_qgroup_account_extents(fs_info, record);
2758 
2759 		if (!ret && !(fs_info->qgroup_flags &
2760 			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
2761 			struct btrfs_backref_walk_ctx ctx = { 0 };
2762 
2763 			ctx.bytenr = record->bytenr;
2764 			ctx.fs_info = fs_info;
2765 
2766 			/*
2767 			 * Old roots should be searched when inserting qgroup
2768 			 * extent record.
2769 			 *
2770 			 * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
2771 			 * we may have some records inserted during
2772 			 * NO_ACCOUNTING (thus no old_roots populated), but
2773 			 * later we start rescan, which clears NO_ACCOUNTING,
2774 			 * leaving some inserted records without old_roots
2775 			 * populated.
2776 			 *
2777 			 * Those cases are rare and should not cause too much
2778 			 * time spent during commit_transaction().
2779 			 */
2780 			if (!record->old_roots) {
2781 				/* Search commit root to find old_roots */
2782 				ret = btrfs_find_all_roots(&ctx, false);
2783 				if (ret < 0)
2784 					goto cleanup;
2785 				record->old_roots = ctx.roots;
2786 				ctx.roots = NULL;
2787 			}
2788 
2789 			/* Free the reserved data space */
2790 			btrfs_qgroup_free_refroot(fs_info,
2791 					record->data_rsv_refroot,
2792 					record->data_rsv,
2793 					BTRFS_QGROUP_RSV_DATA);
2794 			/*
2795 			 * Use BTRFS_SEQ_LAST as time_seq to do special search,
2796 			 * which doesn't lock the tree or delayed_refs and
2797 			 * searches the current root. It's safe inside commit_transaction().
2798 			 */
2799 			ctx.trans = trans;
2800 			ctx.time_seq = BTRFS_SEQ_LAST;
2801 			ret = btrfs_find_all_roots(&ctx, false);
2802 			if (ret < 0)
2803 				goto cleanup;
2804 			new_roots = ctx.roots;
2805 			if (qgroup_to_skip) {
2806 				ulist_del(new_roots, qgroup_to_skip, 0);
2807 				ulist_del(record->old_roots, qgroup_to_skip,
2808 					  0);
2809 			}
2810 			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
2811 							  record->num_bytes,
2812 							  record->old_roots,
2813 							  new_roots);
2814 			record->old_roots = NULL;
2815 			new_roots = NULL;
2816 		}
2817 cleanup:
2818 		ulist_free(record->old_roots);
2819 		ulist_free(new_roots);
2820 		new_roots = NULL;
2821 		rb_erase(node, &delayed_refs->dirty_extent_root);
2822 		kfree(record);
2824 	}
2825 	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
2826 				       num_dirty_extents);
2827 	return ret;
2828 }
2829 
2830 /*
2831  * Called from commit_transaction(). Writes all changed qgroups to disk.
2832  */
2833 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
2834 {
2835 	struct btrfs_fs_info *fs_info = trans->fs_info;
2836 	int ret = 0;
2837 
2838 	if (!fs_info->quota_root)
2839 		return ret;
2840 
2841 	spin_lock(&fs_info->qgroup_lock);
2842 	while (!list_empty(&fs_info->dirty_qgroups)) {
2843 		struct btrfs_qgroup *qgroup;
2844 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
2845 					  struct btrfs_qgroup, dirty);
2846 		list_del_init(&qgroup->dirty);
2847 		spin_unlock(&fs_info->qgroup_lock);
2848 		ret = update_qgroup_info_item(trans, qgroup);
2849 		if (ret)
2850 			qgroup_mark_inconsistent(fs_info);
2851 		ret = update_qgroup_limit_item(trans, qgroup);
2852 		if (ret)
2853 			qgroup_mark_inconsistent(fs_info);
2854 		spin_lock(&fs_info->qgroup_lock);
2855 	}
2856 	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2857 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2858 	else
2859 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2860 	spin_unlock(&fs_info->qgroup_lock);
2861 
2862 	ret = update_qgroup_status_item(trans);
2863 	if (ret)
2864 		qgroup_mark_inconsistent(fs_info);
2865 
2866 	return ret;
2867 }
2868 
2869 /*
2870  * Copy the accounting information between qgroups. This is necessary
2871  * when a snapshot or a subvolume is created. Throwing an error will
2872  * cause a transaction abort so we take extra care here to only error
2873  * when a readonly fs is a reasonable outcome.
2874  */
2875 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2876 			 u64 objectid, struct btrfs_qgroup_inherit *inherit)
2877 {
2878 	int ret = 0;
2879 	int i;
2880 	u64 *i_qgroups;
2881 	bool committing = false;
2882 	struct btrfs_fs_info *fs_info = trans->fs_info;
2883 	struct btrfs_root *quota_root;
2884 	struct btrfs_qgroup *srcgroup;
2885 	struct btrfs_qgroup *dstgroup;
2886 	bool need_rescan = false;
2887 	u32 level_size = 0;
2888 	u64 nums;
2889 
2890 	/*
2891 	 * There are only two callers of this function.
2892 	 * One is in create_subvol() in the ioctl context, which needs to hold
2893 	 * One in create_subvol() in the ioctl context, which needs to hold
2894 	 * the qgroup_ioctl_lock.
2895 	 * The other one is in create_pending_snapshot(), where no other qgroup
2896 	 * code can modify the fs, as they all need to either start a new trans
2897 	 * or hold a trans handle; thus we don't need to hold the
2898 	 * qgroup_ioctl_lock.
2899 	 * This avoids a long and complex lock chain and makes lockdep happy.
2900 	 * This would avoid long and complex lock chain and make lockdep happy.
2901 	 */
2902 	spin_lock(&fs_info->trans_lock);
2903 	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
2904 		committing = true;
2905 	spin_unlock(&fs_info->trans_lock);
2906 
2907 	if (!committing)
2908 		mutex_lock(&fs_info->qgroup_ioctl_lock);
2909 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2910 		goto out;
2911 
2912 	quota_root = fs_info->quota_root;
2913 	if (!quota_root) {
2914 		ret = -EINVAL;
2915 		goto out;
2916 	}
2917 
2918 	if (inherit) {
2919 		i_qgroups = (u64 *)(inherit + 1);
2920 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2921 		       2 * inherit->num_excl_copies;
2922 		for (i = 0; i < nums; ++i) {
2923 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2924 
2925 			/*
2926 			 * Zero out invalid groups so we can ignore
2927 			 * them later.
2928 			 */
2929 			if (!srcgroup ||
2930 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2931 				*i_qgroups = 0ULL;
2932 
2933 			++i_qgroups;
2934 		}
2935 	}
2936 
2937 	/*
2938 	 * create a tracking group for the subvol itself
2939 	 */
2940 	ret = add_qgroup_item(trans, quota_root, objectid);
2941 	if (ret)
2942 		goto out;
2943 
2944 	/*
2945 	 * add qgroup to all inherited groups
2946 	 */
2947 	if (inherit) {
2948 		i_qgroups = (u64 *)(inherit + 1);
2949 		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2950 			if (*i_qgroups == 0)
2951 				continue;
2952 			ret = add_qgroup_relation_item(trans, objectid,
2953 						       *i_qgroups);
2954 			if (ret && ret != -EEXIST)
2955 				goto out;
2956 			ret = add_qgroup_relation_item(trans, *i_qgroups,
2957 						       objectid);
2958 			if (ret && ret != -EEXIST)
2959 				goto out;
2960 		}
2961 		ret = 0;
2962 	}
2963 
2965 	spin_lock(&fs_info->qgroup_lock);
2966 
2967 	dstgroup = add_qgroup_rb(fs_info, objectid);
2968 	if (IS_ERR(dstgroup)) {
2969 		ret = PTR_ERR(dstgroup);
2970 		goto unlock;
2971 	}
2972 
2973 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2974 		dstgroup->lim_flags = inherit->lim.flags;
2975 		dstgroup->max_rfer = inherit->lim.max_rfer;
2976 		dstgroup->max_excl = inherit->lim.max_excl;
2977 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2978 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
2979 
2980 		qgroup_dirty(fs_info, dstgroup);
2981 	}
2982 
2983 	if (srcid) {
2984 		srcgroup = find_qgroup_rb(fs_info, srcid);
2985 		if (!srcgroup)
2986 			goto unlock;
2987 
2988 		/*
2989 		 * We call inherit after we clone the root in order to make sure
2990 		 * our counts don't go crazy, so at this point the only
2991 		 * difference between the two roots should be the root node.
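		 *
		 * Illustrative numbers: if the source subvolume referred to
		 * 10 MiB with a 16 KiB nodesize, the snapshot now refers to
		 * the same 10 MiB, but each root exclusively owns only its
		 * own COWed root node, hence excl == 16 KiB on both sides.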
2992 		 */
2993 		level_size = fs_info->nodesize;
2994 		dstgroup->rfer = srcgroup->rfer;
2995 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2996 		dstgroup->excl = level_size;
2997 		dstgroup->excl_cmpr = level_size;
2998 		srcgroup->excl = level_size;
2999 		srcgroup->excl_cmpr = level_size;
3000 
3001 		/* inherit the limit info */
3002 		dstgroup->lim_flags = srcgroup->lim_flags;
3003 		dstgroup->max_rfer = srcgroup->max_rfer;
3004 		dstgroup->max_excl = srcgroup->max_excl;
3005 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3006 		dstgroup->rsv_excl = srcgroup->rsv_excl;
3007 
3008 		qgroup_dirty(fs_info, dstgroup);
3009 		qgroup_dirty(fs_info, srcgroup);
3010 	}
3011 
3012 	if (!inherit)
3013 		goto unlock;
3014 
3015 	i_qgroups = (u64 *)(inherit + 1);
3016 	for (i = 0; i < inherit->num_qgroups; ++i) {
3017 		if (*i_qgroups) {
3018 			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
3019 			if (ret)
3020 				goto unlock;
3021 		}
3022 		++i_qgroups;
3023 
3024 		/*
3025 		 * If we're doing a snapshot, and adding the snapshot to a new
3026 		 * qgroup, the numbers are guaranteed to be incorrect.
3027 		 */
3028 		if (srcid)
3029 			need_rescan = true;
3030 	}
3031 
3032 	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
3033 		struct btrfs_qgroup *src;
3034 		struct btrfs_qgroup *dst;
3035 
3036 		if (!i_qgroups[0] || !i_qgroups[1])
3037 			continue;
3038 
3039 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3040 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3041 
3042 		if (!src || !dst) {
3043 			ret = -EINVAL;
3044 			goto unlock;
3045 		}
3046 
3047 		dst->rfer = src->rfer - level_size;
3048 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3049 
3050 		/* Manually tweaking numbers certainly needs a rescan */
3051 		need_rescan = true;
3052 	}
3053 	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
3054 		struct btrfs_qgroup *src;
3055 		struct btrfs_qgroup *dst;
3056 
3057 		if (!i_qgroups[0] || !i_qgroups[1])
3058 			continue;
3059 
3060 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3061 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3062 
3063 		if (!src || !dst) {
3064 			ret = -EINVAL;
3065 			goto unlock;
3066 		}
3067 
3068 		dst->excl = src->excl + level_size;
3069 		dst->excl_cmpr = src->excl_cmpr + level_size;
3070 		need_rescan = true;
3071 	}
3072 
3073 unlock:
3074 	spin_unlock(&fs_info->qgroup_lock);
3075 	if (!ret)
3076 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3077 out:
3078 	if (!committing)
3079 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3080 	if (need_rescan)
3081 		qgroup_mark_inconsistent(fs_info);
3082 	return ret;
3083 }
3084 
3085 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3086 {
3087 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3088 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3089 		return false;
3090 
3091 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3092 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3093 		return false;
3094 
3095 	return true;
3096 }
3097 
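/*
 * Worked example for the check above (illustrative numbers): with
 * max_rfer == 1 MiB, rfer == 896 KiB and 64 KiB already reserved, asking
 * for another 128 KiB gives 64K + 896K + 128K > 1M, so
 * qgroup_check_limits() returns false and qgroup_reserve() fails with
 * -EDQUOT when enforcing.
 */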
3098 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3099 			  enum btrfs_qgroup_rsv_type type)
3100 {
3101 	struct btrfs_qgroup *qgroup;
3102 	struct btrfs_fs_info *fs_info = root->fs_info;
3103 	u64 ref_root = root->root_key.objectid;
3104 	int ret = 0;
3105 	struct ulist_node *unode;
3106 	struct ulist_iterator uiter;
3107 
3108 	if (!is_fstree(ref_root))
3109 		return 0;
3110 
3111 	if (num_bytes == 0)
3112 		return 0;
3113 
3114 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3115 	    capable(CAP_SYS_RESOURCE))
3116 		enforce = false;
3117 
3118 	spin_lock(&fs_info->qgroup_lock);
3119 	if (!fs_info->quota_root)
3120 		goto out;
3121 
3122 	qgroup = find_qgroup_rb(fs_info, ref_root);
3123 	if (!qgroup)
3124 		goto out;
3125 
3126 	/*
3127 	 * In a first step, check all affected qgroups to see whether any of
3128 	 * their limits would be exceeded.
3129 	 */
3130 	ulist_reinit(fs_info->qgroup_ulist);
3131 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3132 			qgroup_to_aux(qgroup), GFP_ATOMIC);
3133 	if (ret < 0)
3134 		goto out;
3135 	ULIST_ITER_INIT(&uiter);
3136 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3137 		struct btrfs_qgroup *qg;
3138 		struct btrfs_qgroup_list *glist;
3139 
3140 		qg = unode_aux_to_qgroup(unode);
3141 
3142 		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
3143 			ret = -EDQUOT;
3144 			goto out;
3145 		}
3146 
3147 		list_for_each_entry(glist, &qg->groups, next_group) {
3148 			ret = ulist_add(fs_info->qgroup_ulist,
3149 					glist->group->qgroupid,
3150 					qgroup_to_aux(glist->group), GFP_ATOMIC);
3151 			if (ret < 0)
3152 				goto out;
3153 		}
3154 	}
3155 	ret = 0;
3156 	/*
3157 	 * No limits exceeded, now record the reservation in all qgroups.
3158 	 */
3159 	ULIST_ITER_INIT(&uiter);
3160 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3161 		struct btrfs_qgroup *qg;
3162 
3163 		qg = unode_aux_to_qgroup(unode);
3164 
3165 		qgroup_rsv_add(fs_info, qg, num_bytes, type);
3166 	}
3167 
3168 out:
3169 	spin_unlock(&fs_info->qgroup_lock);
3170 	return ret;
3171 }
3172 
3173 /*
3174  * Free @num_bytes of reserved space of @type for a qgroup (normally a
3175  * level 0 qgroup).
3176  *
3177  * Will handle all higher level qgroups too.
3178  *
3179  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3180  * This special case is only used for META_PERTRANS type.
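 *
 * A hedged example of the special case: the pertrans cleanup done at
 * transaction commit (e.g. btrfs_qgroup_free_meta_all_pertrans()) passes
 * (u64)-1, and below we substitute the level 0 qgroup's current
 * rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] as the real amount to
 * release across the whole hierarchy.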
3181  */
3182 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3183 			       u64 ref_root, u64 num_bytes,
3184 			       enum btrfs_qgroup_rsv_type type)
3185 {
3186 	struct btrfs_qgroup *qgroup;
3187 	struct ulist_node *unode;
3188 	struct ulist_iterator uiter;
3189 	int ret = 0;
3190 
3191 	if (!is_fstree(ref_root))
3192 		return;
3193 
3194 	if (num_bytes == 0)
3195 		return;
3196 
3197 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3198 		WARN(1, "%s: Invalid type to free", __func__);
3199 		return;
3200 	}
3201 	spin_lock(&fs_info->qgroup_lock);
3202 
3203 	if (!fs_info->quota_root)
3204 		goto out;
3205 
3206 	qgroup = find_qgroup_rb(fs_info, ref_root);
3207 	if (!qgroup)
3208 		goto out;
3209 
3210 	if (num_bytes == (u64)-1)
3211 		/*
3212 		 * We're freeing all pertrans rsv, get the reserved value from
3213 		 * the level 0 qgroup as the real num_bytes to free.
3214 		 */
3215 		num_bytes = qgroup->rsv.values[type];
3216 
3217 	ulist_reinit(fs_info->qgroup_ulist);
3218 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3219 			qgroup_to_aux(qgroup), GFP_ATOMIC);
3220 	if (ret < 0)
3221 		goto out;
3222 	ULIST_ITER_INIT(&uiter);
3223 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3224 		struct btrfs_qgroup *qg;
3225 		struct btrfs_qgroup_list *glist;
3226 
3227 		qg = unode_aux_to_qgroup(unode);
3228 
3229 		qgroup_rsv_release(fs_info, qg, num_bytes, type);
3230 
3231 		list_for_each_entry(glist, &qg->groups, next_group) {
3232 			ret = ulist_add(fs_info->qgroup_ulist,
3233 					glist->group->qgroupid,
3234 					qgroup_to_aux(glist->group), GFP_ATOMIC);
3235 			if (ret < 0)
3236 				goto out;
3237 		}
3238 	}
3239 
3240 out:
3241 	spin_unlock(&fs_info->qgroup_lock);
3242 }
3243 
3244 /*
3245  * Check if the leaf is the last leaf, which means all node pointers
3246  * are at their last position.
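 *
 * Example: for a path with nritems == 5 at level 1 and nritems == 3 at
 * level 2, the current leaf is the last one iff slots[1] == 4 and
 * slots[2] == 2.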
3247  */
3248 static bool is_last_leaf(struct btrfs_path *path)
3249 {
3250 	int i;
3251 
3252 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3253 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3254 			return false;
3255 	}
3256 	return true;
3257 }
3258 
3259 /*
3260  * Returns < 0 on error, 0 when more leaves are to be scanned.
3261  * Returns 1 when done.
3262  */
3263 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3264 			      struct btrfs_path *path)
3265 {
3266 	struct btrfs_fs_info *fs_info = trans->fs_info;
3267 	struct btrfs_root *extent_root;
3268 	struct btrfs_key found;
3269 	struct extent_buffer *scratch_leaf = NULL;
3270 	u64 num_bytes;
3271 	bool done;
3272 	int slot;
3273 	int ret;
3274 
3275 	mutex_lock(&fs_info->qgroup_rescan_lock);
3276 	extent_root = btrfs_extent_root(fs_info,
3277 				fs_info->qgroup_rescan_progress.objectid);
3278 	ret = btrfs_search_slot_for_read(extent_root,
3279 					 &fs_info->qgroup_rescan_progress,
3280 					 path, 1, 0);
3281 
3282 	btrfs_debug(fs_info,
3283 		"current progress key (%llu %u %llu), search_slot ret %d",
3284 		fs_info->qgroup_rescan_progress.objectid,
3285 		fs_info->qgroup_rescan_progress.type,
3286 		fs_info->qgroup_rescan_progress.offset, ret);
3287 
3288 	if (ret) {
3289 		/*
3290 		 * The rescan is about to end, we will not be scanning any
3291 		 * further blocks. We cannot unset the RESCAN flag here, because
3292 		 * we want to commit the transaction if everything went well.
3293 		 * To make the live accounting work in this phase, we set our
3294 		 * scan progress pointer such that every real extent objectid
3295 		 * will be smaller.
3296 		 */
3297 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3298 		btrfs_release_path(path);
3299 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3300 		return ret;
3301 	}
3302 	done = is_last_leaf(path);
3303 
3304 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3305 			      btrfs_header_nritems(path->nodes[0]) - 1);
3306 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3307 
3308 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3309 	if (!scratch_leaf) {
3310 		ret = -ENOMEM;
3311 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3312 		goto out;
3313 	}
3314 	slot = path->slots[0];
3315 	btrfs_release_path(path);
3316 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3317 
3318 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3319 		struct btrfs_backref_walk_ctx ctx = { 0 };
3320 
3321 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3322 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3323 		    found.type != BTRFS_METADATA_ITEM_KEY)
3324 			continue;
3325 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3326 			num_bytes = fs_info->nodesize;
3327 		else
3328 			num_bytes = found.offset;
3329 
3330 		ctx.bytenr = found.objectid;
3331 		ctx.fs_info = fs_info;
3332 
3333 		ret = btrfs_find_all_roots(&ctx, false);
3334 		if (ret < 0)
3335 			goto out;
3336 		/* For rescan, just pass old_roots as NULL */
3337 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3338 						  num_bytes, NULL, ctx.roots);
3339 		if (ret < 0)
3340 			goto out;
3341 	}
3342 out:
3343 	if (scratch_leaf)
3344 		free_extent_buffer(scratch_leaf);
3345 
3346 	if (done && !ret) {
3347 		ret = 1;
3348 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3349 	}
3350 	return ret;
3351 }
3352 
3353 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3354 {
3355 	return btrfs_fs_closing(fs_info) ||
3356 		test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state) ||
3357 		!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
3358 			  fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3359 }
3360 
3361 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3362 {
3363 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3364 						     qgroup_rescan_work);
3365 	struct btrfs_path *path;
3366 	struct btrfs_trans_handle *trans = NULL;
3367 	int err = -ENOMEM;
3368 	int ret = 0;
3369 	bool stopped = false;
3370 	bool did_leaf_rescans = false;
3371 
3372 	path = btrfs_alloc_path();
3373 	if (!path)
3374 		goto out;
3375 	/*
3376 	 * Rescan should only search the commit root, and any later difference
3377 	 * should be recorded by the qgroup tracing code
3378 	 */
3379 	path->search_commit_root = 1;
3380 	path->skip_locking = 1;
3381 
3382 	err = 0;
3383 	while (!err && !(stopped = rescan_should_stop(fs_info))) {
3384 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3385 		if (IS_ERR(trans)) {
3386 			err = PTR_ERR(trans);
3387 			break;
3388 		}
3389 
3390 		err = qgroup_rescan_leaf(trans, path);
3391 		did_leaf_rescans = true;
3392 
3393 		if (err > 0)
3394 			btrfs_commit_transaction(trans);
3395 		else
3396 			btrfs_end_transaction(trans);
3397 	}
3398 
3399 out:
3400 	btrfs_free_path(path);
3401 
3402 	mutex_lock(&fs_info->qgroup_rescan_lock);
3403 	if (err > 0 &&
3404 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3405 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3406 	} else if (err < 0 || stopped) {
3407 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3408 	}
3409 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3410 
3411 	/*
3412 	 * Only update status, since the previous part has already updated the
3413 	 * qgroup info, and only if we did any actual work. This also prevents a
3414 	 * race with a concurrent quota disable, which has already set
3415 	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3416 	 * btrfs_quota_disable().
3417 	 */
3418 	if (did_leaf_rescans) {
3419 		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3420 		if (IS_ERR(trans)) {
3421 			err = PTR_ERR(trans);
3422 			trans = NULL;
3423 			btrfs_err(fs_info,
3424 				  "failed to start transaction for status update: %d",
3425 				  err);
3426 		}
3427 	} else {
3428 		trans = NULL;
3429 	}
3430 
3431 	mutex_lock(&fs_info->qgroup_rescan_lock);
3432 	if (!stopped ||
3433 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3434 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3435 	if (trans) {
3436 		ret = update_qgroup_status_item(trans);
3437 		if (ret < 0) {
3438 			err = ret;
3439 			btrfs_err(fs_info, "failed to update qgroup status: %d",
3440 				  err);
3441 		}
3442 	}
3443 	fs_info->qgroup_rescan_running = false;
3444 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3445 	complete_all(&fs_info->qgroup_rescan_completion);
3446 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3447 
3448 	if (!trans)
3449 		return;
3450 
3451 	btrfs_end_transaction(trans);
3452 
3453 	if (stopped) {
3454 		btrfs_info(fs_info, "qgroup scan paused");
3455 	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3456 		btrfs_info(fs_info, "qgroup scan cancelled");
3457 	} else if (err >= 0) {
3458 		btrfs_info(fs_info, "qgroup scan completed%s",
3459 			err > 0 ? " (inconsistency flag cleared)" : "");
3460 	} else {
3461 		btrfs_err(fs_info, "qgroup scan failed with %d", err);
3462 	}
3463 }
3464 
3465 /*
3466  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3467  * memory required for the rescan context.
3468  */
3469 static int
3470 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3471 		   int init_flags)
3472 {
3473 	int ret = 0;
3474 
3475 	if (!init_flags) {
3476 		/* we're resuming qgroup rescan at mount time */
3477 		if (!(fs_info->qgroup_flags &
3478 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3479 			btrfs_warn(fs_info,
3480 			"qgroup rescan init failed, qgroup rescan is not queued");
3481 			ret = -EINVAL;
3482 		} else if (!(fs_info->qgroup_flags &
3483 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3484 			btrfs_warn(fs_info,
3485 			"qgroup rescan init failed, qgroup is not enabled");
3486 			ret = -EINVAL;
3487 		}
3488 
3489 		if (ret)
3490 			return ret;
3491 	}
3492 
3493 	mutex_lock(&fs_info->qgroup_rescan_lock);
3494 
3495 	if (init_flags) {
3496 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3497 			btrfs_warn(fs_info,
3498 				   "qgroup rescan is already in progress");
3499 			ret = -EINPROGRESS;
3500 		} else if (!(fs_info->qgroup_flags &
3501 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3502 			btrfs_warn(fs_info,
3503 			"qgroup rescan init failed, qgroup is not enabled");
3504 			ret = -EINVAL;
3505 		} else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
3506 			/* Quota disable is in progress */
3507 			ret = -EBUSY;
3508 		}
3509 
3510 		if (ret) {
3511 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3512 			return ret;
3513 		}
3514 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3515 	}
3516 
3517 	memset(&fs_info->qgroup_rescan_progress, 0,
3518 		sizeof(fs_info->qgroup_rescan_progress));
3519 	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
3520 				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
3521 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3522 	init_completion(&fs_info->qgroup_rescan_completion);
3523 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3524 
3525 	btrfs_init_work(&fs_info->qgroup_rescan_work,
3526 			btrfs_qgroup_rescan_worker, NULL, NULL);
3527 	return 0;
3528 }
3529 
3530 static void
3531 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3532 {
3533 	struct rb_node *n;
3534 	struct btrfs_qgroup *qgroup;
3535 
3536 	spin_lock(&fs_info->qgroup_lock);
3537 	/* clear all current qgroup tracking information */
3538 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3539 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
3540 		qgroup->rfer = 0;
3541 		qgroup->rfer_cmpr = 0;
3542 		qgroup->excl = 0;
3543 		qgroup->excl_cmpr = 0;
3544 		qgroup_dirty(fs_info, qgroup);
3545 	}
3546 	spin_unlock(&fs_info->qgroup_lock);
3547 }
3548 
3549 int
3550 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3551 {
3552 	int ret = 0;
3553 	struct btrfs_trans_handle *trans;
3554 
3555 	ret = qgroup_rescan_init(fs_info, 0, 1);
3556 	if (ret)
3557 		return ret;
3558 
3559 	/*
3560 	 * We have set the rescan_progress to 0, which means no more
3561 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
3562 	 * However, btrfs_qgroup_account_ref may be running right after its call
3563 	 * to btrfs_find_all_roots, in which case it would still do the
3564 	 * accounting.
3565 	 * To solve this, we're committing the transaction, which will
3566 	 * ensure we run all delayed refs and only after that, we are
3567 	 * going to clear all tracking information for a clean start.
3568 	 */
3569 
3570 	trans = btrfs_join_transaction(fs_info->fs_root);
3571 	if (IS_ERR(trans)) {
3572 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3573 		return PTR_ERR(trans);
3574 	}
3575 	ret = btrfs_commit_transaction(trans);
3576 	if (ret) {
3577 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3578 		return ret;
3579 	}
3580 
3581 	qgroup_rescan_zero_tracking(fs_info);
3582 
3583 	mutex_lock(&fs_info->qgroup_rescan_lock);
3584 	fs_info->qgroup_rescan_running = true;
3585 	btrfs_queue_work(fs_info->qgroup_rescan_workers,
3586 			 &fs_info->qgroup_rescan_work);
3587 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3588 
3589 	return 0;
3590 }
3591 
3592 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
3593 				     bool interruptible)
3594 {
3595 	int running;
3596 	int ret = 0;
3597 
3598 	mutex_lock(&fs_info->qgroup_rescan_lock);
3599 	running = fs_info->qgroup_rescan_running;
3600 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3601 
3602 	if (!running)
3603 		return 0;
3604 
3605 	if (interruptible)
3606 		ret = wait_for_completion_interruptible(
3607 					&fs_info->qgroup_rescan_completion);
3608 	else
3609 		wait_for_completion(&fs_info->qgroup_rescan_completion);
3610 
3611 	return ret;
3612 }
3613 
3614 /*
3615  * This is only called from open_ctree() where we're still single threaded, thus
3616  * locking is omitted here.
3617  */
3618 void
3619 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
3620 {
3621 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3622 		mutex_lock(&fs_info->qgroup_rescan_lock);
3623 		fs_info->qgroup_rescan_running = true;
3624 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
3625 				 &fs_info->qgroup_rescan_work);
3626 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3627 	}
3628 }
3629 
3630 #define rbtree_iterate_from_safe(node, next, start)				\
3631 	for (node = start; node && ({ next = rb_next(node); 1; }); node = next)
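
/*
 * A hedged usage sketch of the macro: @next is sampled in the loop
 * condition before the body runs, so the body may safely erase @node
 * from the tree, e.g.:
 *
 *	rbtree_iterate_from_safe(node, next, rb_first(root)) {
 *		entry = rb_entry(node, struct ulist_node, rb_node);
 *		... it is safe to rb_erase(node, root) here ...
 *	}
 */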
3632 
3633 static int qgroup_unreserve_range(struct btrfs_inode *inode,
3634 				  struct extent_changeset *reserved, u64 start,
3635 				  u64 len)
3636 {
3637 	struct rb_node *node;
3638 	struct rb_node *next;
3639 	struct ulist_node *entry;
3640 	int ret = 0;
3641 
3642 	node = reserved->range_changed.root.rb_node;
3643 	if (!node)
3644 		return 0;
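	/*
	 * Descend to the last node visited around @start; the rb_prev()
	 * fixup below then backs up one entry when we landed past @start,
	 * so iteration begins at the first range that can overlap
	 * [start, start + len).
	 */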
3645 	while (node) {
3646 		entry = rb_entry(node, struct ulist_node, rb_node);
3647 		if (entry->val < start)
3648 			node = node->rb_right;
3649 		else
3650 			node = node->rb_left;
3651 	}
3652 
3653 	if (entry->val > start && rb_prev(&entry->rb_node))
3654 		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
3655 				 rb_node);
3656 
3657 	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
3658 		u64 entry_start;
3659 		u64 entry_end;
3660 		u64 entry_len;
3661 		int clear_ret;
3662 
3663 		entry = rb_entry(node, struct ulist_node, rb_node);
3664 		entry_start = entry->val;
3665 		entry_end = entry->aux;
3666 		entry_len = entry_end - entry_start + 1;
3667 
3668 		if (entry_start >= start + len)
3669 			break;
3670 		if (entry_start + entry_len <= start)
3671 			continue;
3672 		/*
3673 		 * Now the entry intersects [start, start + len); clear the
3674 		 * EXTENT_QGROUP_RESERVED bit.
3675 		 */
3676 		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
3677 					      entry_end, EXTENT_QGROUP_RESERVED);
3678 		if (!ret && clear_ret < 0)
3679 			ret = clear_ret;
3680 
3681 		ulist_del(&reserved->range_changed, entry->val, entry->aux);
3682 		if (likely(reserved->bytes_changed >= entry_len)) {
3683 			reserved->bytes_changed -= entry_len;
3684 		} else {
3685 			WARN_ON(1);
3686 			reserved->bytes_changed = 0;
3687 		}
3688 	}
3689 
3690 	return ret;
3691 }
3692 
3693 /*
3694  * Try to free some space for qgroup.
3695  *
3696  * For qgroup, there are only 3 ways to free qgroup space:
3697  * - Flush nodatacow write
3698  *   Any nodatacow write will free its reserved data space at run_delalloc_range().
3699  *   In theory, we should only flush nodatacow inodes, but it's not yet
3700  *   possible, so we need to flush the whole root.
3701  *
3702  * - Wait for ordered extents
3703  *   When ordered extents are finished, their reserved metadata is finally
3704  *   converted to per_trans status, which can be freed by later commit
3705  *   converted to pertrans status, which can be freed by a later
3706  *   transaction commit.
3707  *
3708  * - Commit transaction
3709  *   This would free the meta_pertrans space.
3710  *   In theory this shouldn't provide much space, but any amount helps.
3711  */
3712 static int try_flush_qgroup(struct btrfs_root *root)
3713 {
3714 	struct btrfs_trans_handle *trans;
3715 	int ret;
3716 
3717 	/* Can't hold an open transaction or we run the risk of deadlocking. */
3718 	ASSERT(current->journal_info == NULL);
3719 	if (WARN_ON(current->journal_info))
3720 		return 0;
3721 
3722 	/*
3723 	 * We don't want to run flush again and again, so if there is a
3724 	 * running one, we just wait for it to finish and return.
3725 	 */
3726 	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
3727 		wait_event(root->qgroup_flush_wait,
3728 			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
3729 		return 0;
3730 	}
3731 
3732 	ret = btrfs_start_delalloc_snapshot(root, true);
3733 	if (ret < 0)
3734 		goto out;
3735 	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
3736 
3737 	trans = btrfs_join_transaction(root);
3738 	if (IS_ERR(trans)) {
3739 		ret = PTR_ERR(trans);
3740 		goto out;
3741 	}
3742 
3743 	ret = btrfs_commit_transaction(trans);
3744 out:
3745 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
3746 	wake_up(&root->qgroup_flush_wait);
3747 	return ret;
3748 }
3749 
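/*
 * The single-flusher exclusion used in try_flush_qgroup(), reduced to its
 * essentials (an illustrative sketch with made-up names MY_FLUSHING,
 * obj->state, obj->flush_wait and do_the_flush()): exactly one task wins
 * test_and_set_bit() and performs the flush, everyone else sleeps until
 * the bit is cleared.
 *
 *	if (test_and_set_bit(MY_FLUSHING, &obj->state)) {
 *		wait_event(obj->flush_wait,
 *			   !test_bit(MY_FLUSHING, &obj->state));
 *		return 0;	// someone else flushed for us
 *	}
 *	do_the_flush(obj);
 *	clear_bit(MY_FLUSHING, &obj->state);
 *	wake_up(&obj->flush_wait);
 */
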
3750 static int qgroup_reserve_data(struct btrfs_inode *inode,
3751 			struct extent_changeset **reserved_ret, u64 start,
3752 			u64 len)
3753 {
3754 	struct btrfs_root *root = inode->root;
3755 	struct extent_changeset *reserved;
3756 	bool new_reserved = false;
3757 	u64 orig_reserved;
3758 	u64 to_reserve;
3759 	int ret;
3760 
3761 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
3762 	    !is_fstree(root->root_key.objectid) || len == 0)
3763 		return 0;
3764 
3765 	/* @reserved parameter is mandatory for qgroup */
3766 	if (WARN_ON(!reserved_ret))
3767 		return -EINVAL;
3768 	if (!*reserved_ret) {
3769 		new_reserved = true;
3770 		*reserved_ret = extent_changeset_alloc();
3771 		if (!*reserved_ret)
3772 			return -ENOMEM;
3773 	}
3774 	reserved = *reserved_ret;
3775 	/* Record already reserved space */
3776 	orig_reserved = reserved->bytes_changed;
3777 	ret = set_record_extent_bits(&inode->io_tree, start,
3778 			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);
3779 
3780 	/* Newly reserved space */
3781 	to_reserve = reserved->bytes_changed - orig_reserved;
3782 	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
3783 					to_reserve, QGROUP_RESERVE);
3784 	if (ret < 0)
3785 		goto out;
3786 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
3787 	if (ret < 0)
3788 		goto cleanup;
3789 
3790 	return ret;
3791 
3792 cleanup:
3793 	qgroup_unreserve_range(inode, reserved, start, len);
3794 out:
3795 	if (new_reserved) {
3796 		extent_changeset_free(reserved);
3797 		*reserved_ret = NULL;
3798 	}
3799 	return ret;
3800 }
3801 
3802 /*
3803  * Reserve qgroup space for range [start, start + len).
3804  *
3805  * This function will either reserve space from related qgroups or do nothing
3806  * if the range is already reserved.
3807  *
3808  * Return 0 for successful reservation
3809  * Return <0 for error (including -EDQUOT)
3810  *
3811  * NOTE: This function may sleep for memory allocation, dirty page flushing
3812  *	 and transaction commit, so the caller must not hold any locked dirty pages.
3813  */
3814 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
3815 			struct extent_changeset **reserved_ret, u64 start,
3816 			u64 len)
3817 {
3818 	int ret;
3819 
3820 	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
3821 	if (ret <= 0 && ret != -EDQUOT)
3822 		return ret;
3823 
3824 	ret = try_flush_qgroup(inode->root);
3825 	if (ret < 0)
3826 		return ret;
3827 	return qgroup_reserve_data(inode, reserved_ret, start, len);
3828 }
3829 
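/*
 * Illustrative sketch (not actual kernel code) of how a write path pairs
 * btrfs_qgroup_reserve_data() with the helpers below; do_write() is a
 * made-up stand-in for the real I/O submission:
 *
 *	struct extent_changeset *reserved = NULL;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, pos, count);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_write(inode, pos, count);
 *	if (ret < 0)
 *		// error path: refund the qgroup reservation
 *		btrfs_qgroup_free_data(inode, reserved, pos, count);
 *	extent_changeset_free(reserved);
 *	return ret;
 */
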
3830 /* Free ranges specified by @reserved, normally in error path */
3831 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
3832 			struct extent_changeset *reserved, u64 start, u64 len)
3833 {
3834 	struct btrfs_root *root = inode->root;
3835 	struct ulist_node *unode;
3836 	struct ulist_iterator uiter;
3837 	struct extent_changeset changeset;
3838 	int freed = 0;
3839 	int ret;
3840 
3841 	extent_changeset_init(&changeset);
3842 	/* Normalize [start, start + len) to sector boundaries. */
3843 	len = round_up(start + len, root->fs_info->sectorsize);
3844 	start = round_down(start, root->fs_info->sectorsize);
3845 	/* @len now holds the aligned end offset; turn it back into a length. */
3846 	len -= start;
3844 
3845 	ULIST_ITER_INIT(&uiter);
3846 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
3847 		u64 range_start = unode->val;
3848 		/* unode->aux is the inclusive end */
3849 		u64 range_len = unode->aux - range_start + 1;
3850 		u64 free_start;
3851 		u64 free_len;
3852 
3853 		extent_changeset_release(&changeset);
3854 
3855 		/* Skip ranges that don't overlap [start, start + len) */
3856 		if (range_start >= start + len ||
3857 		    range_start + range_len <= start)
3858 			continue;
3859 		free_start = max(range_start, start);
3860 		free_len = min(start + len, range_start + range_len) -
3861 			   free_start;
3862 		/*
3863 		 * TODO: Also modify reserved->ranges_reserved to reflect
3864 		 * the modification.
3865 		 *
3866 		 * However, as long as we free qgroup reserved space according
3867 		 * to EXTENT_QGROUP_RESERVED, we won't double free, so there
3868 		 * is no need to rush.
3869 		 */
3870 		ret = clear_record_extent_bits(&inode->io_tree, free_start,
3871 				free_start + free_len - 1,
3872 				EXTENT_QGROUP_RESERVED, &changeset);
3873 		if (ret < 0)
3874 			goto out;
3875 		freed += changeset.bytes_changed;
3876 	}
3877 	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
3878 				  BTRFS_QGROUP_RSV_DATA);
3879 	ret = freed;
3880 out:
3881 	extent_changeset_release(&changeset);
3882 	return ret;
3883 }
3884 
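/*
 * Worked example for the clamping above (illustrative numbers, 4K sectors):
 * with a recorded range [0, 1M) and a request to free [4K, 8K), we get
 * free_start = max(0, 4K) = 4K and free_len = min(8K, 1M) - 4K = 4K, so
 * only the requested window is cleared and refunded, never the whole
 * recorded range.
 */
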
3885 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
3886 			struct extent_changeset *reserved, u64 start, u64 len,
3887 			int free)
3888 {
3889 	struct extent_changeset changeset;
3890 	int trace_op = QGROUP_RELEASE;
3891 	int ret;
3892 
3893 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
3894 		return 0;
3895 
3896 	/* In the release case, we shouldn't have @reserved */
3897 	WARN_ON(!free && reserved);
3898 	if (free && reserved)
3899 		return qgroup_free_reserved_data(inode, reserved, start, len);
3900 	extent_changeset_init(&changeset);
3901 	ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
3902 				       EXTENT_QGROUP_RESERVED, &changeset);
3903 	if (ret < 0)
3904 		goto out;
3905 
3906 	if (free)
3907 		trace_op = QGROUP_FREE;
3908 	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
3909 					changeset.bytes_changed, trace_op);
3910 	if (free)
3911 		btrfs_qgroup_free_refroot(inode->root->fs_info,
3912 				inode->root->root_key.objectid,
3913 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
3914 	ret = changeset.bytes_changed;
3915 out:
3916 	extent_changeset_release(&changeset);
3917 	return ret;
3918 }
3919 
3920 /*
3921  * Free a reserved space range from io_tree and related qgroups
3922  *
3923  * Should be called when a range of pages gets invalidated before reaching
3924  * disk, or for error cleanup.
3925  * If @reserved is given, only the reserved ranges in [@start, @start + @len)
3926  * will be freed.
3927  *
3928  * For data written to disk, use btrfs_qgroup_release_data().
3929  *
3930  * NOTE: This function may sleep for memory allocation.
3931  */
3932 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
3933 			struct extent_changeset *reserved, u64 start, u64 len)
3934 {
3935 	return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
3936 }
3937 
3938 /*
3939  * Release a reserved space range from io_tree only.
3940  *
3941  * Should be called when a range of pages gets written to disk and the
3942  * corresponding FILE_EXTENT item is inserted into the corresponding root.
3943  *
3944  * Since the qgroup accounting framework only updates qgroup numbers at
3945  * commit_transaction() time, the reserved space shouldn't be freed from the
3946  * related qgroups.
3947  *
3948  * But we should release the range from the io_tree, to allow further writes
3949  * to be COWed.
3950  *
3951  * NOTE: This function may sleep for memory allocation.
3952  */
3953 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
3954 {
3955 	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
3956 }
3957 
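/*
 * Illustrative sketch contrasting the two exports above (not actual kernel
 * code; write_reached_disk is a made-up condition): successful writeback
 * only releases the io_tree bits and keeps the qgroup accounting, while an
 * invalidated or failed range must be freed back to the qgroups as well.
 *
 *	if (write_reached_disk)
 *		btrfs_qgroup_release_data(inode, start, len);
 *	else
 *		btrfs_qgroup_free_data(inode, NULL, start, len);
 */
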
3958 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3959 			      enum btrfs_qgroup_rsv_type type)
3960 {
3961 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3962 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
3963 		return;
3964 	if (num_bytes == 0)
3965 		return;
3966 
3967 	spin_lock(&root->qgroup_meta_rsv_lock);
3968 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3969 		root->qgroup_meta_rsv_prealloc += num_bytes;
3970 	else
3971 		root->qgroup_meta_rsv_pertrans += num_bytes;
3972 	spin_unlock(&root->qgroup_meta_rsv_lock);
3973 }
3974 
3975 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3976 			     enum btrfs_qgroup_rsv_type type)
3977 {
3978 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3979 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
3980 		return 0;
3981 	if (num_bytes == 0)
3982 		return 0;
3983 
3984 	spin_lock(&root->qgroup_meta_rsv_lock);
3985 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3986 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3987 				  num_bytes);
3988 		root->qgroup_meta_rsv_prealloc -= num_bytes;
3989 	} else {
3990 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3991 				  num_bytes);
3992 		root->qgroup_meta_rsv_pertrans -= num_bytes;
3993 	}
3994 	spin_unlock(&root->qgroup_meta_rsv_lock);
3995 	return num_bytes;
3996 }
3997 
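/*
 * Worked example for the clamping in sub_root_meta_rsv() (illustrative
 * numbers): if 16K of META_PREALLOC was reserved while quota was disabled,
 * nothing was recorded in root->qgroup_meta_rsv_prealloc.  A later request
 * to free those 16K is then clamped by min_t() to the recorded 0 bytes, so
 * we never free qgroup space that was never reserved.
 */
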
3998 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3999 			      enum btrfs_qgroup_rsv_type type, bool enforce)
4000 {
4001 	struct btrfs_fs_info *fs_info = root->fs_info;
4002 	int ret;
4003 
4004 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
4005 	    !is_fstree(root->root_key.objectid) || num_bytes == 0)
4006 		return 0;
4007 
4008 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4009 	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
4010 	ret = qgroup_reserve(root, num_bytes, enforce, type);
4011 	if (ret < 0)
4012 		return ret;
4013 	/*
4014 	 * Record what we have reserved into the root.
4015 	 *
4016 	 * This avoids underflow across a quota disable->enable cycle: in that
4017 	 * case we may try to free space we haven't reserved (since quota was
4018 	 * disabled), so record what we reserved into the root and ensure a
4019 	 * later release won't underflow this number.
4020 	 */
4021 	add_root_meta_rsv(root, num_bytes, type);
4022 	return ret;
4023 }
4024 
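/*
 * Illustrative sketch (not actual kernel code) of the metadata reservation
 * lifecycle; nblocks and bytes_used are made-up variables for the
 * worst-case and actually-used amounts:
 *
 *	int bytes = nblocks * root->fs_info->nodesize;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_meta(root, bytes,
 *					BTRFS_QGROUP_RSV_META_PREALLOC, true);
 *	if (ret < 0)
 *		return ret;	// e.g. -EDQUOT when enforce is true
 *	// ... modify the btree ...
 *	// what ended up in the transaction becomes PERTRANS:
 *	btrfs_qgroup_convert_reserved_meta(root, bytes_used);
 */
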
4025 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4026 				enum btrfs_qgroup_rsv_type type, bool enforce,
4027 				bool noflush)
4028 {
4029 	int ret;
4030 
4031 	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4032 	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4033 		return ret;
4034 
4035 	ret = try_flush_qgroup(root);
4036 	if (ret < 0)
4037 		return ret;
4038 	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4039 }
4040 
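/*
 * The retry policy above, spelled out (try_reserve() is a made-up stand-in
 * for btrfs_qgroup_reserve_meta() with fixed arguments): only -EDQUOT from
 * the first attempt triggers the expensive flush, and there is exactly one
 * retry, so the worst case stays bounded.
 *
 *	ret = try_reserve();
 *	if ((ret <= 0 && ret != -EDQUOT) || noflush)
 *		return ret;	// success, hard error, or caller can't flush
 *	ret = try_flush_qgroup(root);	// delalloc + ordered + commit
 *	if (ret < 0)
 *		return ret;
 *	return try_reserve();	// single retry after reclaim
 */
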
4041 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4042 {
4043 	struct btrfs_fs_info *fs_info = root->fs_info;
4044 
4045 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
4046 	    !is_fstree(root->root_key.objectid))
4047 		return;
4048 
4049 	/* TODO: Update trace point to handle such free */
4050 	trace_qgroup_meta_free_all_pertrans(root);
4051 	/* Special value -1 means to free all reserved space */
4052 	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
4053 				  BTRFS_QGROUP_RSV_META_PERTRANS);
4054 }
4055 
4056 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4057 			      enum btrfs_qgroup_rsv_type type)
4058 {
4059 	struct btrfs_fs_info *fs_info = root->fs_info;
4060 
4061 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
4062 	    !is_fstree(root->root_key.objectid))
4063 		return;
4064 
4065 	/*
4066 	 * A reservation for META_PREALLOC can happen before quota is enabled,
4067 	 * which can lead to underflow.
4068 	 * Ensure we only free what we have really reserved.
4069 	 */
4070 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4071 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4072 	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4073 	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
4074 				  num_bytes, type);
4075 }
4076 
4077 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4078 				int num_bytes)
4079 {
4080 	struct btrfs_qgroup *qgroup;
4081 	struct ulist_node *unode;
4082 	struct ulist_iterator uiter;
4083 	int ret = 0;
4084 
4085 	if (num_bytes == 0)
4086 		return;
4087 	if (!fs_info->quota_root)
4088 		return;
4089 
4090 	spin_lock(&fs_info->qgroup_lock);
4091 	qgroup = find_qgroup_rb(fs_info, ref_root);
4092 	if (!qgroup)
4093 		goto out;
4094 	ulist_reinit(fs_info->qgroup_ulist);
4095 	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
4096 		       qgroup_to_aux(qgroup), GFP_ATOMIC);
4097 	if (ret < 0)
4098 		goto out;
4099 	ULIST_ITER_INIT(&uiter);
4100 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
4101 		struct btrfs_qgroup *qg;
4102 		struct btrfs_qgroup_list *glist;
4103 
4104 		qg = unode_aux_to_qgroup(unode);
4105 
4106 		qgroup_rsv_release(fs_info, qg, num_bytes,
4107 				BTRFS_QGROUP_RSV_META_PREALLOC);
4108 		qgroup_rsv_add(fs_info, qg, num_bytes,
4109 				BTRFS_QGROUP_RSV_META_PERTRANS);
4110 		list_for_each_entry(glist, &qg->groups, next_group) {
4111 			ret = ulist_add(fs_info->qgroup_ulist,
4112 					glist->group->qgroupid,
4113 					qgroup_to_aux(glist->group), GFP_ATOMIC);
4114 			if (ret < 0)
4115 				goto out;
4116 		}
4117 	}
4118 out:
4119 	spin_unlock(&fs_info->qgroup_lock);
4120 }
4121 
4122 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4123 {
4124 	struct btrfs_fs_info *fs_info = root->fs_info;
4125 
4126 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
4127 	    !is_fstree(root->root_key.objectid))
4128 		return;
4129 	/* Same as btrfs_qgroup_free_meta_prealloc() */
4130 	num_bytes = sub_root_meta_rsv(root, num_bytes,
4131 				      BTRFS_QGROUP_RSV_META_PREALLOC);
4132 	trace_qgroup_meta_convert(root, num_bytes);
4133 	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
4134 }
4135 
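/*
 * Worked example for the conversion above (illustrative qgroup IDs): if
 * qgroup 0/257 is a member of 1/100 and 16K is converted, then under
 * fs_info->qgroup_lock both qgroups see rsv[META_PREALLOC] -= 16K and
 * rsv[META_PERTRANS] += 16K, because qgroup_convert_meta() walks the whole
 * parent hierarchy via qgroup_ulist.  The total reserved space is
 * unchanged; only its type differs, and pertrans space is freed in bulk at
 * the next transaction commit.
 */
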
4136 /*
4137  * Check for leaked qgroup reserved space, normally at inode destruction
4138  * time.
4139  */
4140 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4141 {
4142 	struct extent_changeset changeset;
4143 	struct ulist_node *unode;
4144 	struct ulist_iterator iter;
4145 	int ret;
4146 
4147 	extent_changeset_init(&changeset);
4148 	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4149 			EXTENT_QGROUP_RESERVED, &changeset);
4150 
4151 	WARN_ON(ret < 0);
4152 	if (WARN_ON(changeset.bytes_changed)) {
4153 		ULIST_ITER_INIT(&iter);
4154 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4155 			btrfs_warn(inode->root->fs_info,
4156 		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4157 				btrfs_ino(inode), unode->val, unode->aux);
4158 		}
4159 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4160 				inode->root->root_key.objectid,
4161 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4163 	}
4164 	extent_changeset_release(&changeset);
4165 }
4166 
4167 void btrfs_qgroup_init_swapped_blocks(
4168 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4169 {
4170 	int i;
4171 
4172 	spin_lock_init(&swapped_blocks->lock);
4173 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4174 		swapped_blocks->blocks[i] = RB_ROOT;
4175 	swapped_blocks->swapped = false;
4176 }
4177 
4178 /*
4179  * Delete all swapped blocks record of @root.
4180  * Delete all swapped block records of @root.
4181  * Every record here means we skipped a full subtree scan for qgroup.
4182  *
4183  * Gets called when committing a transaction.
4184 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4185 {
4186 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4187 	int i;
4188 
4189 	swapped_blocks = &root->swapped_blocks;
4190 
4191 	spin_lock(&swapped_blocks->lock);
4192 	if (!swapped_blocks->swapped)
4193 		goto out;
4194 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4195 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4196 		struct btrfs_qgroup_swapped_block *entry;
4197 		struct btrfs_qgroup_swapped_block *next;
4198 
4199 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4200 						     node)
4201 			kfree(entry);
4202 		swapped_blocks->blocks[i] = RB_ROOT;
4203 	}
4204 	swapped_blocks->swapped = false;
4205 out:
4206 	spin_unlock(&swapped_blocks->lock);
4207 }
4208 
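/*
 * The traversal used above, as a minimal illustrative sketch (my_entry and
 * my_root are made-up names): postorder visits children before their
 * parent, so every node can be freed as soon as it is visited without the
 * iteration ever touching freed memory.
 *
 *	struct my_entry *entry;
 *	struct my_entry *next;
 *
 *	rbtree_postorder_for_each_entry_safe(entry, next, &my_root, node)
 *		kfree(entry);
 *	my_root = RB_ROOT;
 */
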
4209 /*
4210  * Add subtree roots record into @subvol_root.
4211  * Add a subtree root record into @subvol_root.
4212  *
4213  * @subvol_root:	tree root of the subvolume tree getting swapped
4214  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4215  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4216  *			BOTH POINTERS ARE BEFORE TREE SWAP
4217  * @last_snapshot:	last snapshot generation of the subvolume tree
4218  */
4219 int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4220 		struct btrfs_root *subvol_root,
4221 		struct btrfs_block_group *bg,
4222 		struct extent_buffer *subvol_parent, int subvol_slot,
4223 		struct extent_buffer *reloc_parent, int reloc_slot,
4224 		u64 last_snapshot)
4225 {
4226 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4227 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4228 	struct btrfs_qgroup_swapped_block *block;
4229 	struct rb_node **cur;
4230 	struct rb_node *parent = NULL;
4231 	int level = btrfs_header_level(subvol_parent) - 1;
4232 	int ret = 0;
4233 
4234 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
4235 		return 0;
4236 
4237 	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4238 	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4239 		btrfs_err_rl(fs_info,
4240 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4241 			__func__,
4242 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4243 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4244 		return -EUCLEAN;
4245 	}
4246 
4247 	block = kmalloc(sizeof(*block), GFP_NOFS);
4248 	if (!block) {
4249 		ret = -ENOMEM;
4250 		goto out;
4251 	}
4252 
4253 	/*
4254 	 * @reloc_parent/slot is still before swap, while @block is going to
4255 	 * record the bytenr after swap, so we do the swap here.
4256 	 */
4257 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4258 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4259 							     reloc_slot);
4260 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4261 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4262 							    subvol_slot);
4263 	block->last_snapshot = last_snapshot;
4264 	block->level = level;
4265 
4266 	/*
4267 	 * If bg == NULL, we're called from btrfs_recover_relocation(), and
4268 	 * no one else can modify tree blocks, thus the qgroup numbers will
4269 	 * not change no matter the value of trace_leaf.
4270 	 */
4271 	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4272 		block->trace_leaf = true;
4273 	else
4274 		block->trace_leaf = false;
4275 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4276 
4277 	/* Insert @block into @blocks */
4278 	spin_lock(&blocks->lock);
4279 	cur = &blocks->blocks[level].rb_node;
4280 	while (*cur) {
4281 		struct btrfs_qgroup_swapped_block *entry;
4282 
4283 		parent = *cur;
4284 		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
4285 				 node);
4286 
4287 		if (entry->subvol_bytenr < block->subvol_bytenr) {
4288 			cur = &(*cur)->rb_left;
4289 		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
4290 			cur = &(*cur)->rb_right;
4291 		} else {
4292 			if (entry->subvol_generation !=
4293 					block->subvol_generation ||
4294 			    entry->reloc_bytenr != block->reloc_bytenr ||
4295 			    entry->reloc_generation !=
4296 					block->reloc_generation) {
4297 				/*
4298 				 * Duplicate but mismatched entry found.
4299 				 * Shouldn't happen.
4300 				 *
4301 				 * Marking qgroup inconsistent should be enough
4302 				 * for end users.
4303 				 */
4304 				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4305 				ret = -EEXIST;
4306 			}
4307 			kfree(block);
4308 			goto out_unlock;
4309 		}
4310 	}
4311 	rb_link_node(&block->node, parent, cur);
4312 	rb_insert_color(&block->node, &blocks->blocks[level]);
4313 	blocks->swapped = true;
4314 out_unlock:
4315 	spin_unlock(&blocks->lock);
4316 out:
4317 	if (ret < 0)
4318 		qgroup_mark_inconsistent(fs_info);
4319 	return ret;
4320 }
4321 
4322 /*
4323  * Check if the tree block is a subtree root, and if so, do the needed
4324  * delayed subtree tracing for qgroup.
4325  *
4326  * This is called during btrfs_cow_block().
4327  */
4328 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4329 					 struct btrfs_root *root,
4330 					 struct extent_buffer *subvol_eb)
4331 {
4332 	struct btrfs_fs_info *fs_info = root->fs_info;
4333 	struct btrfs_tree_parent_check check = { 0 };
4334 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4335 	struct btrfs_qgroup_swapped_block *block;
4336 	struct extent_buffer *reloc_eb = NULL;
4337 	struct rb_node *node;
4338 	bool found = false;
4339 	bool swapped = false;
4340 	int level = btrfs_header_level(subvol_eb);
4341 	int ret = 0;
4342 	int i;
4343 
4344 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
4345 		return 0;
4346 	if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
4347 		return 0;
4348 
4349 	spin_lock(&blocks->lock);
4350 	if (!blocks->swapped) {
4351 		spin_unlock(&blocks->lock);
4352 		return 0;
4353 	}
4354 	node = blocks->blocks[level].rb_node;
4355 
4356 	while (node) {
4357 		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4358 		if (block->subvol_bytenr < subvol_eb->start) {
4359 			node = node->rb_left;
4360 		} else if (block->subvol_bytenr > subvol_eb->start) {
4361 			node = node->rb_right;
4362 		} else {
4363 			found = true;
4364 			break;
4365 		}
4366 	}
4367 	if (!found) {
4368 		spin_unlock(&blocks->lock);
4369 		goto out;
4370 	}
4371 	/* Found one, remove it from @blocks first and update blocks->swapped */
4372 	rb_erase(&block->node, &blocks->blocks[level]);
4373 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4374 		if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
4375 			swapped = true;
4376 			break;
4377 		}
4378 	}
4379 	blocks->swapped = swapped;
4380 	spin_unlock(&blocks->lock);
4381 
4382 	check.level = block->level;
4383 	check.transid = block->reloc_generation;
4384 	check.has_first_key = true;
4385 	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4386 
4387 	/* Read out reloc subtree root */
4388 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4389 	if (IS_ERR(reloc_eb)) {
4390 		ret = PTR_ERR(reloc_eb);
4391 		reloc_eb = NULL;
4392 		goto free_out;
4393 	}
4394 	if (!extent_buffer_uptodate(reloc_eb)) {
4395 		ret = -EIO;
4396 		goto free_out;
4397 	}
4398 
4399 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4400 			block->last_snapshot, block->trace_leaf);
4401 free_out:
4402 	kfree(block);
4403 	free_extent_buffer(reloc_eb);
4404 out:
4405 	if (ret < 0) {
4406 		btrfs_err_rl(fs_info,
4407 			     "failed to account subtree at bytenr %llu: %d",
4408 			     subvol_eb->start, ret);
4409 		qgroup_mark_inconsistent(fs_info);
4410 	}
4411 	return ret;
4412 }
4413 
4414 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4415 {
4416 	struct btrfs_qgroup_extent_record *entry;
4417 	struct btrfs_qgroup_extent_record *next;
4418 	struct rb_root *root;
4419 
4420 	root = &trans->delayed_refs.dirty_extent_root;
4421 	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
4422 		ulist_free(entry->old_roots);
4423 		kfree(entry);
4424 	}
4425 }
4426