xref: /linux/fs/btrfs/qgroup.c (revision 92514ef226f511f2ca1fb1b8752966097518edc0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 STRATO.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/workqueue.h>
13 #include <linux/btrfs.h>
14 #include <linux/sched/mm.h>
15 
16 #include "ctree.h"
17 #include "transaction.h"
18 #include "disk-io.h"
19 #include "locking.h"
20 #include "ulist.h"
21 #include "backref.h"
22 #include "extent_io.h"
23 #include "qgroup.h"
24 #include "block-group.h"
25 #include "sysfs.h"
26 #include "tree-mod-log.h"
27 #include "fs.h"
28 #include "accessors.h"
29 #include "extent-tree.h"
30 #include "root-tree.h"
31 #include "tree-checker.h"
32 
33 enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
34 {
35 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
36 		return BTRFS_QGROUP_MODE_DISABLED;
37 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
38 		return BTRFS_QGROUP_MODE_SIMPLE;
39 	return BTRFS_QGROUP_MODE_FULL;
40 }
41 
42 bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
43 {
44 	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
45 }
46 
47 bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
48 {
49 	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
50 }
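
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * accounting paths are expected to gate on these helpers rather than on
 * the raw status flags, e.g.:
 *
 *	if (!btrfs_qgroup_full_accounting(fs_info))
 *		return 0;
 *
 * which skips full accounting work under squota or with quotas disabled.
 */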
51 
52 /*
53  * Helpers to access qgroup reservation
54  *
55  * Callers should ensure the lock context and type are valid
56  */
57 
58 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
59 {
60 	u64 ret = 0;
61 	int i;
62 
63 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
64 		ret += qgroup->rsv.values[i];
65 
66 	return ret;
67 }
68 
69 #ifdef CONFIG_BTRFS_DEBUG
70 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
71 {
72 	if (type == BTRFS_QGROUP_RSV_DATA)
73 		return "data";
74 	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
75 		return "meta_pertrans";
76 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
77 		return "meta_prealloc";
78 	return NULL;
79 }
80 #endif
81 
82 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
83 			   struct btrfs_qgroup *qgroup, u64 num_bytes,
84 			   enum btrfs_qgroup_rsv_type type)
85 {
86 	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
87 	qgroup->rsv.values[type] += num_bytes;
88 }
89 
90 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
91 			       struct btrfs_qgroup *qgroup, u64 num_bytes,
92 			       enum btrfs_qgroup_rsv_type type)
93 {
94 	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
95 	if (qgroup->rsv.values[type] >= num_bytes) {
96 		qgroup->rsv.values[type] -= num_bytes;
97 		return;
98 	}
99 #ifdef CONFIG_BTRFS_DEBUG
100 	WARN_RATELIMIT(1,
101 		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
102 		qgroup->qgroupid, qgroup_rsv_type_str(type),
103 		qgroup->rsv.values[type], num_bytes);
104 #endif
105 	qgroup->rsv.values[type] = 0;
106 }
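
/*
 * Note: on underflow qgroup_rsv_release() clamps the counter to zero instead
 * of wrapping, so a buggy double release degrades accounting rather than
 * corrupting it; the WARN above is only compiled in with CONFIG_BTRFS_DEBUG.
 */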
107 
108 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
109 				     struct btrfs_qgroup *dest,
110 				     const struct btrfs_qgroup *src)
111 {
112 	int i;
113 
114 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
115 		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
116 }
117 
118 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
119 					 struct btrfs_qgroup *dest,
120 					 const struct btrfs_qgroup *src)
121 {
122 	int i;
123 
124 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
125 		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
126 }
127 
128 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
129 					   int mod)
130 {
131 	if (qg->old_refcnt < seq)
132 		qg->old_refcnt = seq;
133 	qg->old_refcnt += mod;
134 }
135 
136 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
137 					   int mod)
138 {
139 	if (qg->new_refcnt < seq)
140 		qg->new_refcnt = seq;
141 	qg->new_refcnt += mod;
142 }
143 
144 static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
145 {
146 	if (qg->old_refcnt < seq)
147 		return 0;
148 	return qg->old_refcnt - seq;
149 }
150 
151 static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
152 {
153 	if (qg->new_refcnt < seq)
154 		return 0;
155 	return qg->new_refcnt - seq;
156 }
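
/*
 * The old/new refcnt pair above implements per-pass counters without an
 * explicit reset: each accounting pass uses a fresh sequence number @seq
 * larger than any previously used one.  The first update in a pass snaps a
 * stale counter up to @seq before adding, and the getters return the delta
 * above @seq.  A qgroup untouched in the current pass still holds a counter
 * below @seq, so the getters correctly report 0 for it.
 *
 * E.g. with seq == 100 and old_refcnt == 82 left over from an older pass,
 * two btrfs_qgroup_update_old_refcnt(qg, 100, 1) calls yield 102, and
 * btrfs_qgroup_get_old_refcnt(qg, 100) then returns 2.
 */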
157 
158 static int
159 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
160 		   int init_flags);
161 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
162 
163 /* must be called with qgroup_ioctl_lock held */
164 static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
165 					   u64 qgroupid)
166 {
167 	struct rb_node *n = fs_info->qgroup_tree.rb_node;
168 	struct btrfs_qgroup *qgroup;
169 
170 	while (n) {
171 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
172 		if (qgroup->qgroupid < qgroupid)
173 			n = n->rb_left;
174 		else if (qgroup->qgroupid > qgroupid)
175 			n = n->rb_right;
176 		else
177 			return qgroup;
178 	}
179 	return NULL;
180 }
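
/*
 * Note the comparison direction above: qgroups with a larger qgroupid hang
 * off the *left* child.  add_qgroup_rb() below uses the same reversed
 * convention, so lookups and insertions stay mutually consistent.
 */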
181 
182 /*
183  * Add qgroup to the filesystem's qgroup tree.
184  *
185  * Must be called with qgroup_lock held and @prealloc preallocated.
186  *
187  * Ownership of @prealloc is transferred to this function, so the caller
188  * must no longer touch @prealloc.
189  */
190 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
191 					  struct btrfs_qgroup *prealloc,
192 					  u64 qgroupid)
193 {
194 	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
195 	struct rb_node *parent = NULL;
196 	struct btrfs_qgroup *qgroup;
197 
198 	/* Caller must have pre-allocated @prealloc. */
199 	ASSERT(prealloc);
200 
201 	while (*p) {
202 		parent = *p;
203 		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
204 
205 		if (qgroup->qgroupid < qgroupid) {
206 			p = &(*p)->rb_left;
207 		} else if (qgroup->qgroupid > qgroupid) {
208 			p = &(*p)->rb_right;
209 		} else {
210 			kfree(prealloc);
211 			return qgroup;
212 		}
213 	}
214 
215 	qgroup = prealloc;
216 	qgroup->qgroupid = qgroupid;
217 	INIT_LIST_HEAD(&qgroup->groups);
218 	INIT_LIST_HEAD(&qgroup->members);
219 	INIT_LIST_HEAD(&qgroup->dirty);
220 	INIT_LIST_HEAD(&qgroup->iterator);
221 	INIT_LIST_HEAD(&qgroup->nested_iterator);
222 
223 	rb_link_node(&qgroup->node, parent, p);
224 	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
225 
226 	return qgroup;
227 }
228 
229 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
230 {
231 	struct btrfs_qgroup_list *list;
232 
233 	list_del(&qgroup->dirty);
234 	while (!list_empty(&qgroup->groups)) {
235 		list = list_first_entry(&qgroup->groups,
236 					struct btrfs_qgroup_list, next_group);
237 		list_del(&list->next_group);
238 		list_del(&list->next_member);
239 		kfree(list);
240 	}
241 
242 	while (!list_empty(&qgroup->members)) {
243 		list = list_first_entry(&qgroup->members,
244 					struct btrfs_qgroup_list, next_member);
245 		list_del(&list->next_group);
246 		list_del(&list->next_member);
247 		kfree(list);
248 	}
249 }
250 
251 /* must be called with qgroup_lock held */
252 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
253 {
254 	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
255 
256 	if (!qgroup)
257 		return -ENOENT;
258 
259 	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
260 	__del_qgroup_rb(qgroup);
261 	return 0;
262 }
263 
264 /*
265  * Add relation specified by two qgroups.
266  *
267  * Must be called with qgroup_lock held, the ownership of @prealloc is
268  * transferred to this function and caller should not touch it anymore.
269  *
270  * Return: 0        on success
271  *         -ENOENT  if one of the qgroups is NULL
272  *         <0       other errors
273  */
274 static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
275 			     struct btrfs_qgroup *member,
276 			     struct btrfs_qgroup *parent)
277 {
278 	if (!member || !parent) {
279 		kfree(prealloc);
280 		return -ENOENT;
281 	}
282 
283 	prealloc->group = parent;
284 	prealloc->member = member;
285 	list_add_tail(&prealloc->next_group, &member->groups);
286 	list_add_tail(&prealloc->next_member, &parent->members);
287 
288 	return 0;
289 }
290 
291 /*
292  * Add relation specified by two qgroup ids.
293  *
294  * Must be called with qgroup_lock held.
295  *
296  * Return: 0        on success
297  *         -ENOENT  if one of the ids does not exist
298  *         <0       other errors
299  */
300 static int add_relation_rb(struct btrfs_fs_info *fs_info,
301 			   struct btrfs_qgroup_list *prealloc,
302 			   u64 memberid, u64 parentid)
303 {
304 	struct btrfs_qgroup *member;
305 	struct btrfs_qgroup *parent;
306 
307 	member = find_qgroup_rb(fs_info, memberid);
308 	parent = find_qgroup_rb(fs_info, parentid);
309 
310 	return __add_relation_rb(prealloc, member, parent);
311 }
312 
313 /* Must be called with qgroup_lock held */
314 static int del_relation_rb(struct btrfs_fs_info *fs_info,
315 			   u64 memberid, u64 parentid)
316 {
317 	struct btrfs_qgroup *member;
318 	struct btrfs_qgroup *parent;
319 	struct btrfs_qgroup_list *list;
320 
321 	member = find_qgroup_rb(fs_info, memberid);
322 	parent = find_qgroup_rb(fs_info, parentid);
323 	if (!member || !parent)
324 		return -ENOENT;
325 
326 	list_for_each_entry(list, &member->groups, next_group) {
327 		if (list->group == parent) {
328 			list_del(&list->next_group);
329 			list_del(&list->next_member);
330 			kfree(list);
331 			return 0;
332 		}
333 	}
334 	return -ENOENT;
335 }
336 
337 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
338 int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
339 			       u64 rfer, u64 excl)
340 {
341 	struct btrfs_qgroup *qgroup;
342 
343 	qgroup = find_qgroup_rb(fs_info, qgroupid);
344 	if (!qgroup)
345 		return -EINVAL;
346 	if (qgroup->rfer != rfer || qgroup->excl != excl)
347 		return -EINVAL;
348 	return 0;
349 }
350 #endif
351 
352 static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
353 {
354 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
355 		return;
356 	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
357 				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
358 				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
359 }
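
/*
 * Simple quotas are deliberately exempt above: squota never replays history
 * via a rescan, so the INCONSISTENT and rescan-related runtime flags are
 * meaningless in that mode (see also the comment in can_delete_qgroup()).
 */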
360 
361 static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
362 				   struct extent_buffer *leaf, int slot,
363 				   struct btrfs_qgroup_status_item *ptr)
364 {
365 	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
366 	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
367 	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
368 }
369 
370 /*
371  * The full config is read in one go, only called from open_ctree().
372  * It doesn't use any locking, as at this point we're still single-threaded.
373  */
374 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
375 {
376 	struct btrfs_key key;
377 	struct btrfs_key found_key;
378 	struct btrfs_root *quota_root = fs_info->quota_root;
379 	struct btrfs_path *path = NULL;
380 	struct extent_buffer *l;
381 	int slot;
382 	int ret = 0;
383 	u64 flags = 0;
384 	u64 rescan_progress = 0;
385 
386 	if (!fs_info->quota_root)
387 		return 0;
388 
389 	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
390 	if (!fs_info->qgroup_ulist) {
391 		ret = -ENOMEM;
392 		goto out;
393 	}
394 
395 	path = btrfs_alloc_path();
396 	if (!path) {
397 		ret = -ENOMEM;
398 		goto out;
399 	}
400 
401 	ret = btrfs_sysfs_add_qgroups(fs_info);
402 	if (ret < 0)
403 		goto out;
404 	/* default this to quota off, in case no status key is found */
405 	fs_info->qgroup_flags = 0;
406 
407 	/*
408 	 * pass 1: read status, all qgroup infos and limits
409 	 */
410 	key.objectid = 0;
411 	key.type = 0;
412 	key.offset = 0;
413 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
414 	if (ret)
415 		goto out;
416 
417 	while (1) {
418 		struct btrfs_qgroup *qgroup;
419 
420 		slot = path->slots[0];
421 		l = path->nodes[0];
422 		btrfs_item_key_to_cpu(l, &found_key, slot);
423 
424 		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
425 			struct btrfs_qgroup_status_item *ptr;
426 
427 			ptr = btrfs_item_ptr(l, slot,
428 					     struct btrfs_qgroup_status_item);
429 
430 			if (btrfs_qgroup_status_version(l, ptr) !=
431 			    BTRFS_QGROUP_STATUS_VERSION) {
432 				btrfs_err(fs_info,
433 				 "old qgroup version, quota disabled");
434 				goto out;
435 			}
436 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
437 			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
438 				qgroup_read_enable_gen(fs_info, l, slot, ptr);
439 			} else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
440 				qgroup_mark_inconsistent(fs_info);
441 				btrfs_err(fs_info,
442 					"qgroup generation mismatch, marked as inconsistent");
443 			}
444 			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
445 			goto next1;
446 		}
447 
448 		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
449 		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
450 			goto next1;
451 
452 		qgroup = find_qgroup_rb(fs_info, found_key.offset);
453 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
454 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
455 			btrfs_err(fs_info, "inconsistent qgroup config");
456 			qgroup_mark_inconsistent(fs_info);
457 		}
458 		if (!qgroup) {
459 			struct btrfs_qgroup *prealloc;
460 			struct btrfs_root *tree_root = fs_info->tree_root;
461 
462 			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
463 			if (!prealloc) {
464 				ret = -ENOMEM;
465 				goto out;
466 			}
467 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
468 			/*
469 			 * If a qgroup exists for a subvolume ID, it is possible
470 			 * that subvolume has been deleted, in which case
471 			 * reusing that ID would lead to incorrect accounting.
472 			 *
473 			 * Ensure that we skip any such subvol ids.
474 			 *
475 			 * We don't need to lock because this is only called
476 			 * during mount before we start doing things like creating
477 			 * subvolumes.
478 			 */
479 			if (is_fstree(qgroup->qgroupid) &&
480 			    qgroup->qgroupid > tree_root->free_objectid)
481 				/*
482 				 * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
483 				 * as it will get checked on the next call to
484 				 * btrfs_get_free_objectid.
485 				 */
486 				tree_root->free_objectid = qgroup->qgroupid + 1;
487 		}
488 		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
489 		if (ret < 0)
490 			goto out;
491 
492 		switch (found_key.type) {
493 		case BTRFS_QGROUP_INFO_KEY: {
494 			struct btrfs_qgroup_info_item *ptr;
495 
496 			ptr = btrfs_item_ptr(l, slot,
497 					     struct btrfs_qgroup_info_item);
498 			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
499 			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
500 			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
501 			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
502 			/* generation currently unused */
503 			break;
504 		}
505 		case BTRFS_QGROUP_LIMIT_KEY: {
506 			struct btrfs_qgroup_limit_item *ptr;
507 
508 			ptr = btrfs_item_ptr(l, slot,
509 					     struct btrfs_qgroup_limit_item);
510 			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
511 			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
512 			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
513 			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
514 			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
515 			break;
516 		}
517 		}
518 next1:
519 		ret = btrfs_next_item(quota_root, path);
520 		if (ret < 0)
521 			goto out;
522 		if (ret)
523 			break;
524 	}
525 	btrfs_release_path(path);
526 
527 	/*
528 	 * pass 2: read all qgroup relations
529 	 */
530 	key.objectid = 0;
531 	key.type = BTRFS_QGROUP_RELATION_KEY;
532 	key.offset = 0;
533 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
534 	if (ret)
535 		goto out;
536 	while (1) {
537 		struct btrfs_qgroup_list *list = NULL;
538 
539 		slot = path->slots[0];
540 		l = path->nodes[0];
541 		btrfs_item_key_to_cpu(l, &found_key, slot);
542 
543 		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
544 			goto next2;
545 
546 		if (found_key.objectid > found_key.offset) {
547 			/* parent <- member, not needed to build config */
548 			/* FIXME should we omit the key completely? */
549 			goto next2;
550 		}
551 
552 		list = kzalloc(sizeof(*list), GFP_KERNEL);
553 		if (!list) {
554 			ret = -ENOMEM;
555 			goto out;
556 		}
557 		ret = add_relation_rb(fs_info, list, found_key.objectid,
558 				      found_key.offset);
559 		list = NULL;
560 		if (ret == -ENOENT) {
561 			btrfs_warn(fs_info,
562 				"orphan qgroup relation 0x%llx->0x%llx",
563 				found_key.objectid, found_key.offset);
564 			ret = 0;	/* ignore the error */
565 		}
566 		if (ret)
567 			goto out;
568 next2:
569 		ret = btrfs_next_item(quota_root, path);
570 		if (ret < 0)
571 			goto out;
572 		if (ret)
573 			break;
574 	}
575 out:
576 	btrfs_free_path(path);
577 	fs_info->qgroup_flags |= flags;
578 	if (ret >= 0) {
579 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
580 			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
581 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
582 			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
583 	} else {
584 		ulist_free(fs_info->qgroup_ulist);
585 		fs_info->qgroup_ulist = NULL;
586 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
587 		btrfs_sysfs_del_qgroups(fs_info);
588 	}
589 
590 	return ret < 0 ? ret : 0;
591 }
592 
593 /*
594  * Called in close_ctree() when quota is still enabled.  This verifies we don't
595  * leak some reserved space.
596  *
597  * Return false if no reserved space is left.
598  * Return true if some reserved space is leaked.
599  */
600 bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
601 {
602 	struct rb_node *node;
603 	bool ret = false;
604 
605 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
606 		return ret;
607 	/*
608 	 * Since we're unmounting, there is no race and no need to grab qgroup
609 	 * lock.  And here we don't go post-order, to provide a more
610 	 * user-friendly sorted result.
611 	 */
612 	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
613 		struct btrfs_qgroup *qgroup;
614 		int i;
615 
616 		qgroup = rb_entry(node, struct btrfs_qgroup, node);
617 		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
618 			if (qgroup->rsv.values[i]) {
619 				ret = true;
620 				btrfs_warn(fs_info,
621 		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
622 				   btrfs_qgroup_level(qgroup->qgroupid),
623 				   btrfs_qgroup_subvolid(qgroup->qgroupid),
624 				   i, qgroup->rsv.values[i]);
625 			}
626 		}
627 	}
628 	return ret;
629 }
630 
631 /*
632  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
633  * the first two are in single-threaded paths. And for the third one, we have set
634  * quota_root to be null with qgroup_lock held before, so it is safe to clean
635  * up the in-memory structures without qgroup_lock held.
636  */
637 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
638 {
639 	struct rb_node *n;
640 	struct btrfs_qgroup *qgroup;
641 
642 	while ((n = rb_first(&fs_info->qgroup_tree))) {
643 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
644 		rb_erase(n, &fs_info->qgroup_tree);
645 		__del_qgroup_rb(qgroup);
646 		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
647 		kfree(qgroup);
648 	}
649 	/*
650 	 * We call btrfs_free_qgroup_config() when unmounting
651 	 * filesystem and disabling quota, so we set qgroup_ulist
652 	 * to be null here to avoid double free.
653 	 */
654 	ulist_free(fs_info->qgroup_ulist);
655 	fs_info->qgroup_ulist = NULL;
656 	btrfs_sysfs_del_qgroups(fs_info);
657 }
658 
659 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
660 				    u64 dst)
661 {
662 	int ret;
663 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
664 	struct btrfs_path *path;
665 	struct btrfs_key key;
666 
667 	path = btrfs_alloc_path();
668 	if (!path)
669 		return -ENOMEM;
670 
671 	key.objectid = src;
672 	key.type = BTRFS_QGROUP_RELATION_KEY;
673 	key.offset = dst;
674 
675 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
676 	btrfs_free_path(path);
677 	return ret;
678 }
679 
680 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
681 				    u64 dst)
682 {
683 	int ret;
684 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
685 	struct btrfs_path *path;
686 	struct btrfs_key key;
687 
688 	path = btrfs_alloc_path();
689 	if (!path)
690 		return -ENOMEM;
691 
692 	key.objectid = src;
693 	key.type = BTRFS_QGROUP_RELATION_KEY;
694 	key.offset = dst;
695 
696 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
697 	if (ret < 0)
698 		goto out;
699 
700 	if (ret > 0) {
701 		ret = -ENOENT;
702 		goto out;
703 	}
704 
705 	ret = btrfs_del_item(trans, quota_root, path);
706 out:
707 	btrfs_free_path(path);
708 	return ret;
709 }
710 
711 static int add_qgroup_item(struct btrfs_trans_handle *trans,
712 			   struct btrfs_root *quota_root, u64 qgroupid)
713 {
714 	int ret;
715 	struct btrfs_path *path;
716 	struct btrfs_qgroup_info_item *qgroup_info;
717 	struct btrfs_qgroup_limit_item *qgroup_limit;
718 	struct extent_buffer *leaf;
719 	struct btrfs_key key;
720 
721 	if (btrfs_is_testing(quota_root->fs_info))
722 		return 0;
723 
724 	path = btrfs_alloc_path();
725 	if (!path)
726 		return -ENOMEM;
727 
728 	key.objectid = 0;
729 	key.type = BTRFS_QGROUP_INFO_KEY;
730 	key.offset = qgroupid;
731 
732 	/*
733 	 * Avoid a transaction abort by catching -EEXIST here. In that
734 	 * case, we proceed by re-initializing the existing structure
735 	 * on disk.
736 	 */
737 
738 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
739 				      sizeof(*qgroup_info));
740 	if (ret && ret != -EEXIST)
741 		goto out;
742 
743 	leaf = path->nodes[0];
744 	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
745 				 struct btrfs_qgroup_info_item);
746 	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
747 	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
748 	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
749 	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
750 	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
751 
752 	btrfs_release_path(path);
753 
754 	key.type = BTRFS_QGROUP_LIMIT_KEY;
755 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
756 				      sizeof(*qgroup_limit));
757 	if (ret && ret != -EEXIST)
758 		goto out;
759 
760 	leaf = path->nodes[0];
761 	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
762 				  struct btrfs_qgroup_limit_item);
763 	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
764 	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
765 	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
766 	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
767 	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
768 
769 	ret = 0;
770 out:
771 	btrfs_free_path(path);
772 	return ret;
773 }
774 
775 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
776 {
777 	int ret;
778 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
779 	struct btrfs_path *path;
780 	struct btrfs_key key;
781 
782 	path = btrfs_alloc_path();
783 	if (!path)
784 		return -ENOMEM;
785 
786 	key.objectid = 0;
787 	key.type = BTRFS_QGROUP_INFO_KEY;
788 	key.offset = qgroupid;
789 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
790 	if (ret < 0)
791 		goto out;
792 
793 	if (ret > 0) {
794 		ret = -ENOENT;
795 		goto out;
796 	}
797 
798 	ret = btrfs_del_item(trans, quota_root, path);
799 	if (ret)
800 		goto out;
801 
802 	btrfs_release_path(path);
803 
804 	key.type = BTRFS_QGROUP_LIMIT_KEY;
805 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
806 	if (ret < 0)
807 		goto out;
808 
809 	if (ret > 0) {
810 		ret = -ENOENT;
811 		goto out;
812 	}
813 
814 	ret = btrfs_del_item(trans, quota_root, path);
815 
816 out:
817 	btrfs_free_path(path);
818 	return ret;
819 }
820 
821 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
822 				    struct btrfs_qgroup *qgroup)
823 {
824 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
825 	struct btrfs_path *path;
826 	struct btrfs_key key;
827 	struct extent_buffer *l;
828 	struct btrfs_qgroup_limit_item *qgroup_limit;
829 	int ret;
830 	int slot;
831 
832 	key.objectid = 0;
833 	key.type = BTRFS_QGROUP_LIMIT_KEY;
834 	key.offset = qgroup->qgroupid;
835 
836 	path = btrfs_alloc_path();
837 	if (!path)
838 		return -ENOMEM;
839 
840 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
841 	if (ret > 0)
842 		ret = -ENOENT;
843 
844 	if (ret)
845 		goto out;
846 
847 	l = path->nodes[0];
848 	slot = path->slots[0];
849 	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
850 	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
851 	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
852 	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
853 	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
854 	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
855 out:
856 	btrfs_free_path(path);
857 	return ret;
858 }
859 
860 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
861 				   struct btrfs_qgroup *qgroup)
862 {
863 	struct btrfs_fs_info *fs_info = trans->fs_info;
864 	struct btrfs_root *quota_root = fs_info->quota_root;
865 	struct btrfs_path *path;
866 	struct btrfs_key key;
867 	struct extent_buffer *l;
868 	struct btrfs_qgroup_info_item *qgroup_info;
869 	int ret;
870 	int slot;
871 
872 	if (btrfs_is_testing(fs_info))
873 		return 0;
874 
875 	key.objectid = 0;
876 	key.type = BTRFS_QGROUP_INFO_KEY;
877 	key.offset = qgroup->qgroupid;
878 
879 	path = btrfs_alloc_path();
880 	if (!path)
881 		return -ENOMEM;
882 
883 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
884 	if (ret > 0)
885 		ret = -ENOENT;
886 
887 	if (ret)
888 		goto out;
889 
890 	l = path->nodes[0];
891 	slot = path->slots[0];
892 	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
893 	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
894 	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
895 	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
896 	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
897 	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
898 out:
899 	btrfs_free_path(path);
900 	return ret;
901 }
902 
903 static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
904 {
905 	struct btrfs_fs_info *fs_info = trans->fs_info;
906 	struct btrfs_root *quota_root = fs_info->quota_root;
907 	struct btrfs_path *path;
908 	struct btrfs_key key;
909 	struct extent_buffer *l;
910 	struct btrfs_qgroup_status_item *ptr;
911 	int ret;
912 	int slot;
913 
914 	key.objectid = 0;
915 	key.type = BTRFS_QGROUP_STATUS_KEY;
916 	key.offset = 0;
917 
918 	path = btrfs_alloc_path();
919 	if (!path)
920 		return -ENOMEM;
921 
922 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
923 	if (ret > 0)
924 		ret = -ENOENT;
925 
926 	if (ret)
927 		goto out;
928 
929 	l = path->nodes[0];
930 	slot = path->slots[0];
931 	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
932 	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
933 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
934 	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
935 	btrfs_set_qgroup_status_rescan(l, ptr,
936 				fs_info->qgroup_rescan_progress.objectid);
937 out:
938 	btrfs_free_path(path);
939 	return ret;
940 }
941 
942 /*
943  * called with qgroup_lock held
944  */
945 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
946 				  struct btrfs_root *root)
947 {
948 	struct btrfs_path *path;
949 	struct btrfs_key key;
950 	struct extent_buffer *leaf = NULL;
951 	int ret;
952 	int nr = 0;
953 
954 	path = btrfs_alloc_path();
955 	if (!path)
956 		return -ENOMEM;
957 
958 	key.objectid = 0;
959 	key.offset = 0;
960 	key.type = 0;
961 
962 	while (1) {
963 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
964 		if (ret < 0)
965 			goto out;
966 		leaf = path->nodes[0];
967 		nr = btrfs_header_nritems(leaf);
968 		if (!nr)
969 			break;
970 		/*
971 		 * Delete the leaves one by one,
972 		 * since the whole tree is going
973 		 * to be deleted.
974 		 */
975 		path->slots[0] = 0;
976 		ret = btrfs_del_items(trans, root, path, 0, nr);
977 		if (ret)
978 			goto out;
979 
980 		btrfs_release_path(path);
981 	}
982 	ret = 0;
983 out:
984 	btrfs_free_path(path);
985 	return ret;
986 }
987 
988 int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
989 		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
990 {
991 	struct btrfs_root *quota_root;
992 	struct btrfs_root *tree_root = fs_info->tree_root;
993 	struct btrfs_path *path = NULL;
994 	struct btrfs_qgroup_status_item *ptr;
995 	struct extent_buffer *leaf;
996 	struct btrfs_key key;
997 	struct btrfs_key found_key;
998 	struct btrfs_qgroup *qgroup = NULL;
999 	struct btrfs_qgroup *prealloc = NULL;
1000 	struct btrfs_trans_handle *trans = NULL;
1001 	struct ulist *ulist = NULL;
1002 	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
1003 	int ret = 0;
1004 	int slot;
1005 
1006 	/*
1007 	 * We need to have subvol_sem write locked, to prevent races between
1008 	 * concurrent tasks trying to enable quotas, because we will unlock
1009 	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
1010 	 * and before setting BTRFS_FS_QUOTA_ENABLED.
1011 	 */
1012 	lockdep_assert_held_write(&fs_info->subvol_sem);
1013 
1014 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
1015 		btrfs_err(fs_info,
1016 			  "qgroups are currently unsupported in extent tree v2");
1017 		return -EINVAL;
1018 	}
1019 
1020 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1021 	if (fs_info->quota_root)
1022 		goto out;
1023 
1024 	ulist = ulist_alloc(GFP_KERNEL);
1025 	if (!ulist) {
1026 		ret = -ENOMEM;
1027 		goto out;
1028 	}
1029 
1030 	ret = btrfs_sysfs_add_qgroups(fs_info);
1031 	if (ret < 0)
1032 		goto out;
1033 
1034 	/*
1035 	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
1036 	 * avoid lock acquisition inversion problems (reported by lockdep) between
1037 	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
1038 	 * start a transaction.
1039 	 * After we started the transaction lock qgroup_ioctl_lock again and
1040 	 * check if someone else created the quota root in the meanwhile. If so,
1041 	 * just return success and release the transaction handle.
1042 	 *
1043 	 * Also we don't need to worry about someone else calling
1044 	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
1045 	 * that function returns 0 (success) when the sysfs entries already exist.
1046 	 */
1047 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1048 
1049 	/*
1050 	 * 1 for quota root item
1051 	 * 1 for BTRFS_QGROUP_STATUS item
1052 	 *
1053  * Yet we also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
1054  * per subvolume. However those are not currently reserved since that
1055  * would be a lot of overkill.
1056 	 */
1057 	trans = btrfs_start_transaction(tree_root, 2);
1058 
1059 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1060 	if (IS_ERR(trans)) {
1061 		ret = PTR_ERR(trans);
1062 		trans = NULL;
1063 		goto out;
1064 	}
1065 
1066 	if (fs_info->quota_root)
1067 		goto out;
1068 
1069 	fs_info->qgroup_ulist = ulist;
1070 	ulist = NULL;
1071 
1072 	/*
1073 	 * initially create the quota tree
1074 	 */
1075 	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
1076 	if (IS_ERR(quota_root)) {
1077 		ret =  PTR_ERR(quota_root);
1078 		btrfs_abort_transaction(trans, ret);
1079 		goto out;
1080 	}
1081 
1082 	path = btrfs_alloc_path();
1083 	if (!path) {
1084 		ret = -ENOMEM;
1085 		btrfs_abort_transaction(trans, ret);
1086 		goto out_free_root;
1087 	}
1088 
1089 	key.objectid = 0;
1090 	key.type = BTRFS_QGROUP_STATUS_KEY;
1091 	key.offset = 0;
1092 
1093 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
1094 				      sizeof(*ptr));
1095 	if (ret) {
1096 		btrfs_abort_transaction(trans, ret);
1097 		goto out_free_path;
1098 	}
1099 
1100 	leaf = path->nodes[0];
1101 	ptr = btrfs_item_ptr(leaf, path->slots[0],
1102 				 struct btrfs_qgroup_status_item);
1103 	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
1104 	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
1105 	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
1106 	if (simple) {
1107 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1108 		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
1109 		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
1110 	} else {
1111 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1112 	}
1113 	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
1114 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
1115 	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
1116 
1117 	key.objectid = 0;
1118 	key.type = BTRFS_ROOT_REF_KEY;
1119 	key.offset = 0;
1120 
1121 	btrfs_release_path(path);
1122 	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
1123 	if (ret > 0)
1124 		goto out_add_root;
1125 	if (ret < 0) {
1126 		btrfs_abort_transaction(trans, ret);
1127 		goto out_free_path;
1128 	}
1129 
1130 	while (1) {
1131 		slot = path->slots[0];
1132 		leaf = path->nodes[0];
1133 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1134 
1135 		if (found_key.type == BTRFS_ROOT_REF_KEY) {
1136 
1137 			/* Release locks on tree_root before we access quota_root */
1138 			btrfs_release_path(path);
1139 
1140 			/* We should not have a stray @prealloc pointer. */
1141 			ASSERT(prealloc == NULL);
1142 			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1143 			if (!prealloc) {
1144 				ret = -ENOMEM;
1145 				btrfs_abort_transaction(trans, ret);
1146 				goto out_free_path;
1147 			}
1148 
1149 			ret = add_qgroup_item(trans, quota_root,
1150 					      found_key.offset);
1151 			if (ret) {
1152 				btrfs_abort_transaction(trans, ret);
1153 				goto out_free_path;
1154 			}
1155 
1156 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
1157 			prealloc = NULL;
1158 			if (IS_ERR(qgroup)) {
1159 				ret = PTR_ERR(qgroup);
1160 				btrfs_abort_transaction(trans, ret);
1161 				goto out_free_path;
1162 			}
1163 			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1164 			if (ret < 0) {
1165 				btrfs_abort_transaction(trans, ret);
1166 				goto out_free_path;
1167 			}
1168 			ret = btrfs_search_slot_for_read(tree_root, &found_key,
1169 							 path, 1, 0);
1170 			if (ret < 0) {
1171 				btrfs_abort_transaction(trans, ret);
1172 				goto out_free_path;
1173 			}
1174 			if (ret > 0) {
1175 				/*
1176 				 * Shouldn't happen, but in case it does we
1177 				 * don't need to do the btrfs_next_item, just
1178 				 * continue.
1179 				 */
1180 				continue;
1181 			}
1182 		}
1183 		ret = btrfs_next_item(tree_root, path);
1184 		if (ret < 0) {
1185 			btrfs_abort_transaction(trans, ret);
1186 			goto out_free_path;
1187 		}
1188 		if (ret)
1189 			break;
1190 	}
1191 
1192 out_add_root:
1193 	btrfs_release_path(path);
1194 	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1195 	if (ret) {
1196 		btrfs_abort_transaction(trans, ret);
1197 		goto out_free_path;
1198 	}
1199 
1200 	ASSERT(prealloc == NULL);
1201 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1202 	if (!prealloc) {
1203 		ret = -ENOMEM;
1204 		goto out_free_path;
1205 	}
1206 	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
1207 	prealloc = NULL;
1208 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1209 	if (ret < 0) {
1210 		btrfs_abort_transaction(trans, ret);
1211 		goto out_free_path;
1212 	}
1213 
1214 	fs_info->qgroup_enable_gen = trans->transid;
1215 
1216 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1217 	/*
1218 	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
1219 	 * a deadlock with tasks concurrently doing other qgroup operations, such
1220 	 * adding/removing qgroups or adding/deleting qgroup relations for example,
1221 	 * because all qgroup operations first start or join a transaction and then
1222 	 * lock the qgroup_ioctl_lock mutex.
1223 	 * We are safe from a concurrent task trying to enable quotas, by calling
1224 	 * this function, since we are serialized by fs_info->subvol_sem.
1225 	 */
1226 	ret = btrfs_commit_transaction(trans);
1227 	trans = NULL;
1228 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1229 	if (ret)
1230 		goto out_free_path;
1231 
1232 	/*
1233 	 * Set quota enabled flag after committing the transaction, to avoid
1234 	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1235 	 * creation.
1236 	 */
1237 	spin_lock(&fs_info->qgroup_lock);
1238 	fs_info->quota_root = quota_root;
1239 	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1240 	spin_unlock(&fs_info->qgroup_lock);
1241 
1242 	/* Skip rescan for simple qgroups. */
1243 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
1244 		goto out_free_path;
1245 
1246 	ret = qgroup_rescan_init(fs_info, 0, 1);
1247 	if (!ret) {
1248 		qgroup_rescan_zero_tracking(fs_info);
1249 		fs_info->qgroup_rescan_running = true;
1250 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
1251 				 &fs_info->qgroup_rescan_work);
1252 	} else {
1253 		/*
1254 		 * We have set both BTRFS_FS_QUOTA_ENABLED and
1255 		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
1256 		 * -EINPROGRESS. That can happen because someone started the
1257 		 * rescan worker by calling quota rescan ioctl before we
1258 		 * attempted to initialize the rescan worker. Failure due to
1259 		 * quotas disabled in the meanwhile is not possible, because
1260 		 * we are holding a write lock on fs_info->subvol_sem, which
1261 		 * is also acquired when disabling quotas.
1262 		 * Ignore such error, and any other error would need to undo
1263 		 * everything we did in the transaction we just committed.
1264 		 */
1265 		ASSERT(ret == -EINPROGRESS);
1266 		ret = 0;
1267 	}
1268 
1269 out_free_path:
1270 	btrfs_free_path(path);
1271 out_free_root:
1272 	if (ret)
1273 		btrfs_put_root(quota_root);
1274 out:
1275 	if (ret) {
1276 		ulist_free(fs_info->qgroup_ulist);
1277 		fs_info->qgroup_ulist = NULL;
1278 		btrfs_sysfs_del_qgroups(fs_info);
1279 	}
1280 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1281 	if (ret && trans)
1282 		btrfs_end_transaction(trans);
1283 	else if (trans)
1284 		ret = btrfs_end_transaction(trans);
1285 	ulist_free(ulist);
1286 	kfree(prealloc);
1287 	return ret;
1288 }
1289 
1290 /*
1291  * It is possible to have outstanding ordered extents which reserved bytes
1292  * before we disabled. We need to fully flush delalloc, ordered extents, and a
1293  * commit to ensure that we don't leak such reservations, only to have them
1294  * come back if we re-enable.
1295  *
1296  * - enable simple quotas
1297  * - reserve space
1298  * - release it, store rsv_bytes in OE
1299  * - disable quotas
1300  * - enable simple quotas (qgroup rsv are all 0)
1301  * - OE finishes
1302  * - run delayed refs
1303  * - free rsv_bytes, resulting in miscounting or even underflow
1304  */
1305 static int flush_reservations(struct btrfs_fs_info *fs_info)
1306 {
1307 	int ret;
1308 
1309 	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
1310 	if (ret)
1311 		return ret;
1312 	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
1313 
1314 	return btrfs_commit_current_transaction(fs_info->tree_root);
1315 }
1316 
1317 int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1318 {
1319 	struct btrfs_root *quota_root = NULL;
1320 	struct btrfs_trans_handle *trans = NULL;
1321 	int ret = 0;
1322 
1323 	/*
1324 	 * We need to have subvol_sem write locked to prevent races with
1325 	 * snapshot creation.
1326 	 */
1327 	lockdep_assert_held_write(&fs_info->subvol_sem);
1328 
1329 	/*
1330 	 * Relocation will mess with backrefs, so make sure we have the
1331 	 * cleaner_mutex held to protect us from relocate.
1332 	 */
1333 	lockdep_assert_held(&fs_info->cleaner_mutex);
1334 
1335 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1336 	if (!fs_info->quota_root)
1337 		goto out;
1338 
1339 	/*
1340 	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
1341 	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
1342 	 * to lock that mutex while holding a transaction handle and the rescan
1343 	 * worker needs to commit a transaction.
1344 	 */
1345 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1346 
1347 	/*
1348 	 * Request qgroup rescan worker to complete and wait for it. This wait
1349 	 * must be done before transaction start for quota disable since it may
1350 	 * deadlock with transaction by the qgroup rescan worker.
1351 	 */
1352 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1353 	btrfs_qgroup_wait_for_completion(fs_info, false);
1354 
1355 	/*
1356 	 * We have nothing held here and no trans handle, just return the error
1357 	 * if there is one.
1358 	 */
1359 	ret = flush_reservations(fs_info);
1360 	if (ret)
1361 		return ret;
1362 
1363 	/*
1364 	 * 1 For the root item
1365 	 *
1366 	 * We should also reserve enough items for the quota tree deletion in
1367 	 * btrfs_clean_quota_tree but this is not done.
1368 	 *
1369 	 * Also, we must always start a transaction without holding the mutex
1370 	 * qgroup_ioctl_lock, see btrfs_quota_enable().
1371 	 */
1372 	trans = btrfs_start_transaction(fs_info->tree_root, 1);
1373 
1374 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1375 	if (IS_ERR(trans)) {
1376 		ret = PTR_ERR(trans);
1377 		trans = NULL;
1378 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1379 		goto out;
1380 	}
1381 
1382 	if (!fs_info->quota_root)
1383 		goto out;
1384 
1385 	spin_lock(&fs_info->qgroup_lock);
1386 	quota_root = fs_info->quota_root;
1387 	fs_info->quota_root = NULL;
1388 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1389 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1390 	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1391 	spin_unlock(&fs_info->qgroup_lock);
1392 
1393 	btrfs_free_qgroup_config(fs_info);
1394 
1395 	ret = btrfs_clean_quota_tree(trans, quota_root);
1396 	if (ret) {
1397 		btrfs_abort_transaction(trans, ret);
1398 		goto out;
1399 	}
1400 
1401 	ret = btrfs_del_root(trans, &quota_root->root_key);
1402 	if (ret) {
1403 		btrfs_abort_transaction(trans, ret);
1404 		goto out;
1405 	}
1406 
1407 	spin_lock(&fs_info->trans_lock);
1408 	list_del(&quota_root->dirty_list);
1409 	spin_unlock(&fs_info->trans_lock);
1410 
1411 	btrfs_tree_lock(quota_root->node);
1412 	btrfs_clear_buffer_dirty(trans, quota_root->node);
1413 	btrfs_tree_unlock(quota_root->node);
1414 	ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
1415 				    quota_root->node, 0, 1);
1416 
1417 	if (ret < 0)
1418 		btrfs_abort_transaction(trans, ret);
1419 
1420 out:
1421 	btrfs_put_root(quota_root);
1422 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1423 	if (ret && trans)
1424 		btrfs_end_transaction(trans);
1425 	else if (trans)
1426 		ret = btrfs_commit_transaction(trans);
1427 	return ret;
1428 }
1429 
1430 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1431 			 struct btrfs_qgroup *qgroup)
1432 {
1433 	if (list_empty(&qgroup->dirty))
1434 		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1435 }
1436 
1437 static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
1438 {
1439 	if (!list_empty(&qgroup->iterator))
1440 		return;
1441 
1442 	list_add_tail(&qgroup->iterator, head);
1443 }
1444 
1445 static void qgroup_iterator_clean(struct list_head *head)
1446 {
1447 	while (!list_empty(head)) {
1448 		struct btrfs_qgroup *qgroup;
1449 
1450 		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
1451 		list_del_init(&qgroup->iterator);
1452 	}
1453 }
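
/*
 * The iterator list_head embedded in each qgroup doubles as a "seen" marker:
 * qgroup_iterator_add() skips qgroups that are already linked, so a traversal
 * can walk parent chains without allocating and without visiting any qgroup
 * twice.  qgroup_iterator_clean() must run before the next traversal, as its
 * list_del_init() is what re-arms the marker.
 */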
1454 
1455 /*
1456  * The easy accounting case: we're updating a qgroup relationship whose child
1457  * qgroup only has exclusive extents.
1458  *
1459  * In this case, all exclusive extents will also be exclusive for the parent,
1460  * so excl/rfer just get added/removed.
1461  *
1462  * The same applies to qgroup reservation space, which must also be added to
1463  * or removed from the parent.
1464  * Otherwise, when the child later releases reservation space, the parent would
1465  * underflow its reservation (in the relationship-adding case).
1466  *
1467  * Caller should hold fs_info->qgroup_lock.
1468  */
1469 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
1470 				    struct btrfs_qgroup *src, int sign)
1471 {
1472 	struct btrfs_qgroup *qgroup;
1473 	struct btrfs_qgroup *cur;
1474 	LIST_HEAD(qgroup_list);
1475 	u64 num_bytes = src->excl;
1476 	int ret = 0;
1477 
1478 	qgroup = find_qgroup_rb(fs_info, ref_root);
1479 	if (!qgroup)
1480 		goto out;
1481 
1482 	qgroup_iterator_add(&qgroup_list, qgroup);
1483 	list_for_each_entry(cur, &qgroup_list, iterator) {
1484 		struct btrfs_qgroup_list *glist;
1485 
1486 		qgroup->rfer += sign * num_bytes;
1487 		qgroup->rfer_cmpr += sign * num_bytes;
1488 
1489 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1490 		qgroup->excl += sign * num_bytes;
1491 		qgroup->excl_cmpr += sign * num_bytes;
1492 
1493 		if (sign > 0)
1494 			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1495 		else
1496 			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1497 		qgroup_dirty(fs_info, qgroup);
1498 
1499 		/* Append parent qgroups to @qgroup_list. */
1500 		list_for_each_entry(glist, &qgroup->groups, next_group)
1501 			qgroup_iterator_add(&qgroup_list, glist->group);
1502 	}
1503 	ret = 0;
1504 out:
1505 	qgroup_iterator_clean(&qgroup_list);
1506 	return ret;
1507 }
1508 
1509 
1510 /*
1511  * Quick path for updating qgroup with only excl refs.
1512  *
1513  * In that case, updating all parents will be enough.
1514  * Otherwise we need to do a full rescan.
1515  * Caller should also hold fs_info->qgroup_lock.
1516  *
1517  * Return 0 for a quick update, >0 if a full rescan is needed (in which
1518  * case the INCONSISTENT flag is also set).
1519  * Return <0 for other errors.
1520  */
1521 static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1522 				   u64 src, u64 dst, int sign)
1523 {
1524 	struct btrfs_qgroup *qgroup;
1525 	int ret = 1;
1526 
1527 	qgroup = find_qgroup_rb(fs_info, src);
1528 	if (!qgroup)
1529 		goto out;
1530 	if (qgroup->excl == qgroup->rfer) {
1531 		ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
1532 		if (ret < 0)
1533 			goto out;
1534 		ret = 0;
1535 	}
1536 out:
1537 	if (ret)
1538 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1539 	return ret;
1540 }
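
/*
 * The excl == rfer check above is what makes the quick path safe: it means
 * every extent the source qgroup refers to is exclusively owned, so its whole
 * footprint can be pushed to (or pulled from) the parent without a rescan.
 * Any shared extent breaks that assumption, hence the fallthrough that marks
 * the status inconsistent.
 */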
1541 
1542 /*
1543  * Add relation between @src and @dst qgroup. The @prealloc is allocated by the
1544  * callers and transferred here (either used or freed on error).
1545  */
1546 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
1547 			      struct btrfs_qgroup_list *prealloc)
1548 {
1549 	struct btrfs_fs_info *fs_info = trans->fs_info;
1550 	struct btrfs_qgroup *parent;
1551 	struct btrfs_qgroup *member;
1552 	struct btrfs_qgroup_list *list;
1553 	int ret = 0;
1554 
1555 	ASSERT(prealloc);
1556 
1557 	/* Check the level of src and dst first */
1558 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1559 		return -EINVAL;
1560 
1561 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1562 	if (!fs_info->quota_root) {
1563 		ret = -ENOTCONN;
1564 		goto out;
1565 	}
1566 	member = find_qgroup_rb(fs_info, src);
1567 	parent = find_qgroup_rb(fs_info, dst);
1568 	if (!member || !parent) {
1569 		ret = -EINVAL;
1570 		goto out;
1571 	}
1572 
1573 	/* Check if such a qgroup relation exists first. */
1574 	list_for_each_entry(list, &member->groups, next_group) {
1575 		if (list->group == parent) {
1576 			ret = -EEXIST;
1577 			goto out;
1578 		}
1579 	}
1580 
1581 	ret = add_qgroup_relation_item(trans, src, dst);
1582 	if (ret)
1583 		goto out;
1584 
1585 	ret = add_qgroup_relation_item(trans, dst, src);
1586 	if (ret) {
1587 		del_qgroup_relation_item(trans, src, dst);
1588 		goto out;
1589 	}
1590 
1591 	spin_lock(&fs_info->qgroup_lock);
1592 	ret = __add_relation_rb(prealloc, member, parent);
1593 	prealloc = NULL;
1594 	if (ret < 0) {
1595 		spin_unlock(&fs_info->qgroup_lock);
1596 		goto out;
1597 	}
1598 	ret = quick_update_accounting(fs_info, src, dst, 1);
1599 	spin_unlock(&fs_info->qgroup_lock);
1600 out:
1601 	kfree(prealloc);
1602 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1603 	return ret;
1604 }
1605 
1606 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1607 				 u64 dst)
1608 {
1609 	struct btrfs_fs_info *fs_info = trans->fs_info;
1610 	struct btrfs_qgroup *parent;
1611 	struct btrfs_qgroup *member;
1612 	struct btrfs_qgroup_list *list;
1613 	bool found = false;
1614 	int ret = 0;
1615 	int ret2;
1616 
1617 	if (!fs_info->quota_root) {
1618 		ret = -ENOTCONN;
1619 		goto out;
1620 	}
1621 
1622 	member = find_qgroup_rb(fs_info, src);
1623 	parent = find_qgroup_rb(fs_info, dst);
1624 	/*
1625 	 * If the parent/member pair doesn't exist, then try to delete the dead
1626 	 * relation items only.
1627 	 */
1628 	if (!member || !parent)
1629 		goto delete_item;
1630 
1631 	/* check if such qgroup relation exist firstly */
1632 	list_for_each_entry(list, &member->groups, next_group) {
1633 		if (list->group == parent) {
1634 			found = true;
1635 			break;
1636 		}
1637 	}
1638 
1639 delete_item:
1640 	ret = del_qgroup_relation_item(trans, src, dst);
1641 	if (ret < 0 && ret != -ENOENT)
1642 		goto out;
1643 	ret2 = del_qgroup_relation_item(trans, dst, src);
1644 	if (ret2 < 0 && ret2 != -ENOENT)
1645 		goto out;
1646 
1647 	/* At least one deletion succeeded, return 0 */
1648 	if (!ret || !ret2)
1649 		ret = 0;
1650 
1651 	if (found) {
1652 		spin_lock(&fs_info->qgroup_lock);
1653 		del_relation_rb(fs_info, src, dst);
1654 		ret = quick_update_accounting(fs_info, src, dst, -1);
1655 		spin_unlock(&fs_info->qgroup_lock);
1656 	}
1657 out:
1658 	return ret;
1659 }
1660 
1661 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1662 			      u64 dst)
1663 {
1664 	struct btrfs_fs_info *fs_info = trans->fs_info;
1665 	int ret = 0;
1666 
1667 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1668 	ret = __del_qgroup_relation(trans, src, dst);
1669 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1670 
1671 	return ret;
1672 }
1673 
1674 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1675 {
1676 	struct btrfs_fs_info *fs_info = trans->fs_info;
1677 	struct btrfs_root *quota_root;
1678 	struct btrfs_qgroup *qgroup;
1679 	struct btrfs_qgroup *prealloc = NULL;
1680 	int ret = 0;
1681 
1682 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
1683 		return 0;
1684 
1685 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1686 	if (!fs_info->quota_root) {
1687 		ret = -ENOTCONN;
1688 		goto out;
1689 	}
1690 	quota_root = fs_info->quota_root;
1691 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1692 	if (qgroup) {
1693 		ret = -EEXIST;
1694 		goto out;
1695 	}
1696 
1697 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1698 	if (!prealloc) {
1699 		ret = -ENOMEM;
1700 		goto out;
1701 	}
1702 
1703 	ret = add_qgroup_item(trans, quota_root, qgroupid);
1704 	if (ret)
1705 		goto out;
1706 
1707 	spin_lock(&fs_info->qgroup_lock);
1708 	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
1709 	spin_unlock(&fs_info->qgroup_lock);
1710 	prealloc = NULL;
1711 
1712 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1713 out:
1714 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1715 	kfree(prealloc);
1716 	return ret;
1717 }
1718 
1719 /*
1720  * Return 0 if we cannot delete the qgroup (not empty, or has children, etc).
1721  * Return >0 if we can delete the qgroup.
1722  * Return <0 for other errors during tree search.
1723  */
1724 static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
1725 {
1726 	struct btrfs_key key;
1727 	struct btrfs_path *path;
1728 	int ret;
1729 
1730 	/*
1731 	 * Squota would never be inconsistent, but there can still be a case
1732 	 * where a dropped subvolume still has qgroup numbers, and squota
1733 	 * relies on such qgroup for future accounting.
1734 	 *
1735 	 * So for squota, do not allow dropping any non-zero qgroup.
1736 	 */
1737 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
1738 	    (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
1739 		return 0;
1740 
1741 	/* A higher level qgroup can only be deleted if it has no children. */
1742 	if (btrfs_qgroup_level(qgroup->qgroupid)) {
1743 		if (!list_empty(&qgroup->members))
1744 			return 0;
1745 		return 1;
1746 	}
1747 
1748 	/*
1749 	 * For level-0 qgroups, we can only delete one if there is no subvolume
1750 	 * for it.
1751 	 * This means that even if a subvolume is unlinked but not yet fully
1752 	 * dropped, we can not delete its qgroup.
1753 	 */
1754 	key.objectid = qgroup->qgroupid;
1755 	key.type = BTRFS_ROOT_ITEM_KEY;
1756 	key.offset = -1ULL;
1757 	path = btrfs_alloc_path();
1758 	if (!path)
1759 		return -ENOMEM;
1760 
1761 	ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
1762 	btrfs_free_path(path);
1763 	/*
1764 	 * The @ret from btrfs_find_root() exactly matches our definition for
1765 	 * the return value, thus can be returned directly.
1766 	 */
1767 	return ret;
1768 }
1769 
1770 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1771 {
1772 	struct btrfs_fs_info *fs_info = trans->fs_info;
1773 	struct btrfs_qgroup *qgroup;
1774 	struct btrfs_qgroup_list *list;
1775 	int ret = 0;
1776 
1777 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1778 	if (!fs_info->quota_root) {
1779 		ret = -ENOTCONN;
1780 		goto out;
1781 	}
1782 
1783 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1784 	if (!qgroup) {
1785 		ret = -ENOENT;
1786 		goto out;
1787 	}
1788 
1789 	ret = can_delete_qgroup(fs_info, qgroup);
1790 	if (ret < 0)
1791 		goto out;
1792 	if (ret == 0) {
1793 		ret = -EBUSY;
1794 		goto out;
1795 	}
1796 
1797 	/* Check if there are no children of this qgroup */
1798 	if (!list_empty(&qgroup->members)) {
1799 		ret = -EBUSY;
1800 		goto out;
1801 	}
1802 
1803 	ret = del_qgroup_item(trans, qgroupid);
1804 	if (ret && ret != -ENOENT)
1805 		goto out;
1806 
1807 	while (!list_empty(&qgroup->groups)) {
1808 		list = list_first_entry(&qgroup->groups,
1809 					struct btrfs_qgroup_list, next_group);
1810 		ret = __del_qgroup_relation(trans, qgroupid,
1811 					    list->group->qgroupid);
1812 		if (ret)
1813 			goto out;
1814 	}
1815 
1816 	spin_lock(&fs_info->qgroup_lock);
1817 	/*
1818 	 * Warn on reserved space. The qgroup should have no child qgroup nor a
1819 	 * corresponding subvolume at this point.
1820 	 * Thus its reserved space should all be zero, no matter whether the
1821 	 * qgroup is consistent or which mode is in use.
1822 	 */
1823 	if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
1824 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
1825 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
1826 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
1827 		btrfs_warn_rl(fs_info,
1828 "to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
1829 			      btrfs_qgroup_level(qgroup->qgroupid),
1830 			      btrfs_qgroup_subvolid(qgroup->qgroupid),
1831 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
1832 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
1833 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
1834 
1835 	}
1836 	/*
1837 	 * The same for rfer/excl numbers, but that's only if our qgroup is
1838 	 * consistent and if it's in regular qgroup mode.
1839 	 * For simple mode it's not as accurate thus we can hit non-zero values
1840 	 * very frequently.
1841 	 */
1842 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
1843 	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
1844 		if (qgroup->rfer || qgroup->excl ||
1845 		    qgroup->rfer_cmpr || qgroup->excl_cmpr) {
1846 			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
1847 			btrfs_warn_rl(fs_info,
1848 "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
1849 				      btrfs_qgroup_level(qgroup->qgroupid),
1850 				      btrfs_qgroup_subvolid(qgroup->qgroupid),
1851 				      qgroup->rfer, qgroup->rfer_cmpr,
1852 				      qgroup->excl, qgroup->excl_cmpr);
1853 			qgroup_mark_inconsistent(fs_info);
1854 		}
1855 	}
1856 	del_qgroup_rb(fs_info, qgroupid);
1857 	spin_unlock(&fs_info->qgroup_lock);
1858 
1859 	/*
1860 	 * Remove the qgroup from sysfs now without holding the qgroup_lock
1861 	 * spinlock, since the sysfs_remove_group() function needs to take
1862 	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1863 	 */
1864 	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1865 	kfree(qgroup);
1866 out:
1867 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1868 	return ret;
1869 }
1870 
1871 int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
1872 {
1873 	struct btrfs_trans_handle *trans;
1874 	int ret;
1875 
1876 	if (!is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || !fs_info->quota_root)
1877 		return 0;
1878 
1879 	/*
1880 	 * Commit current transaction to make sure all the rfer/excl numbers
1881 	 * get updated.
1882 	 */
1883 	ret = btrfs_commit_current_transaction(fs_info->quota_root);
1884 	if (ret < 0)
1885 		return ret;
1886 
1887 	/* Start new trans to delete the qgroup info and limit items. */
1888 	trans = btrfs_start_transaction(fs_info->quota_root, 2);
1889 	if (IS_ERR(trans))
1890 		return PTR_ERR(trans);
1891 	ret = btrfs_remove_qgroup(trans, subvolid);
1892 	btrfs_end_transaction(trans);
1893 	/*
1894 	 * -EBUSY: it's squota and the subvolume still has numbers needed for
1895 	 * future accounting, in which case we can not delete it.  Just skip it.
1896 	 *
1897 	 * -ENOENT: the qgroup was already removed by a qgroup rescan.
1898 	 * In both cases we're safe to ignore the error.
1899 	 */
1900 	if (ret == -EBUSY || ret == -ENOENT)
1901 		ret = 0;
1902 	return ret;
1903 }
1904 
1905 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1906 		       struct btrfs_qgroup_limit *limit)
1907 {
1908 	struct btrfs_fs_info *fs_info = trans->fs_info;
1909 	struct btrfs_qgroup *qgroup;
1910 	int ret = 0;
1911 	/* Sometimes we want to clear the limit on a qgroup.  To meet this
1912 	 * requirement, we treat -1 as a special value which tells the kernel
1913 	 * to clear the corresponding limit on this qgroup.
1914 	 */
1915 	const u64 CLEAR_VALUE = -1;
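
	/*
	 * A hypothetical caller sketch (not taken from this file, shown for
	 * illustration only): to clear just the max_rfer limit, one would
	 * pass:
	 *
	 *	struct btrfs_qgroup_limit limit = {
	 *		.flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
	 *		.max_rfer = (u64)-1,
	 *	};
	 *	ret = btrfs_limit_qgroup(trans, qgroupid, &limit);
	 *
	 * The matching branch below then drops the flag bit and zeroes
	 * qgroup->max_rfer.
	 */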
1916 
1917 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1918 	if (!fs_info->quota_root) {
1919 		ret = -ENOTCONN;
1920 		goto out;
1921 	}
1922 
1923 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1924 	if (!qgroup) {
1925 		ret = -ENOENT;
1926 		goto out;
1927 	}
1928 
1929 	spin_lock(&fs_info->qgroup_lock);
1930 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1931 		if (limit->max_rfer == CLEAR_VALUE) {
1932 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1933 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1934 			qgroup->max_rfer = 0;
1935 		} else {
1936 			qgroup->max_rfer = limit->max_rfer;
1937 		}
1938 	}
1939 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1940 		if (limit->max_excl == CLEAR_VALUE) {
1941 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1942 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1943 			qgroup->max_excl = 0;
1944 		} else {
1945 			qgroup->max_excl = limit->max_excl;
1946 		}
1947 	}
1948 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1949 		if (limit->rsv_rfer == CLEAR_VALUE) {
1950 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1951 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1952 			qgroup->rsv_rfer = 0;
1953 		} else {
1954 			qgroup->rsv_rfer = limit->rsv_rfer;
1955 		}
1956 	}
1957 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1958 		if (limit->rsv_excl == CLEAR_VALUE) {
1959 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1960 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1961 			qgroup->rsv_excl = 0;
1962 		} else {
1963 			qgroup->rsv_excl = limit->rsv_excl;
1964 		}
1965 	}
1966 	qgroup->lim_flags |= limit->flags;
1967 
1968 	spin_unlock(&fs_info->qgroup_lock);
1969 
1970 	ret = update_qgroup_limit_item(trans, qgroup);
1971 	if (ret) {
1972 		qgroup_mark_inconsistent(fs_info);
1973 		btrfs_info(fs_info, "unable to update quota limit for %llu",
1974 		       qgroupid);
1975 	}
1976 
1977 out:
1978 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1979 	return ret;
1980 }
1981 
1982 /*
1983  * Inform qgroup to trace one dirty extent, whose info is recorded in @record,
1984  * so qgroup can account it at transaction committing time.
1985  *
1986  * No-lock version: the caller must hold the delayed ref lock and have @record
1987  * allocated, then call btrfs_qgroup_trace_extent_post() outside the lock.
1988  *
1989  * Return 0 for a successful insertion.
1990  * Return >0 for existing record, caller can free @record safely.
1991  * Return <0 for insertion failure, caller can free @record safely.
1992  */
1993 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1994 				     struct btrfs_delayed_ref_root *delayed_refs,
1995 				     struct btrfs_qgroup_extent_record *record,
1996 				     u64 bytenr)
1997 {
1998 	struct btrfs_qgroup_extent_record *existing, *ret;
1999 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2000 
2001 	if (!btrfs_qgroup_full_accounting(fs_info))
2002 		return 1;
2003 
2004 #if BITS_PER_LONG == 32
2005 	if (bytenr >= MAX_LFS_FILESIZE) {
2006 		btrfs_err_rl(fs_info,
2007 "qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
2008 			     bytenr);
2009 		btrfs_err_32bit_limit(fs_info);
2010 		return -EOVERFLOW;
2011 	}
2012 #endif
2013 
2014 	trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
2015 
2016 	xa_lock(&delayed_refs->dirty_extents);
2017 	existing = xa_load(&delayed_refs->dirty_extents, index);
2018 	if (existing) {
2019 		if (record->data_rsv && !existing->data_rsv) {
2020 			existing->data_rsv = record->data_rsv;
2021 			existing->data_rsv_refroot = record->data_rsv_refroot;
2022 		}
2023 		xa_unlock(&delayed_refs->dirty_extents);
2024 		return 1;
2025 	}
2026 
2027 	ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
2028 	xa_unlock(&delayed_refs->dirty_extents);
2029 	if (xa_is_err(ret)) {
2030 		qgroup_mark_inconsistent(fs_info);
2031 		return xa_err(ret);
2032 	}
2033 
2034 	return 0;
2035 }
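
/*
 * A minimal sketch of the intended calling protocol for the pair above,
 * mirroring what btrfs_qgroup_trace_extent() below actually does (error
 * handling omitted):
 *
 *	record = kzalloc(sizeof(*record), GFP_NOFS);
 *	xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS);
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs,
 *					       record, bytenr);
 *	if (ret)
 *		kfree(record);
 *	else
 *		ret = btrfs_qgroup_trace_extent_post(trans, record, bytenr);
 */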
2036 
2037 /*
2038  * Post handler after qgroup_trace_extent_nolock().
2039  *
2040  * NOTE: Current qgroup does the expensive backref walk at transaction
2041  * committing time with TRANS_STATE_COMMIT_DOING, which blocks new
2042  * incoming transactions.
2043  * This is designed to allow btrfs_find_all_roots() to get correct new_roots
2044  * result.
2045  *
2046  * However for old_roots there is no need to do backref walk at that time,
2047  * since we search commit roots for the backref walk, and the result will
2048  * always be correct.
2049  *
2050  * Due to the nature of the no-lock version, we can't do the backref walk there.
2051  * So we must call btrfs_qgroup_trace_extent_post() after exiting
2052  * spinlock context.
2053  *
2054  * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
2055  * using current root, then we can move all expensive backref walk out of
2056  * transaction committing, but not now as qgroup accounting will be wrong again.
2057  */
2058 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
2059 				   struct btrfs_qgroup_extent_record *qrecord,
2060 				   u64 bytenr)
2061 {
2062 	struct btrfs_fs_info *fs_info = trans->fs_info;
2063 	struct btrfs_backref_walk_ctx ctx = {
2064 		.bytenr = bytenr,
2065 		.fs_info = fs_info,
2066 	};
2067 	int ret;
2068 
2069 	if (!btrfs_qgroup_full_accounting(fs_info))
2070 		return 0;
2071 	/*
2072 	 * We are always called in a context where we are already holding a
2073 	 * transaction handle. Often we are called when adding a data delayed
2074 	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
2075 	 * in which case we will be holding a write lock on extent buffer from a
2076 	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
2077 	 * acquire fs_info->commit_root_sem, because that is a higher level lock
2078 	 * that must be acquired before locking any extent buffers.
2079 	 *
2080 	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
2081 	 * but we can't pass it a non-NULL transaction handle, because otherwise
2082 	 * it would not use commit roots and would lock extent buffers, causing
2083 	 * a deadlock if it ends up trying to read lock the same extent buffer
2084 	 * that was previously write locked at btrfs_truncate_inode_items().
2085 	 *
2086 	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
2087 	 * explicitly tell it to not acquire the commit_root_sem - if we are
2088 	 * holding a transaction handle we don't need its protection.
2089 	 */
2090 	ASSERT(trans != NULL);
2091 
2092 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2093 		return 0;
2094 
2095 	ret = btrfs_find_all_roots(&ctx, true);
2096 	if (ret < 0) {
2097 		qgroup_mark_inconsistent(fs_info);
2098 		btrfs_warn(fs_info,
2099 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
2100 			ret);
2101 		return 0;
2102 	}
2103 
2104 	/*
2105 	 * Here we don't need to take the lock of
2106 	 * trans->transaction->delayed_refs, since an inserted qrecord won't
2107 	 * be deleted; only qrecord->node may be modified (by a new qrecord
2108 	 * insert).
2109 	 * So modifying qrecord->old_roots is safe here.
2110 	 */
2111 	qrecord->old_roots = ctx.roots;
2112 	return 0;
2113 }
2114 
2115 /*
2116  * Inform qgroup to trace one dirty extent, specified by @bytenr and
2117  * @num_bytes.
2118  * So qgroup can account it at transaction commit time.
2119  *
2120  * Better encapsulated version, with memory allocation and backref walk for
2121  * commit roots.
2122  * So this can sleep.
2123  *
2124  * Return 0 if the operation is done.
2125  * Return <0 for errors, like memory allocation failure or an invalid
2126  * parameter (NULL trans).
2127  */
2128 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2129 			      u64 num_bytes)
2130 {
2131 	struct btrfs_fs_info *fs_info = trans->fs_info;
2132 	struct btrfs_qgroup_extent_record *record;
2133 	struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
2134 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2135 	int ret;
2136 
2137 	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
2138 		return 0;
2139 	record = kzalloc(sizeof(*record), GFP_NOFS);
2140 	if (!record)
2141 		return -ENOMEM;
2142 
2143 	if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
2144 		kfree(record);
2145 		return -ENOMEM;
2146 	}
2147 
2148 	record->num_bytes = num_bytes;
2149 
2150 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
2151 	if (ret) {
2152 		/* Clean up if insertion fails or item exists. */
2153 		xa_release(&delayed_refs->dirty_extents, index);
2154 		kfree(record);
2155 		return 0;
2156 	}
2157 	return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
2158 }
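
/*
 * Typical usage, as seen later in this file when walking metadata: to have
 * qgroup trace one tree block, pass its logical start and the node size:
 *
 *	ret = btrfs_qgroup_trace_extent(trans, eb->start, fs_info->nodesize);
 */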
2159 
2160 /*
2161  * Inform qgroup to trace all data extents referenced by a leaf's items.
2162  *
2163  * Return 0 for success.
2164  * Return <0 for errors (e.g. ENOMEM).
2165  */
2166 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
2167 				  struct extent_buffer *eb)
2168 {
2169 	struct btrfs_fs_info *fs_info = trans->fs_info;
2170 	int nr = btrfs_header_nritems(eb);
2171 	int i, extent_type, ret;
2172 	struct btrfs_key key;
2173 	struct btrfs_file_extent_item *fi;
2174 	u64 bytenr, num_bytes;
2175 
2176 	/* We can be called directly from walk_up_proc() */
2177 	if (!btrfs_qgroup_full_accounting(fs_info))
2178 		return 0;
2179 
2180 	for (i = 0; i < nr; i++) {
2181 		btrfs_item_key_to_cpu(eb, &key, i);
2182 
2183 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2184 			continue;
2185 
2186 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2187 		/* Filter out non-qgroup-accountable extents. */
2188 		extent_type = btrfs_file_extent_type(eb, fi);
2189 
2190 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
2191 			continue;
2192 
2193 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
2194 		if (!bytenr)
2195 			continue;
2196 
2197 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
2198 
2199 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
2200 		if (ret)
2201 			return ret;
2202 	}
2203 	cond_resched();
2204 	return 0;
2205 }
2206 
2207 /*
2208  * Walk up the tree from the bottom, freeing leaves and any interior
2209  * nodes which have had all slots visited. If a node (leaf or
2210  * interior) is freed, the node above it will have its slot
2211  * incremented. The root node will never be freed.
2212  *
2213  * At the end of this function, we should have a path which has all
2214  * slots incremented to the next position for a search. If we need to
2215  * read a new node it will be NULL and the node above it will have the
2216  * correct slot selected for a later read.
2217  *
2218  * If we increment the root node's slot counter past the number of
2219  * elements, 1 is returned to signal completion of the search.
2220  */
2221 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
2222 {
2223 	int level = 0;
2224 	int nr, slot;
2225 	struct extent_buffer *eb;
2226 
2227 	if (root_level == 0)
2228 		return 1;
2229 
2230 	while (level <= root_level) {
2231 		eb = path->nodes[level];
2232 		nr = btrfs_header_nritems(eb);
2233 		path->slots[level]++;
2234 		slot = path->slots[level];
2235 		if (slot >= nr || level == 0) {
2236 			/*
2237 			 * Don't free the root - we will detect this
2238 			 * condition after our loop and return a
2239 			 * positive value for caller to stop walking the tree.
2240 			 */
2241 			if (level != root_level) {
2242 				btrfs_tree_unlock_rw(eb, path->locks[level]);
2243 				path->locks[level] = 0;
2244 
2245 				free_extent_buffer(eb);
2246 				path->nodes[level] = NULL;
2247 				path->slots[level] = 0;
2248 			}
2249 		} else {
2250 			/*
2251 			 * We have a valid slot to walk back down
2252 			 * from. Stop here so caller can process these
2253 			 * new nodes.
2254 			 */
2255 			break;
2256 		}
2257 
2258 		level++;
2259 	}
2260 
2261 	eb = path->nodes[root_level];
2262 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
2263 		return 1;
2264 
2265 	return 0;
2266 }
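
/*
 * A worked example (hypothetical tree, for illustration only): with
 * root_level == 1, a root holding two children and path->slots[1] == 0
 * pointing at a fully visited leaf:
 *
 *   - level 0: slot++ reaches nritems, so the leaf is unlocked, freed and
 *     cleared from the path;
 *   - level 1: slot++ gives 1 < 2, so we stop and the caller walks back
 *     down into the second child.
 *
 * Once path->slots[1] reaches 2 (== nritems of the root), the final check
 * returns 1 and the search is complete.
 */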
2267 
2268 /*
2269  * Helper function to trace a subtree tree block swap.
2270  *
2271  * The swap will happen in highest tree block, but there may be a lot of
2272  * tree blocks involved.
2273  *
2274  * For example:
2275  *  OO = Old tree blocks
2276  *  NN = New tree blocks allocated during balance
2277  *
2278  *           File tree (257)                  Reloc tree for 257
2279  * L2              OO                                NN
2280  *               /    \                            /    \
2281  * L1          OO      OO (a)                    OO      NN (a)
2282  *            / \     / \                       / \     / \
2283  * L0       OO   OO OO   OO                   OO   OO NN   NN
2284  *                  (b)  (c)                          (b)  (c)
2285  *
2286  * When calling qgroup_trace_extent_swap(), we will pass:
2287  * @src_eb = OO(a)
2288  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2289  * @dst_level = 0
2290  * @root_level = 1
2291  *
2292  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2293  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2294  *
2295  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2296  *
2297  * 1) Tree search from @src_eb
2298  *    It acts as a simplified btrfs_search_slot().
2299  *    The key for search can be extracted from @dst_path->nodes[dst_level]
2300  *    (first key).
2301  *
2302  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2303  *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
2304  *    They should be marked during previous (@dst_level = 1) iteration.
2305  *
2306  * 3) Mark file extents in leaves dirty
2307  *    We don't have a good way to pick out new file extents only.
2308  *    So we still follow the old method of scanning all file extents in
2309  *    the leaf.
2310  *
2311  * This function can free us from keeping two paths, thus later we only need
2312  * to care about how to iterate all new tree blocks in the reloc tree.
2313  */
2314 static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
2315 				    struct extent_buffer *src_eb,
2316 				    struct btrfs_path *dst_path,
2317 				    int dst_level, int root_level,
2318 				    bool trace_leaf)
2319 {
2320 	struct btrfs_key key;
2321 	struct btrfs_path *src_path;
2322 	struct btrfs_fs_info *fs_info = trans->fs_info;
2323 	u32 nodesize = fs_info->nodesize;
2324 	int cur_level = root_level;
2325 	int ret;
2326 
2327 	BUG_ON(dst_level > root_level);
2328 	/* Level mismatch */
2329 	if (btrfs_header_level(src_eb) != root_level)
2330 		return -EINVAL;
2331 
2332 	src_path = btrfs_alloc_path();
2333 	if (!src_path) {
2334 		ret = -ENOMEM;
2335 		goto out;
2336 	}
2337 
2338 	if (dst_level)
2339 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2340 	else
2341 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2342 
2343 	/* For src_path */
2344 	atomic_inc(&src_eb->refs);
2345 	src_path->nodes[root_level] = src_eb;
2346 	src_path->slots[root_level] = dst_path->slots[root_level];
2347 	src_path->locks[root_level] = 0;
2348 
2349 	/* A simplified version of btrfs_search_slot() */
2350 	while (cur_level >= dst_level) {
2351 		struct btrfs_key src_key;
2352 		struct btrfs_key dst_key;
2353 
2354 		if (src_path->nodes[cur_level] == NULL) {
2355 			struct extent_buffer *eb;
2356 			int parent_slot;
2357 
2358 			eb = src_path->nodes[cur_level + 1];
2359 			parent_slot = src_path->slots[cur_level + 1];
2360 
2361 			eb = btrfs_read_node_slot(eb, parent_slot);
2362 			if (IS_ERR(eb)) {
2363 				ret = PTR_ERR(eb);
2364 				goto out;
2365 			}
2366 
2367 			src_path->nodes[cur_level] = eb;
2368 
2369 			btrfs_tree_read_lock(eb);
2370 			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2371 		}
2372 
2373 		src_path->slots[cur_level] = dst_path->slots[cur_level];
2374 		if (cur_level) {
2375 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2376 					&dst_key, dst_path->slots[cur_level]);
2377 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2378 					&src_key, src_path->slots[cur_level]);
2379 		} else {
2380 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2381 					&dst_key, dst_path->slots[cur_level]);
2382 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2383 					&src_key, src_path->slots[cur_level]);
2384 		}
2385 		/* Content mismatch, something went wrong */
2386 		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2387 			ret = -ENOENT;
2388 			goto out;
2389 		}
2390 		cur_level--;
2391 	}
2392 
2393 	/*
2394 	 * Now both @dst_path and @src_path have been populated, record the tree
2395 	 * blocks for qgroup accounting.
2396 	 */
2397 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2398 					nodesize);
2399 	if (ret < 0)
2400 		goto out;
2401 	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2402 					nodesize);
2403 	if (ret < 0)
2404 		goto out;
2405 
2406 	/* Record leaf file extents */
2407 	if (dst_level == 0 && trace_leaf) {
2408 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2409 		if (ret < 0)
2410 			goto out;
2411 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2412 	}
2413 out:
2414 	btrfs_free_path(src_path);
2415 	return ret;
2416 }
2417 
2418 /*
2419  * Helper function to do recursive generation-aware depth-first search, to
2420  * locate all new tree blocks in a subtree of reloc tree.
2421  *
2422  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2423  *         reloc tree
2424  * L2         NN (a)
2425  *          /    \
2426  * L1    OO        NN (b)
2427  *      /  \      /  \
2428  * L0  OO  OO    OO  NN
2429  *               (c) (d)
2430  * If we pass:
2431  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2432  * @cur_level = 1
2433  * @root_level = 1
2434  *
2435  * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
2436  * the above tree blocks along with their counterparts in the file tree.
2437  * During the search, old tree blocks OO(c) will be skipped as the tree block
2438  * swap won't affect OO(c).
2439  */
2440 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
2441 					   struct extent_buffer *src_eb,
2442 					   struct btrfs_path *dst_path,
2443 					   int cur_level, int root_level,
2444 					   u64 last_snapshot, bool trace_leaf)
2445 {
2446 	struct btrfs_fs_info *fs_info = trans->fs_info;
2447 	struct extent_buffer *eb;
2448 	bool need_cleanup = false;
2449 	int ret = 0;
2450 	int i;
2451 
2452 	/* Level sanity check */
2453 	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2454 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2455 	    root_level < cur_level) {
2456 		btrfs_err_rl(fs_info,
2457 			"%s: bad levels, cur_level=%d root_level=%d",
2458 			__func__, cur_level, root_level);
2459 		return -EUCLEAN;
2460 	}
2461 
2462 	/* Read the tree block if needed */
2463 	if (dst_path->nodes[cur_level] == NULL) {
2464 		int parent_slot;
2465 		u64 child_gen;
2466 
2467 		/*
2468 		 * dst_path->nodes[root_level] must be initialized before
2469 		 * calling this function.
2470 		 */
2471 		if (cur_level == root_level) {
2472 			btrfs_err_rl(fs_info,
2473 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2474 				__func__, root_level, root_level, cur_level);
2475 			return -EUCLEAN;
2476 		}
2477 
2478 		/*
2479 		 * We need to get child blockptr/gen from parent before we can
2480 		 * read it.
2481 		 */
2482 		eb = dst_path->nodes[cur_level + 1];
2483 		parent_slot = dst_path->slots[cur_level + 1];
2484 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2485 
2486 		/* This node is old, no need to trace */
2487 		if (child_gen < last_snapshot)
2488 			goto out;
2489 
2490 		eb = btrfs_read_node_slot(eb, parent_slot);
2491 		if (IS_ERR(eb)) {
2492 			ret = PTR_ERR(eb);
2493 			goto out;
2494 		}
2495 
2496 		dst_path->nodes[cur_level] = eb;
2497 		dst_path->slots[cur_level] = 0;
2498 
2499 		btrfs_tree_read_lock(eb);
2500 		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2501 		need_cleanup = true;
2502 	}
2503 
2504 	/* Now record this tree block and its counterpart for qgroups */
2505 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2506 				       root_level, trace_leaf);
2507 	if (ret < 0)
2508 		goto cleanup;
2509 
2510 	eb = dst_path->nodes[cur_level];
2511 
2512 	if (cur_level > 0) {
2513 		/* Iterate all child tree blocks */
2514 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2515 			/* Skip old tree blocks as they won't be swapped */
2516 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2517 				continue;
2518 			dst_path->slots[cur_level] = i;
2519 
2520 			/* Recursive call (at most 7 times) */
2521 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2522 					dst_path, cur_level - 1, root_level,
2523 					last_snapshot, trace_leaf);
2524 			if (ret < 0)
2525 				goto cleanup;
2526 		}
2527 	}
2528 
2529 cleanup:
2530 	if (need_cleanup) {
2531 		/* Clean up */
2532 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2533 				     dst_path->locks[cur_level]);
2534 		free_extent_buffer(dst_path->nodes[cur_level]);
2535 		dst_path->nodes[cur_level] = NULL;
2536 		dst_path->slots[cur_level] = 0;
2537 		dst_path->locks[cur_level] = 0;
2538 	}
2539 out:
2540 	return ret;
2541 }
2542 
2543 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2544 				struct extent_buffer *src_eb,
2545 				struct extent_buffer *dst_eb,
2546 				u64 last_snapshot, bool trace_leaf)
2547 {
2548 	struct btrfs_fs_info *fs_info = trans->fs_info;
2549 	struct btrfs_path *dst_path = NULL;
2550 	int level;
2551 	int ret;
2552 
2553 	if (!btrfs_qgroup_full_accounting(fs_info))
2554 		return 0;
2555 
2556 	/* Wrong parameter order */
2557 	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2558 		btrfs_err_rl(fs_info,
2559 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2560 			     btrfs_header_generation(src_eb),
2561 			     btrfs_header_generation(dst_eb));
2562 		return -EUCLEAN;
2563 	}
2564 
2565 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2566 		ret = -EIO;
2567 		goto out;
2568 	}
2569 
2570 	level = btrfs_header_level(dst_eb);
2571 	dst_path = btrfs_alloc_path();
2572 	if (!dst_path) {
2573 		ret = -ENOMEM;
2574 		goto out;
2575 	}
2576 	/* For dst_path */
2577 	atomic_inc(&dst_eb->refs);
2578 	dst_path->nodes[level] = dst_eb;
2579 	dst_path->slots[level] = 0;
2580 	dst_path->locks[level] = 0;
2581 
2582 	/* Do the generation-aware depth-first search */
2583 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2584 					      level, last_snapshot, trace_leaf);
2585 	if (ret < 0)
2586 		goto out;
2587 	ret = 0;
2588 
2589 out:
2590 	btrfs_free_path(dst_path);
2591 	if (ret < 0)
2592 		qgroup_mark_inconsistent(fs_info);
2593 	return ret;
2594 }
2595 
2596 /*
2597  * Inform qgroup to trace a whole subtree, including all its child tree
2598  * blocks and data.
2599  * The root tree block is specified by @root_eb.
2600  *
2601  * Normally used by relocation (tree block swap) and subvolume deletion.
2602  *
2603  * Return 0 for success.
2604  * Return <0 for errors (ENOMEM or tree search errors).
2605  */
2606 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2607 			       struct extent_buffer *root_eb,
2608 			       u64 root_gen, int root_level)
2609 {
2610 	struct btrfs_fs_info *fs_info = trans->fs_info;
2611 	int ret = 0;
2612 	int level;
2613 	u8 drop_subptree_thres;
2614 	struct extent_buffer *eb = root_eb;
2615 	struct btrfs_path *path = NULL;
2616 
2617 	ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
2618 	ASSERT(root_eb != NULL);
2619 
2620 	if (!btrfs_qgroup_full_accounting(fs_info))
2621 		return 0;
2622 
2623 	spin_lock(&fs_info->qgroup_lock);
2624 	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2625 	spin_unlock(&fs_info->qgroup_lock);
2626 
2627 	/*
2628 	 * This function only gets called for snapshot drop.  If we hit a high
2629 	 * node here, it means we are going to change ownership for quite a lot
2630 	 * of extents, which will greatly slow down btrfs_commit_transaction().
2631 	 *
2632 	 * So if we find a high tree here, we just skip the accounting and
2633 	 * mark the qgroup inconsistent.
2634 	 */
2635 	if (root_level >= drop_subptree_thres) {
2636 		qgroup_mark_inconsistent(fs_info);
2637 		return 0;
2638 	}
2639 
2640 	if (!extent_buffer_uptodate(root_eb)) {
2641 		struct btrfs_tree_parent_check check = {
2642 			.transid = root_gen,
2643 			.level = root_level
2644 		};
2645 
2646 		ret = btrfs_read_extent_buffer(root_eb, &check);
2647 		if (ret)
2648 			goto out;
2649 	}
2650 
2651 	if (root_level == 0) {
2652 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2653 		goto out;
2654 	}
2655 
2656 	path = btrfs_alloc_path();
2657 	if (!path)
2658 		return -ENOMEM;
2659 
2660 	/*
2661 	 * Walk down the tree.  Missing extent blocks are filled in as
2662 	 * we go. Metadata is accounted every time we read a new
2663 	 * extent block.
2664 	 *
2665 	 * When we reach a leaf, we account for file extent items in it,
2666 	 * walk back up the tree (adjusting slot pointers as we go)
2667 	 * and restart the search process.
2668 	 */
2669 	atomic_inc(&root_eb->refs);	/* For path */
2670 	path->nodes[root_level] = root_eb;
2671 	path->slots[root_level] = 0;
2672 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2673 walk_down:
2674 	level = root_level;
2675 	while (level >= 0) {
2676 		if (path->nodes[level] == NULL) {
2677 			int parent_slot;
2678 			u64 child_bytenr;
2679 
2680 			/*
2681 			 * We need to get child blockptr from parent before we
2682 			 * can read it.
2683 			 */
2684 			eb = path->nodes[level + 1];
2685 			parent_slot = path->slots[level + 1];
2686 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2687 
2688 			eb = btrfs_read_node_slot(eb, parent_slot);
2689 			if (IS_ERR(eb)) {
2690 				ret = PTR_ERR(eb);
2691 				goto out;
2692 			}
2693 
2694 			path->nodes[level] = eb;
2695 			path->slots[level] = 0;
2696 
2697 			btrfs_tree_read_lock(eb);
2698 			path->locks[level] = BTRFS_READ_LOCK;
2699 
2700 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2701 							fs_info->nodesize);
2702 			if (ret)
2703 				goto out;
2704 		}
2705 
2706 		if (level == 0) {
2707 			ret = btrfs_qgroup_trace_leaf_items(trans,
2708 							    path->nodes[level]);
2709 			if (ret)
2710 				goto out;
2711 
2712 			/* Nonzero return here means we completed our search */
2713 			ret = adjust_slots_upwards(path, root_level);
2714 			if (ret)
2715 				break;
2716 
2717 			/* Restart search with new slots */
2718 			goto walk_down;
2719 		}
2720 
2721 		level--;
2722 	}
2723 
2724 	ret = 0;
2725 out:
2726 	btrfs_free_path(path);
2727 
2728 	return ret;
2729 }
2730 
2731 static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
2732 {
2733 	if (!list_empty(&qgroup->nested_iterator))
2734 		return;
2735 
2736 	list_add_tail(&qgroup->nested_iterator, head);
2737 }
2738 
2739 static void qgroup_iterator_nested_clean(struct list_head *head)
2740 {
2741 	while (!list_empty(head)) {
2742 		struct btrfs_qgroup *qgroup;
2743 
2744 		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
2745 		list_del_init(&qgroup->nested_iterator);
2746 	}
2747 }
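
/*
 * Usage pattern of the nested iterator above, a sketch of what
 * btrfs_qgroup_account_extent() below does:
 *
 *	LIST_HEAD(qgroups);
 *
 *	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
 *	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
 *	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
 *			       num_bytes, seq);
 *	qgroup_iterator_nested_clean(&qgroups);
 */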
2748 
2749 #define UPDATE_NEW	0
2750 #define UPDATE_OLD	1
2751 /*
2752  * Walk all of the roots that point to the bytenr and adjust their refcnts.
2753  */
2754 static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2755 				 struct ulist *roots, struct list_head *qgroups,
2756 				 u64 seq, int update_old)
2757 {
2758 	struct ulist_node *unode;
2759 	struct ulist_iterator uiter;
2760 	struct btrfs_qgroup *qg;
2761 
2762 	if (!roots)
2763 		return;
2764 	ULIST_ITER_INIT(&uiter);
2765 	while ((unode = ulist_next(roots, &uiter))) {
2766 		LIST_HEAD(tmp);
2767 
2768 		qg = find_qgroup_rb(fs_info, unode->val);
2769 		if (!qg)
2770 			continue;
2771 
2772 		qgroup_iterator_nested_add(qgroups, qg);
2773 		qgroup_iterator_add(&tmp, qg);
2774 		list_for_each_entry(qg, &tmp, iterator) {
2775 			struct btrfs_qgroup_list *glist;
2776 
2777 			if (update_old)
2778 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2779 			else
2780 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2781 
2782 			list_for_each_entry(glist, &qg->groups, next_group) {
2783 				qgroup_iterator_nested_add(qgroups, glist->group);
2784 				qgroup_iterator_add(&tmp, glist->group);
2785 			}
2786 		}
2787 		qgroup_iterator_clean(&tmp);
2788 	}
2789 }
2790 
2791 /*
2792  * Update qgroup rfer/excl counters.
2793  * Rfer update is easy, the code explains itself.
2794  *
2795  * Excl update is tricky, the update is split into 2 parts.
2796  * Part 1: Possible exclusive <-> sharing detect:
2797  *	|	A	|	!A	|
2798  *  -------------------------------------
2799  *  B	|	*	|	-	|
2800  *  -------------------------------------
2801  *  !B	|	+	|	**	|
2802  *  -------------------------------------
2803  *
2804  * Conditions:
2805  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2806  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2807  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2808  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2809  *
2810  * Results:
2811  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2812  * *: Definitely not changed.		**: Possible unchanged.
2813  *
2814  * For the !A and !B conditions, the exception is the cur_old/new_roots
2815  * == 0 case.
2816  *
2817  * To make the logic clear, we first use conditions A and B to split the
2818  * combinations into 4 results.
2819  *
2820  * Then, for results "+" and "-", check the old/new_roots == 0 case, as
2821  * in them only one variant may be 0.
2822  *
2823  * Lastly, check result **: since two variants may be 0 there, split it
2824  * again (2x2).  But this time we don't need to consider other things;
2825  * the code and logic are easy to understand now.
2826  */
2827 static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
2828 				   struct list_head *qgroups, u64 nr_old_roots,
2829 				   u64 nr_new_roots, u64 num_bytes, u64 seq)
2830 {
2831 	struct btrfs_qgroup *qg;
2832 
2833 	list_for_each_entry(qg, qgroups, nested_iterator) {
2834 		u64 cur_new_count, cur_old_count;
2835 		bool dirty = false;
2836 
2837 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2838 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2839 
2840 		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2841 					     cur_new_count);
2842 
2843 		/* Rfer update part */
2844 		if (cur_old_count == 0 && cur_new_count > 0) {
2845 			qg->rfer += num_bytes;
2846 			qg->rfer_cmpr += num_bytes;
2847 			dirty = true;
2848 		}
2849 		if (cur_old_count > 0 && cur_new_count == 0) {
2850 			qg->rfer -= num_bytes;
2851 			qg->rfer_cmpr -= num_bytes;
2852 			dirty = true;
2853 		}
2854 
2855 		/* Excl update part */
2856 		/* Exclusive/none -> shared case */
2857 		if (cur_old_count == nr_old_roots &&
2858 		    cur_new_count < nr_new_roots) {
2859 			/* Exclusive -> shared */
2860 			if (cur_old_count != 0) {
2861 				qg->excl -= num_bytes;
2862 				qg->excl_cmpr -= num_bytes;
2863 				dirty = true;
2864 			}
2865 		}
2866 
2867 		/* Shared -> exclusive/none case */
2868 		if (cur_old_count < nr_old_roots &&
2869 		    cur_new_count == nr_new_roots) {
2870 			/* Shared->exclusive */
2871 			if (cur_new_count != 0) {
2872 				qg->excl += num_bytes;
2873 				qg->excl_cmpr += num_bytes;
2874 				dirty = true;
2875 			}
2876 		}
2877 
2878 		/* Exclusive/none -> exclusive/none case */
2879 		if (cur_old_count == nr_old_roots &&
2880 		    cur_new_count == nr_new_roots) {
2881 			if (cur_old_count == 0) {
2882 				/* None -> exclusive/none */
2883 
2884 				if (cur_new_count != 0) {
2885 					/* None -> exclusive */
2886 					qg->excl += num_bytes;
2887 					qg->excl_cmpr += num_bytes;
2888 					dirty = true;
2889 				}
2890 				/* None -> none, nothing changed */
2891 			} else {
2892 				/* Exclusive -> exclusive/none */
2893 
2894 				if (cur_new_count == 0) {
2895 					/* Exclusive -> none */
2896 					qg->excl -= num_bytes;
2897 					qg->excl_cmpr -= num_bytes;
2898 					dirty = true;
2899 				}
2900 				/* Exclusive -> exclusive, nothing changed */
2901 			}
2902 		}
2903 
2904 		if (dirty)
2905 			qgroup_dirty(fs_info, qg);
2906 	}
2907 }
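
/*
 * A worked example for the table above (hypothetical numbers): say
 * num_bytes == 16K, nr_old_roots == 2, nr_new_roots == 1, and a qgroup
 * with cur_old_count == 1 and cur_new_count == 1.  Before the change only
 * one of the two roots reaching the extent mapped into this qgroup
 * (shared); now every root does (exclusive).  Rfer stays unchanged since
 * the extent was referenced before and after, while the shared ->
 * exclusive branch adds the 16K to excl/excl_cmpr.
 */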
2908 
2909 /*
2910  * Check if @roots is potentially a list of fs tree roots.
2911  *
2912  * Return 0 for definitely not a fs/subvol tree roots ulist
2913  * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2914  *          one as well)
2915  */
2916 static int maybe_fs_roots(struct ulist *roots)
2917 {
2918 	struct ulist_node *unode;
2919 	struct ulist_iterator uiter;
2920 
2921 	/* Empty one, still possible for fs roots */
2922 	if (!roots || roots->nnodes == 0)
2923 		return 1;
2924 
2925 	ULIST_ITER_INIT(&uiter);
2926 	unode = ulist_next(roots, &uiter);
2927 	if (!unode)
2928 		return 1;
2929 
2930 	/*
2931 	 * If it contains fs tree roots, then it must belong to fs/subvol
2932 	 * trees.
2933 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2934 	 */
2935 	return is_fstree(unode->val);
2936 }
2937 
2938 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2939 				u64 num_bytes, struct ulist *old_roots,
2940 				struct ulist *new_roots)
2941 {
2942 	struct btrfs_fs_info *fs_info = trans->fs_info;
2943 	LIST_HEAD(qgroups);
2944 	u64 seq;
2945 	u64 nr_new_roots = 0;
2946 	u64 nr_old_roots = 0;
2947 	int ret = 0;
2948 
2949 	/*
2950 	 * If quotas get disabled meanwhile, the resources need to be freed and
2951 	 * we can't just exit here.
2952 	 */
2953 	if (!btrfs_qgroup_full_accounting(fs_info) ||
2954 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2955 		goto out_free;
2956 
2957 	if (new_roots) {
2958 		if (!maybe_fs_roots(new_roots))
2959 			goto out_free;
2960 		nr_new_roots = new_roots->nnodes;
2961 	}
2962 	if (old_roots) {
2963 		if (!maybe_fs_roots(old_roots))
2964 			goto out_free;
2965 		nr_old_roots = old_roots->nnodes;
2966 	}
2967 
2968 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2969 	if (nr_old_roots == 0 && nr_new_roots == 0)
2970 		goto out_free;
2971 
2972 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2973 					num_bytes, nr_old_roots, nr_new_roots);
2974 
2975 	mutex_lock(&fs_info->qgroup_rescan_lock);
2976 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2977 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2978 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2979 			ret = 0;
2980 			goto out_free;
2981 		}
2982 	}
2983 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2984 
2985 	spin_lock(&fs_info->qgroup_lock);
2986 	seq = fs_info->qgroup_seq;
2987 
2988 	/* Update old refcnts using old_roots */
2989 	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
2990 
2991 	/* Update new refcnts using new_roots */
2992 	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
2993 
2994 	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
2995 			       num_bytes, seq);
2996 
2997 	/*
2998 	 * We're done using the iterator, release all its qgroups while holding
2999 	 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
3000 	 * and trigger use-after-free accesses to qgroups.
3001 	 */
3002 	qgroup_iterator_nested_clean(&qgroups);
3003 
3004 	/*
3005 	 * Bump qgroup_seq to avoid seq overlap
3006 	 */
3007 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
3008 	spin_unlock(&fs_info->qgroup_lock);
3009 out_free:
3010 	ulist_free(old_roots);
3011 	ulist_free(new_roots);
3012 	return ret;
3013 }
3014 
3015 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
3016 {
3017 	struct btrfs_fs_info *fs_info = trans->fs_info;
3018 	struct btrfs_qgroup_extent_record *record;
3019 	struct btrfs_delayed_ref_root *delayed_refs;
3020 	struct ulist *new_roots = NULL;
3021 	unsigned long index;
3022 	u64 num_dirty_extents = 0;
3023 	u64 qgroup_to_skip;
3024 	int ret = 0;
3025 
3026 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3027 		return 0;
3028 
3029 	delayed_refs = &trans->transaction->delayed_refs;
3030 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
3031 	xa_for_each(&delayed_refs->dirty_extents, index, record) {
3032 		const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits);
3033 
3034 		num_dirty_extents++;
3035 		trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
3036 
3037 		if (!ret && !(fs_info->qgroup_flags &
3038 			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
3039 			struct btrfs_backref_walk_ctx ctx = { 0 };
3040 
3041 			ctx.bytenr = bytenr;
3042 			ctx.fs_info = fs_info;
3043 
3044 			/*
3045 			 * Old roots should be searched when inserting qgroup
3046 			 * extent record.
3047 			 *
3048 			 * But for the INCONSISTENT (NO_ACCOUNTING) -> rescan
3049 			 * case, we may have some records inserted during
3050 			 * NO_ACCOUNTING (thus no old_roots populated), but
3051 			 * later we start a rescan, which clears NO_ACCOUNTING,
3052 			 * leaving some inserted records without old_roots
3053 			 * populated.
3054 			 *
3055 			 * Those cases are rare and should not cost too much
3056 			 * time during commit_transaction().
3057 			 */
3058 			if (!record->old_roots) {
3059 				/* Search commit root to find old_roots */
3060 				ret = btrfs_find_all_roots(&ctx, false);
3061 				if (ret < 0)
3062 					goto cleanup;
3063 				record->old_roots = ctx.roots;
3064 				ctx.roots = NULL;
3065 			}
3066 
3067 			/*
3068 			 * Use BTRFS_SEQ_LAST as time_seq to do a special search
3069 			 * which doesn't lock the tree or delayed_refs and searches
3070 			 * the current root. It's safe inside commit_transaction().
3071 			 */
3072 			ctx.trans = trans;
3073 			ctx.time_seq = BTRFS_SEQ_LAST;
3074 			ret = btrfs_find_all_roots(&ctx, false);
3075 			if (ret < 0)
3076 				goto cleanup;
3077 			new_roots = ctx.roots;
3078 			if (qgroup_to_skip) {
3079 				ulist_del(new_roots, qgroup_to_skip, 0);
3080 				ulist_del(record->old_roots, qgroup_to_skip,
3081 					  0);
3082 			}
3083 			ret = btrfs_qgroup_account_extent(trans, bytenr,
3084 							  record->num_bytes,
3085 							  record->old_roots,
3086 							  new_roots);
3087 			record->old_roots = NULL;
3088 			new_roots = NULL;
3089 		}
3090 		/* Free the reserved data space */
3091 		btrfs_qgroup_free_refroot(fs_info,
3092 				record->data_rsv_refroot,
3093 				record->data_rsv,
3094 				BTRFS_QGROUP_RSV_DATA);
3095 cleanup:
3096 		ulist_free(record->old_roots);
3097 		ulist_free(new_roots);
3098 		new_roots = NULL;
3099 		xa_erase(&delayed_refs->dirty_extents, index);
3100 		kfree(record);
3101 
3102 	}
3103 	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
3104 				       num_dirty_extents);
3105 	return ret;
3106 }
3107 
3108 /*
3109  * Writes all changed qgroups to disk.
3110  * Called by the transaction commit path and the qgroup assign ioctl.
3111  */
3112 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
3113 {
3114 	struct btrfs_fs_info *fs_info = trans->fs_info;
3115 	int ret = 0;
3116 
3117 	/*
3118 	 * In case we are called from the qgroup assign ioctl, assert that we
3119 	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
3120 	 * disable operation (ioctl) and access a freed quota root.
3121 	 */
3122 	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
3123 		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
3124 
3125 	if (!fs_info->quota_root)
3126 		return ret;
3127 
3128 	spin_lock(&fs_info->qgroup_lock);
3129 	while (!list_empty(&fs_info->dirty_qgroups)) {
3130 		struct btrfs_qgroup *qgroup;
3131 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
3132 					  struct btrfs_qgroup, dirty);
3133 		list_del_init(&qgroup->dirty);
3134 		spin_unlock(&fs_info->qgroup_lock);
3135 		ret = update_qgroup_info_item(trans, qgroup);
3136 		if (ret)
3137 			qgroup_mark_inconsistent(fs_info);
3138 		ret = update_qgroup_limit_item(trans, qgroup);
3139 		if (ret)
3140 			qgroup_mark_inconsistent(fs_info);
3141 		spin_lock(&fs_info->qgroup_lock);
3142 	}
3143 	if (btrfs_qgroup_enabled(fs_info))
3144 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
3145 	else
3146 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
3147 	spin_unlock(&fs_info->qgroup_lock);
3148 
3149 	ret = update_qgroup_status_item(trans);
3150 	if (ret)
3151 		qgroup_mark_inconsistent(fs_info);
3152 
3153 	return ret;
3154 }
3155 
3156 int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
3157 			       struct btrfs_qgroup_inherit *inherit,
3158 			       size_t size)
3159 {
3160 	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
3161 		return -EOPNOTSUPP;
3162 	if (size < sizeof(*inherit) || size > PAGE_SIZE)
3163 		return -EINVAL;
3164 
3165 	/*
3166 	 * In the past we allowed btrfs_qgroup_inherit to specify copying
3167 	 * rfer/excl numbers directly from other qgroups.  This behavior has
3168 	 * been disabled in userspace for a very long time, but here we should
3169 	 * also disable it in the kernel, as it is known to mark qgroups
3170 	 * inconsistent, and a rescan would wipe out the changes anyway.
3171 	 *
3172 	 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies.
3173 	 */
3174 	if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
3175 		return -EINVAL;
3176 
3177 	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
3178 		return -EINVAL;
3179 
3180 	/*
3181 	 * Skip the inherit source qgroups check if qgroup is not enabled.
3182 	 * Qgroups can still be enabled later, causing problems, but in that case
3183 	 * btrfs_qgroup_inherit() would just ignore those invalid ones.
3184 	 */
3185 	if (!btrfs_qgroup_enabled(fs_info))
3186 		return 0;
3187 
3188 	/*
3189 	 * Now check all the remaining qgroups, they should all:
3190 	 *
3191 	 * - Exist
3192 	 * - Be higher level qgroups.
3193 	 */
3194 	for (int i = 0; i < inherit->num_qgroups; i++) {
3195 		struct btrfs_qgroup *qgroup;
3196 		u64 qgroupid = inherit->qgroups[i];
3197 
3198 		if (btrfs_qgroup_level(qgroupid) == 0)
3199 			return -EINVAL;
3200 
3201 		spin_lock(&fs_info->qgroup_lock);
3202 		qgroup = find_qgroup_rb(fs_info, qgroupid);
3203 		if (!qgroup) {
3204 			spin_unlock(&fs_info->qgroup_lock);
3205 			return -ENOENT;
3206 		}
3207 		spin_unlock(&fs_info->qgroup_lock);
3208 	}
3209 	return 0;
3210 }
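
/*
 * A minimal sketch (hypothetical values, kernel-style allocation) of an
 * inherit structure that passes the checks above - one existing level-1
 * qgroup "1/100" and no ref/excl copies:
 *
 *	struct btrfs_qgroup_inherit *inherit;
 *	size_t size = struct_size(inherit, qgroups, 1);
 *
 *	inherit = kzalloc(size, GFP_KERNEL);
 *	inherit->num_qgroups = 1;
 *	inherit->qgroups[0] = (1ULL << 48) | 100;
 *
 * With num_ref_copies and num_excl_copies left at zero, the struct_size()
 * check above matches @size exactly.
 */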
3211 
3212 static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
3213 			       u64 inode_rootid,
3214 			       struct btrfs_qgroup_inherit **inherit)
3215 {
3216 	int i = 0;
3217 	u64 num_qgroups = 0;
3218 	struct btrfs_qgroup *inode_qg;
3219 	struct btrfs_qgroup_list *qg_list;
3220 	struct btrfs_qgroup_inherit *res;
3221 	size_t struct_sz;
3222 	u64 *qgids;
3223 
3224 	if (*inherit)
3225 		return -EEXIST;
3226 
3227 	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
3228 	if (!inode_qg)
3229 		return -ENOENT;
3230 
3231 	num_qgroups = list_count_nodes(&inode_qg->groups);
3232 
3233 	if (!num_qgroups)
3234 		return 0;
3235 
3236 	struct_sz = struct_size(res, qgroups, num_qgroups);
3237 	if (struct_sz == SIZE_MAX)
3238 		return -ERANGE;
3239 
3240 	res = kzalloc(struct_sz, GFP_NOFS);
3241 	if (!res)
3242 		return -ENOMEM;
3243 	res->num_qgroups = num_qgroups;
3244 	qgids = res->qgroups;
3245 
3246 	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
3247 		qgids[i++] = qg_list->group->qgroupid;
3248 
3249 	*inherit = res;
3250 	return 0;
3251 }
3252 
3253 /*
3254  * Check if we can skip rescan when inheriting qgroups.  If @src has a single
3255  * @parent, and that @parent owns all its bytes exclusively, we can skip
3256  * the full rescan, by just adding the nodesize to @parent's excl/rfer.
3257  *
3258  * Return <0 for fatal errors (like srcid/parentid has no qgroup).
3259  * Return 0 if a quick inherit is done.
3260  * Return >0 if a quick inherit is not possible, and a full rescan is needed.
3261  */
3262 static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
3263 					 u64 srcid, u64 parentid)
3264 {
3265 	struct btrfs_qgroup *src;
3266 	struct btrfs_qgroup *parent;
3267 	struct btrfs_qgroup_list *list;
3268 	int nr_parents = 0;
3269 
3270 	src = find_qgroup_rb(fs_info, srcid);
3271 	if (!src)
3272 		return -ENOENT;
3273 	parent = find_qgroup_rb(fs_info, parentid);
3274 	if (!parent)
3275 		return -ENOENT;
3276 
3277 	/*
3278 	 * Source has no parent qgroup, but our new qgroup would have one.
3279 	 * Qgroup numbers would become inconsistent.
3280 	 */
3281 	if (list_empty(&src->groups))
3282 		return 1;
3283 
3284 	list_for_each_entry(list, &src->groups, next_group) {
3285 		/* The parent is not the same, quick update is not possible. */
3286 		if (list->group->qgroupid != parentid)
3287 			return 1;
3288 		nr_parents++;
3289 		/*
3290 		 * More than one parent qgroup, we can't be sure about accounting
3291 		 * consistency.
3292 		 */
3293 		if (nr_parents > 1)
3294 			return 1;
3295 	}
3296 
3297 	/*
3298 	 * The parent does not exclusively own all its bytes.  We're not sure
3299 	 * if the source has any bytes not fully owned by the parent.
3300 	 */
3301 	if (parent->excl != parent->rfer)
3302 		return 1;
3303 
3304 	parent->excl += fs_info->nodesize;
3305 	parent->rfer += fs_info->nodesize;
3306 	return 0;
3307 }
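
/*
 * Illustration (hypothetical numbers): if subvolume S belongs only to
 * qgroup 1/100 and 1/100 has excl == rfer (it exclusively owns everything
 * it references), snapshotting S adds exactly one new tree block, so excl
 * and rfer each grow by fs_info->nodesize and no rescan is needed.  Any
 * byte shared outside 1/100 would make excl != rfer and force a full
 * rescan instead.
 */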
3308 
3309 /*
3310  * Copy the accounting information between qgroups. This is necessary
3311  * when a snapshot or a subvolume is created. Throwing an error will
3312  * cause a transaction abort so we take extra care here to only error
3313  * when a readonly fs is a reasonable outcome.
3314  */
3315 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
3316 			 u64 objectid, u64 inode_rootid,
3317 			 struct btrfs_qgroup_inherit *inherit)
3318 {
3319 	int ret = 0;
3320 	u64 *i_qgroups;
3321 	bool committing = false;
3322 	struct btrfs_fs_info *fs_info = trans->fs_info;
3323 	struct btrfs_root *quota_root;
3324 	struct btrfs_qgroup *srcgroup;
3325 	struct btrfs_qgroup *dstgroup;
3326 	struct btrfs_qgroup *prealloc;
3327 	struct btrfs_qgroup_list **qlist_prealloc = NULL;
3328 	bool free_inherit = false;
3329 	bool need_rescan = false;
3330 	u32 level_size = 0;
3331 	u64 nums;
3332 
3333 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
3334 	if (!prealloc)
3335 		return -ENOMEM;
3336 
3337 	/*
3338 	 * There are only two callers of this function.
3339 	 *
3340 	 * One in create_subvol() in the ioctl context, which needs to hold
3341 	 * the qgroup_ioctl_lock.
3342 	 *
3343 	 * The other one in create_pending_snapshot() where no other qgroup
3344 	 * code can modify the fs as they all need to either start a new trans
3345 	 * or hold a trans handle, thus we don't need to hold
3346 	 * qgroup_ioctl_lock.
3347 	 * This avoids a long and complex lock chain and makes lockdep happy.
3348 	 */
3349 	spin_lock(&fs_info->trans_lock);
3350 	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
3351 		committing = true;
3352 	spin_unlock(&fs_info->trans_lock);
3353 
3354 	if (!committing)
3355 		mutex_lock(&fs_info->qgroup_ioctl_lock);
3356 	if (!btrfs_qgroup_enabled(fs_info))
3357 		goto out;
3358 
3359 	quota_root = fs_info->quota_root;
3360 	if (!quota_root) {
3361 		ret = -EINVAL;
3362 		goto out;
3363 	}
3364 
3365 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
3366 		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
3367 		if (ret)
3368 			goto out;
3369 		free_inherit = true;
3370 	}
3371 
3372 	if (inherit) {
3373 		i_qgroups = (u64 *)(inherit + 1);
3374 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
3375 		       2 * inherit->num_excl_copies;
3376 		for (int i = 0; i < nums; i++) {
3377 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
3378 
3379 			/*
3380 			 * Zero out invalid groups so we can ignore
3381 			 * them later.
3382 			 */
3383 			if (!srcgroup ||
3384 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
3385 				*i_qgroups = 0ULL;
3386 
3387 			++i_qgroups;
3388 		}
3389 	}
3390 
3391 	/*
3392 	 * create a tracking group for the subvol itself
3393 	 */
3394 	ret = add_qgroup_item(trans, quota_root, objectid);
3395 	if (ret)
3396 		goto out;
3397 
3398 	/*
3399 	 * add qgroup to all inherited groups
3400 	 */
3401 	if (inherit) {
3402 		i_qgroups = (u64 *)(inherit + 1);
3403 		for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
3404 			if (*i_qgroups == 0)
3405 				continue;
3406 			ret = add_qgroup_relation_item(trans, objectid,
3407 						       *i_qgroups);
3408 			if (ret && ret != -EEXIST)
3409 				goto out;
3410 			ret = add_qgroup_relation_item(trans, *i_qgroups,
3411 						       objectid);
3412 			if (ret && ret != -EEXIST)
3413 				goto out;
3414 		}
3415 		ret = 0;
3416 
3417 		qlist_prealloc = kcalloc(inherit->num_qgroups,
3418 					 sizeof(struct btrfs_qgroup_list *),
3419 					 GFP_NOFS);
3420 		if (!qlist_prealloc) {
3421 			ret = -ENOMEM;
3422 			goto out;
3423 		}
3424 		for (int i = 0; i < inherit->num_qgroups; i++) {
3425 			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
3426 						    GFP_NOFS);
3427 			if (!qlist_prealloc[i]) {
3428 				ret = -ENOMEM;
3429 				goto out;
3430 			}
3431 		}
3432 	}
3433 
3434 	spin_lock(&fs_info->qgroup_lock);
3435 
3436 	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
3437 	prealloc = NULL;
3438 
3439 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
3440 		dstgroup->lim_flags = inherit->lim.flags;
3441 		dstgroup->max_rfer = inherit->lim.max_rfer;
3442 		dstgroup->max_excl = inherit->lim.max_excl;
3443 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
3444 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
3445 
3446 		qgroup_dirty(fs_info, dstgroup);
3447 	}
3448 
3449 	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
3450 		srcgroup = find_qgroup_rb(fs_info, srcid);
3451 		if (!srcgroup)
3452 			goto unlock;
3453 
3454 		/*
3455 		 * We call inherit after we clone the root in order to make sure
3456 		 * our counts don't go crazy, so at this point the only
3457 		 * difference between the two roots should be the root node.
3458 		 */
3459 		level_size = fs_info->nodesize;
3460 		dstgroup->rfer = srcgroup->rfer;
3461 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
3462 		dstgroup->excl = level_size;
3463 		dstgroup->excl_cmpr = level_size;
3464 		srcgroup->excl = level_size;
3465 		srcgroup->excl_cmpr = level_size;
3466 
3467 		/* inherit the limit info */
3468 		dstgroup->lim_flags = srcgroup->lim_flags;
3469 		dstgroup->max_rfer = srcgroup->max_rfer;
3470 		dstgroup->max_excl = srcgroup->max_excl;
3471 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3472 		dstgroup->rsv_excl = srcgroup->rsv_excl;
3473 
3474 		qgroup_dirty(fs_info, dstgroup);
3475 		qgroup_dirty(fs_info, srcgroup);
3476 
3477 		/*
3478 		 * If the source qgroup has parent but the new one doesn't,
3479 		 * we need a full rescan.
3480 		 */
3481 		if (!inherit && !list_empty(&srcgroup->groups))
3482 			need_rescan = true;
3483 	}
3484 
3485 	if (!inherit)
3486 		goto unlock;
3487 
3488 	i_qgroups = (u64 *)(inherit + 1);
3489 	for (int i = 0; i < inherit->num_qgroups; i++) {
3490 		if (*i_qgroups) {
3491 			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
3492 					      *i_qgroups);
3493 			qlist_prealloc[i] = NULL;
3494 			if (ret)
3495 				goto unlock;
3496 		}
3497 		if (srcid) {
3498 			/* Check if we can do a quick inherit. */
3499 			ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups);
3500 			if (ret < 0)
3501 				goto unlock;
3502 			if (ret > 0)
3503 				need_rescan = true;
3504 			ret = 0;
3505 		}
3506 		++i_qgroups;
3507 	}
3508 
3509 	for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
3510 		struct btrfs_qgroup *src;
3511 		struct btrfs_qgroup *dst;
3512 
3513 		if (!i_qgroups[0] || !i_qgroups[1])
3514 			continue;
3515 
3516 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3517 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3518 
3519 		if (!src || !dst) {
3520 			ret = -EINVAL;
3521 			goto unlock;
3522 		}
3523 
3524 		dst->rfer = src->rfer - level_size;
3525 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3526 
3527 		/* Manually tweaking numbers certainly needs a rescan */
3528 		need_rescan = true;
3529 	}
3530 	for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
3531 		struct btrfs_qgroup *src;
3532 		struct btrfs_qgroup *dst;
3533 
3534 		if (!i_qgroups[0] || !i_qgroups[1])
3535 			continue;
3536 
3537 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3538 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3539 
3540 		if (!src || !dst) {
3541 			ret = -EINVAL;
3542 			goto unlock;
3543 		}
3544 
3545 		dst->excl = src->excl + level_size;
3546 		dst->excl_cmpr = src->excl_cmpr + level_size;
3547 		need_rescan = true;
3548 	}
3549 
3550 unlock:
3551 	spin_unlock(&fs_info->qgroup_lock);
3552 	if (!ret)
3553 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3554 out:
3555 	if (!committing)
3556 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3557 	if (need_rescan)
3558 		qgroup_mark_inconsistent(fs_info);
3559 	if (qlist_prealloc) {
3560 		for (int i = 0; i < inherit->num_qgroups; i++)
3561 			kfree(qlist_prealloc[i]);
3562 		kfree(qlist_prealloc);
3563 	}
3564 	if (free_inherit)
3565 		kfree(inherit);
3566 	kfree(prealloc);
3567 	return ret;
3568 }
3569 
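/*
 * Check if reserving @num_bytes more would exceed the qgroup's referenced
 * or exclusive limit.  Outstanding reservations count against the limits
 * on top of the already accounted usage.
 */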
3570 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3571 {
3572 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3573 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3574 		return false;
3575 
3576 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3577 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3578 		return false;
3579 
3580 	return true;
3581 }
3582 
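/*
 * Reserve @num_bytes of space of @type for the qgroup owning @root and all
 * of its ancestor qgroups.  With @enforce set, fail with -EDQUOT if any
 * qgroup in the hierarchy would exceed its limits.
 */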
3583 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3584 			  enum btrfs_qgroup_rsv_type type)
3585 {
3586 	struct btrfs_qgroup *qgroup;
3587 	struct btrfs_fs_info *fs_info = root->fs_info;
3588 	u64 ref_root = btrfs_root_id(root);
3589 	int ret = 0;
3590 	LIST_HEAD(qgroup_list);
3591 
3592 	if (!is_fstree(ref_root))
3593 		return 0;
3594 
3595 	if (num_bytes == 0)
3596 		return 0;
3597 
3598 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3599 	    capable(CAP_SYS_RESOURCE))
3600 		enforce = false;
3601 
3602 	spin_lock(&fs_info->qgroup_lock);
3603 	if (!fs_info->quota_root)
3604 		goto out;
3605 
3606 	qgroup = find_qgroup_rb(fs_info, ref_root);
3607 	if (!qgroup)
3608 		goto out;
3609 
3610 	qgroup_iterator_add(&qgroup_list, qgroup);
3611 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3612 		struct btrfs_qgroup_list *glist;
3613 
3614 		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3615 			ret = -EDQUOT;
3616 			goto out;
3617 		}
3618 
3619 		list_for_each_entry(glist, &qgroup->groups, next_group)
3620 			qgroup_iterator_add(&qgroup_list, glist->group);
3621 	}
3622 
3623 	ret = 0;
3624 	/*
3625 	 * no limits exceeded, now record the reservation into all qgroups
3626 	 */
3627 	list_for_each_entry(qgroup, &qgroup_list, iterator)
3628 		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
3629 
3630 out:
3631 	qgroup_iterator_clean(&qgroup_list);
3632 	spin_unlock(&fs_info->qgroup_lock);
3633 	return ret;
3634 }
3635 
3636 /*
3637  * Free @num_bytes of reserved space with @type for the qgroup (normally a
3638  * level 0 qgroup).
3639  *
3640  * All higher level qgroups are handled as well.
3641  *
3642  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3643  * This special case is only used for META_PERTRANS type.
3644  */
3645 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3646 			       u64 ref_root, u64 num_bytes,
3647 			       enum btrfs_qgroup_rsv_type type)
3648 {
3649 	struct btrfs_qgroup *qgroup;
3650 	LIST_HEAD(qgroup_list);
3651 
3652 	if (!is_fstree(ref_root))
3653 		return;
3654 
3655 	if (num_bytes == 0)
3656 		return;
3657 
3658 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3659 		WARN(1, "%s: Invalid type to free", __func__);
3660 		return;
3661 	}
3662 	spin_lock(&fs_info->qgroup_lock);
3663 
3664 	if (!fs_info->quota_root)
3665 		goto out;
3666 
3667 	qgroup = find_qgroup_rb(fs_info, ref_root);
3668 	if (!qgroup)
3669 		goto out;
3670 
3671 	if (num_bytes == (u64)-1)
3672 		/*
3673 		 * We're freeing all pertrans rsv, get reserved value from
3674 		 * level 0 qgroup as real num_bytes to free.
3675 		 */
3676 		num_bytes = qgroup->rsv.values[type];
3677 
3678 	qgroup_iterator_add(&qgroup_list, qgroup);
3679 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3680 		struct btrfs_qgroup_list *glist;
3681 
3682 		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
3683 		list_for_each_entry(glist, &qgroup->groups, next_group) {
3684 			qgroup_iterator_add(&qgroup_list, glist->group);
3685 		}
3686 	}
3687 out:
3688 	qgroup_iterator_clean(&qgroup_list);
3689 	spin_unlock(&fs_info->qgroup_lock);
3690 }
3691 
3692 /*
3693  * Check if the leaf is the last leaf, which means all node pointers
3694  * are at their last position.
3695  */
3696 static bool is_last_leaf(struct btrfs_path *path)
3697 {
3698 	int i;
3699 
3700 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3701 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3702 			return false;
3703 	}
3704 	return true;
3705 }
3706 
3707 /*
3708  * Returns < 0 on error, 0 when more leaves are to be scanned.
3709  * Returns 1 when done.
3710  */
3711 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3712 			      struct btrfs_path *path)
3713 {
3714 	struct btrfs_fs_info *fs_info = trans->fs_info;
3715 	struct btrfs_root *extent_root;
3716 	struct btrfs_key found;
3717 	struct extent_buffer *scratch_leaf = NULL;
3718 	u64 num_bytes;
3719 	bool done;
3720 	int slot;
3721 	int ret;
3722 
3723 	if (!btrfs_qgroup_full_accounting(fs_info))
3724 		return 1;
3725 
3726 	mutex_lock(&fs_info->qgroup_rescan_lock);
3727 	extent_root = btrfs_extent_root(fs_info,
3728 				fs_info->qgroup_rescan_progress.objectid);
3729 	ret = btrfs_search_slot_for_read(extent_root,
3730 					 &fs_info->qgroup_rescan_progress,
3731 					 path, 1, 0);
3732 
3733 	btrfs_debug(fs_info,
3734 		"current progress key (%llu %u %llu), search_slot ret %d",
3735 		fs_info->qgroup_rescan_progress.objectid,
3736 		fs_info->qgroup_rescan_progress.type,
3737 		fs_info->qgroup_rescan_progress.offset, ret);
3738 
3739 	if (ret) {
3740 		/*
3741 		 * The rescan is about to end, we will not be scanning any
3742 		 * further blocks. We cannot unset the RESCAN flag here, because
3743 		 * we want to commit the transaction if everything went well.
3744 		 * To make the live accounting work in this phase, we set our
3745 		 * scan progress pointer such that every real extent objectid
3746 		 * will be smaller.
3747 		 */
3748 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3749 		btrfs_release_path(path);
3750 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3751 		return ret;
3752 	}
3753 	done = is_last_leaf(path);
3754 
3755 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3756 			      btrfs_header_nritems(path->nodes[0]) - 1);
3757 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3758 
3759 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3760 	if (!scratch_leaf) {
3761 		ret = -ENOMEM;
3762 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3763 		goto out;
3764 	}
3765 	slot = path->slots[0];
3766 	btrfs_release_path(path);
3767 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3768 
3769 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3770 		struct btrfs_backref_walk_ctx ctx = { 0 };
3771 
3772 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3773 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3774 		    found.type != BTRFS_METADATA_ITEM_KEY)
3775 			continue;
3776 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3777 			num_bytes = fs_info->nodesize;
3778 		else
3779 			num_bytes = found.offset;
3780 
3781 		ctx.bytenr = found.objectid;
3782 		ctx.fs_info = fs_info;
3783 
3784 		ret = btrfs_find_all_roots(&ctx, false);
3785 		if (ret < 0)
3786 			goto out;
3787 		/* For rescan, just pass old_roots as NULL */
3788 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3789 						  num_bytes, NULL, ctx.roots);
3790 		if (ret < 0)
3791 			goto out;
3792 	}
3793 out:
3794 	if (scratch_leaf)
3795 		free_extent_buffer(scratch_leaf);
3796 
3797 	if (done && !ret) {
3798 		ret = 1;
3799 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3800 	}
3801 	return ret;
3802 }
3803 
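/*
 * Check if the rescan worker needs to stop early: the filesystem is closing
 * or being remounted, quotas got disabled, or the rescan was cancelled.
 */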
3804 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3805 {
3806 	if (btrfs_fs_closing(fs_info))
3807 		return true;
3808 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
3809 		return true;
3810 	if (!btrfs_qgroup_enabled(fs_info))
3811 		return true;
3812 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3813 		return true;
3814 	return false;
3815 }
3816 
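/*
 * Worker for a running qgroup rescan: re-account extents to the qgroups by
 * scanning the extent trees leaf by leaf, one transaction per leaf, until
 * qgroup_rescan_leaf() reports completion or an error, then write back the
 * final qgroup status item.
 */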
3817 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3818 {
3819 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3820 						     qgroup_rescan_work);
3821 	struct btrfs_path *path;
3822 	struct btrfs_trans_handle *trans = NULL;
3823 	int ret = 0;
3824 	bool stopped = false;
3825 	bool did_leaf_rescans = false;
3826 
3827 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3828 		return;
3829 
3830 	path = btrfs_alloc_path();
3831 	if (!path) {
3832 		ret = -ENOMEM;
3833 		goto out;
3834 	}
3835 	/*
3836 	 * Rescan should only search for commit root, and any later difference
3837 	 * should be recorded by qgroup
3838 	 */
3839 	path->search_commit_root = 1;
3840 	path->skip_locking = 1;
3841 
3842 	while (!ret && !(stopped = rescan_should_stop(fs_info))) {
3843 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3844 		if (IS_ERR(trans)) {
3845 			ret = PTR_ERR(trans);
3846 			break;
3847 		}
3848 
3849 		ret = qgroup_rescan_leaf(trans, path);
3850 		did_leaf_rescans = true;
3851 
3852 		if (ret > 0)
3853 			btrfs_commit_transaction(trans);
3854 		else
3855 			btrfs_end_transaction(trans);
3856 	}
3857 
3858 out:
3859 	btrfs_free_path(path);
3860 
3861 	mutex_lock(&fs_info->qgroup_rescan_lock);
3862 	if (ret > 0 &&
3863 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3864 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3865 	} else if (ret < 0 || stopped) {
3866 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3867 	}
3868 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3869 
3870 	/*
3871 	 * Only update status, since the previous part has already updated the
3872 	 * qgroup info, and only if we did any actual work. This also prevents
3873 	 * race with a concurrent quota disable, which has already set
3874 	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3875 	 * btrfs_quota_disable().
3876 	 */
3877 	if (did_leaf_rescans) {
3878 		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3879 		if (IS_ERR(trans)) {
3880 			ret = PTR_ERR(trans);
3881 			trans = NULL;
3882 			btrfs_err(fs_info,
3883 				  "fail to start transaction for status update: %d",
3884 				  ret);
3885 		}
3886 	} else {
3887 		trans = NULL;
3888 	}
3889 
3890 	mutex_lock(&fs_info->qgroup_rescan_lock);
3891 	if (!stopped ||
3892 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3893 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3894 	if (trans) {
3895 		int ret2 = update_qgroup_status_item(trans);
3896 
3897 		if (ret2 < 0) {
3898 			ret = ret2;
3899 			btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
3900 		}
3901 	}
3902 	fs_info->qgroup_rescan_running = false;
3903 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3904 	complete_all(&fs_info->qgroup_rescan_completion);
3905 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3906 
3907 	if (!trans)
3908 		return;
3909 
3910 	btrfs_end_transaction(trans);
3911 
3912 	if (stopped) {
3913 		btrfs_info(fs_info, "qgroup scan paused");
3914 	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3915 		btrfs_info(fs_info, "qgroup scan cancelled");
3916 	} else if (ret >= 0) {
3917 		btrfs_info(fs_info, "qgroup scan completed%s",
3918 			ret > 0 ? " (inconsistency flag cleared)" : "");
3919 	} else {
3920 		btrfs_err(fs_info, "qgroup scan failed with %d", ret);
3921 	}
3922 }
3923 
3924 /*
3925  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3926  * memory required for the rescan context.
3927  */
3928 static int
3929 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3930 		   int init_flags)
3931 {
3932 	int ret = 0;
3933 
3934 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3935 		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
3936 		return -EINVAL;
3937 	}
3938 
3939 	if (!init_flags) {
3940 		/* we're resuming qgroup rescan at mount time */
3941 		if (!(fs_info->qgroup_flags &
3942 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3943 			btrfs_debug(fs_info,
3944 			"qgroup rescan init failed, qgroup rescan is not queued");
3945 			ret = -EINVAL;
3946 		} else if (!(fs_info->qgroup_flags &
3947 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3948 			btrfs_debug(fs_info,
3949 			"qgroup rescan init failed, qgroup is not enabled");
3950 			ret = -ENOTCONN;
3951 		}
3952 
3953 		if (ret)
3954 			return ret;
3955 	}
3956 
3957 	mutex_lock(&fs_info->qgroup_rescan_lock);
3958 
3959 	if (init_flags) {
3960 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3961 			ret = -EINPROGRESS;
3962 		} else if (!(fs_info->qgroup_flags &
3963 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3964 			btrfs_debug(fs_info,
3965 			"qgroup rescan init failed, qgroup is not enabled");
3966 			ret = -ENOTCONN;
3967 		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
3968 			/* Quota disable is in progress */
3969 			ret = -EBUSY;
3970 		}
3971 
3972 		if (ret) {
3973 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3974 			return ret;
3975 		}
3976 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3977 	}
3978 
3979 	memset(&fs_info->qgroup_rescan_progress, 0,
3980 		sizeof(fs_info->qgroup_rescan_progress));
3981 	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
3982 				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
3983 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3984 	init_completion(&fs_info->qgroup_rescan_completion);
3985 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3986 
3987 	btrfs_init_work(&fs_info->qgroup_rescan_work,
3988 			btrfs_qgroup_rescan_worker, NULL);
3989 	return 0;
3990 }
3991 
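/* Reset all qgroup counters so a rescan can rebuild them from scratch. */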
3992 static void
3993 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3994 {
3995 	struct rb_node *n;
3996 	struct btrfs_qgroup *qgroup;
3997 
3998 	spin_lock(&fs_info->qgroup_lock);
3999 	/* clear all current qgroup tracking information */
4000 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
4001 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
4002 		qgroup->rfer = 0;
4003 		qgroup->rfer_cmpr = 0;
4004 		qgroup->excl = 0;
4005 		qgroup->excl_cmpr = 0;
4006 		qgroup_dirty(fs_info, qgroup);
4007 	}
4008 	spin_unlock(&fs_info->qgroup_lock);
4009 }
4010 
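/*
 * Start a new qgroup rescan: commit the current transaction to flush all
 * delayed refs, zero the qgroup counters, then queue the rescan worker.
 */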
4011 int
4012 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
4013 {
4014 	int ret = 0;
4015 
4016 	ret = qgroup_rescan_init(fs_info, 0, 1);
4017 	if (ret)
4018 		return ret;
4019 
4020 	/*
4021 	 * We have set the rescan_progress to 0, which means no more
4022 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
4023 	 * However, btrfs_qgroup_account_ref may be right after its call
4024 	 * to btrfs_find_all_roots, in which case it would still do the
4025 	 * accounting.
4026 	 * To solve this, we're committing the transaction, which will
4027 	 * ensure we run all delayed refs and only after that, we are
4028 	 * going to clear all tracking information for a clean start.
4029 	 */
4030 
4031 	ret = btrfs_commit_current_transaction(fs_info->fs_root);
4032 	if (ret) {
4033 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
4034 		return ret;
4035 	}
4036 
4037 	qgroup_rescan_zero_tracking(fs_info);
4038 
4039 	mutex_lock(&fs_info->qgroup_rescan_lock);
4040 	fs_info->qgroup_rescan_running = true;
4041 	btrfs_queue_work(fs_info->qgroup_rescan_workers,
4042 			 &fs_info->qgroup_rescan_work);
4043 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4044 
4045 	return 0;
4046 }
4047 
4048 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
4049 				     bool interruptible)
4050 {
4051 	int running;
4052 	int ret = 0;
4053 
4054 	mutex_lock(&fs_info->qgroup_rescan_lock);
4055 	running = fs_info->qgroup_rescan_running;
4056 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4057 
4058 	if (!running)
4059 		return 0;
4060 
4061 	if (interruptible)
4062 		ret = wait_for_completion_interruptible(
4063 					&fs_info->qgroup_rescan_completion);
4064 	else
4065 		wait_for_completion(&fs_info->qgroup_rescan_completion);
4066 
4067 	return ret;
4068 }
4069 
4070 /*
4071  * This is only called from open_ctree() where we're still single threaded,
4072  * thus locking is omitted here.
4073  */
4074 void
4075 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
4076 {
4077 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4078 		mutex_lock(&fs_info->qgroup_rescan_lock);
4079 		fs_info->qgroup_rescan_running = true;
4080 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4081 				 &fs_info->qgroup_rescan_work);
4082 		mutex_unlock(&fs_info->qgroup_rescan_lock);
4083 	}
4084 }
4085 
4086 #define rbtree_iterate_from_safe(node, next, start)				\
4087        for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
4088 
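
/*
 * Clear the EXTENT_QGROUP_RESERVED bit of all ranges in @reserved that
 * overlap [@start, @start + @len) and remove them from the changeset, so a
 * failed reservation gets fully rolled back.
 */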
4089 static int qgroup_unreserve_range(struct btrfs_inode *inode,
4090 				  struct extent_changeset *reserved, u64 start,
4091 				  u64 len)
4092 {
4093 	struct rb_node *node;
4094 	struct rb_node *next;
4095 	struct ulist_node *entry;
4096 	int ret = 0;
4097 
4098 	node = reserved->range_changed.root.rb_node;
4099 	if (!node)
4100 		return 0;
4101 	while (node) {
4102 		entry = rb_entry(node, struct ulist_node, rb_node);
4103 		if (entry->val < start)
4104 			node = node->rb_right;
4105 		else
4106 			node = node->rb_left;
4107 	}
4108 
4109 	if (entry->val > start && rb_prev(&entry->rb_node))
4110 		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
4111 				 rb_node);
4112 
4113 	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
4114 		u64 entry_start;
4115 		u64 entry_end;
4116 		u64 entry_len;
4117 		int clear_ret;
4118 
4119 		entry = rb_entry(node, struct ulist_node, rb_node);
4120 		entry_start = entry->val;
4121 		entry_end = entry->aux;
4122 		entry_len = entry_end - entry_start + 1;
4123 
4124 		if (entry_start >= start + len)
4125 			break;
4126 		if (entry_start + entry_len <= start)
4127 			continue;
4128 		/*
4129 		 * Now the entry is in [start, start + len), revert the
4130 		 * EXTENT_QGROUP_RESERVED bit.
4131 		 */
4132 		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
4133 					      entry_end, EXTENT_QGROUP_RESERVED);
4134 		if (!ret && clear_ret < 0)
4135 			ret = clear_ret;
4136 
4137 		ulist_del(&reserved->range_changed, entry->val, entry->aux);
4138 		if (likely(reserved->bytes_changed >= entry_len)) {
4139 			reserved->bytes_changed -= entry_len;
4140 		} else {
4141 			WARN_ON(1);
4142 			reserved->bytes_changed = 0;
4143 		}
4144 	}
4145 
4146 	return ret;
4147 }
4148 
4149 /*
4150  * Try to free some space for qgroup.
4151  *
4152  * For qgroup, there are only 3 ways to free qgroup space:
4153  * - Flush nodatacow write
4154  *   Any nodatacow write will free its reserved data space at run_delalloc_range().
4155  *   In theory, we should only flush nodatacow inodes, but it's not yet
4156  *   possible, so we need to flush the whole root.
4157  *
4158  * - Wait for ordered extents
4159  *   When ordered extents are finished, their reserved metadata is finally
4160  *   converted to per_trans status, which can be freed by a later
4161  *   transaction commit.
4162  *
4163  * - Commit transaction
4164  *   This would free the meta_per_trans space.
4165  *   In theory this shouldn't provide much space, but any more qgroup space
4166  *   is better than nothing.
4167  */
4168 static int try_flush_qgroup(struct btrfs_root *root)
4169 {
4170 	int ret;
4171 
4172 	/* Can't hold an open transaction or we run the risk of deadlocking. */
4173 	ASSERT(current->journal_info == NULL);
4174 	if (WARN_ON(current->journal_info))
4175 		return 0;
4176 
4177 	/*
4178 	 * We don't want to run flush again and again, so if there is a running
4179 	 * one, we won't try to start a new flush, but exit directly.
4180 	 */
4181 	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
4182 		wait_event(root->qgroup_flush_wait,
4183 			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
4184 		return 0;
4185 	}
4186 
4187 	ret = btrfs_start_delalloc_snapshot(root, true);
4188 	if (ret < 0)
4189 		goto out;
4190 	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
4191 
4192 	/*
4193 	 * After waiting for ordered extents run delayed iputs in order to free
4194 	 * space from unlinked files before committing the current transaction,
4195 	 * as ordered extents may have been holding the last reference of an
4196 	 * inode and they add a delayed iput when they complete.
4197 	 */
4198 	btrfs_run_delayed_iputs(root->fs_info);
4199 	btrfs_wait_on_delayed_iputs(root->fs_info);
4200 
4201 	ret = btrfs_commit_current_transaction(root);
4202 out:
4203 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
4204 	wake_up(&root->qgroup_flush_wait);
4205 	return ret;
4206 }
4207 
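/*
 * Reserve data space for the given range, recording the newly reserved
 * ranges in @reserved_ret so that a failed qgroup reservation can be
 * rolled back by qgroup_unreserve_range().
 */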
4208 static int qgroup_reserve_data(struct btrfs_inode *inode,
4209 			struct extent_changeset **reserved_ret, u64 start,
4210 			u64 len)
4211 {
4212 	struct btrfs_root *root = inode->root;
4213 	struct extent_changeset *reserved;
4214 	bool new_reserved = false;
4215 	u64 orig_reserved;
4216 	u64 to_reserve;
4217 	int ret;
4218 
4219 	if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4220 	    !is_fstree(btrfs_root_id(root)) || len == 0)
4221 		return 0;
4222 
4223 	/* @reserved parameter is mandatory for qgroup */
4224 	if (WARN_ON(!reserved_ret))
4225 		return -EINVAL;
4226 	if (!*reserved_ret) {
4227 		new_reserved = true;
4228 		*reserved_ret = extent_changeset_alloc();
4229 		if (!*reserved_ret)
4230 			return -ENOMEM;
4231 	}
4232 	reserved = *reserved_ret;
4233 	/* Record already reserved space */
4234 	orig_reserved = reserved->bytes_changed;
4235 	ret = set_record_extent_bits(&inode->io_tree, start,
4236 			start + len -1, EXTENT_QGROUP_RESERVED, reserved);
4237 
4238 	/* Newly reserved space */
4239 	to_reserve = reserved->bytes_changed - orig_reserved;
4240 	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
4241 					to_reserve, QGROUP_RESERVE);
4242 	if (ret < 0)
4243 		goto out;
4244 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
4245 	if (ret < 0)
4246 		goto cleanup;
4247 
4248 	return ret;
4249 
4250 cleanup:
4251 	qgroup_unreserve_range(inode, reserved, start, len);
4252 out:
4253 	if (new_reserved) {
4254 		extent_changeset_free(reserved);
4255 		*reserved_ret = NULL;
4256 	}
4257 	return ret;
4258 }
4259 
4260 /*
4261  * Reserve qgroup space for range [start, start + len).
4262  *
4263  * This function will either reserve space from related qgroups or do nothing
4264  * if the range is already reserved.
4265  *
4266  * Return 0 for successful reservation
4267  * Return <0 for error (including -EDQUOT)
4268  *
4269  * NOTE: This function may sleep for memory allocation, dirty page flushing and
4270  *	 transaction commit, so the caller should not hold any dirty page locked.
4271  */
4272 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
4273 			struct extent_changeset **reserved_ret, u64 start,
4274 			u64 len)
4275 {
4276 	int ret;
4277 
4278 	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
4279 	if (ret <= 0 && ret != -EDQUOT)
4280 		return ret;
4281 
4282 	ret = try_flush_qgroup(inode->root);
4283 	if (ret < 0)
4284 		return ret;
4285 	return qgroup_reserve_data(inode, reserved_ret, start, len);
4286 }
4287 
4288 /* Free ranges specified by @reserved, normally in error path */
4289 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
4290 				     struct extent_changeset *reserved,
4291 				     u64 start, u64 len, u64 *freed_ret)
4292 {
4293 	struct btrfs_root *root = inode->root;
4294 	struct ulist_node *unode;
4295 	struct ulist_iterator uiter;
4296 	struct extent_changeset changeset;
4297 	u64 freed = 0;
4298 	int ret;
4299 
4300 	extent_changeset_init(&changeset);
4301 	len = round_up(start + len, root->fs_info->sectorsize);
4302 	start = round_down(start, root->fs_info->sectorsize);
4303 
4304 	ULIST_ITER_INIT(&uiter);
4305 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
4306 		u64 range_start = unode->val;
4307 		/* unode->aux is the inclusive end */
4308 		u64 range_len = unode->aux - range_start + 1;
4309 		u64 free_start;
4310 		u64 free_len;
4311 
4312 		extent_changeset_release(&changeset);
4313 
4314 		/* Only free range in range [start, start + len) */
4315 		if (range_start >= start + len ||
4316 		    range_start + range_len <= start)
4317 			continue;
4318 		free_start = max(range_start, start);
4319 		free_len = min(start + len, range_start + range_len) -
4320 			   free_start;
4321 		/*
4322 		 * TODO: Also modify reserved->ranges_reserved to reflect
4323 		 * the modification.
4324 		 *
4325 		 * However, as long as we free qgroup reserved space according
4326 		 * to EXTENT_QGROUP_RESERVED, we won't double free.
4327 		 * So there is no need to rush.
4328 		 */
4329 		ret = clear_record_extent_bits(&inode->io_tree, free_start,
4330 				free_start + free_len - 1,
4331 				EXTENT_QGROUP_RESERVED, &changeset);
4332 		if (ret < 0)
4333 			goto out;
4334 		freed += changeset.bytes_changed;
4335 	}
4336 	btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
4337 				  BTRFS_QGROUP_RSV_DATA);
4338 	if (freed_ret)
4339 		*freed_ret = freed;
4340 	ret = 0;
4341 out:
4342 	extent_changeset_release(&changeset);
4343 	return ret;
4344 }
4345 
4346 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
4347 			struct extent_changeset *reserved, u64 start, u64 len,
4348 			u64 *released, int free)
4349 {
4350 	struct extent_changeset changeset;
4351 	int trace_op = QGROUP_RELEASE;
4352 	int ret;
4353 
4354 	if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
4355 		return clear_record_extent_bits(&inode->io_tree, start,
4356 						start + len - 1,
4357 						EXTENT_QGROUP_RESERVED, NULL);
4358 	}
4359 
4360 	/* In release case, we shouldn't have @reserved */
4361 	WARN_ON(!free && reserved);
4362 	if (free && reserved)
4363 		return qgroup_free_reserved_data(inode, reserved, start, len, released);
4364 	extent_changeset_init(&changeset);
4365 	ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
4366 				       EXTENT_QGROUP_RESERVED, &changeset);
4367 	if (ret < 0)
4368 		goto out;
4369 
4370 	if (free)
4371 		trace_op = QGROUP_FREE;
4372 	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
4373 					changeset.bytes_changed, trace_op);
4374 	if (free)
4375 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4376 				btrfs_root_id(inode->root),
4377 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4378 	if (released)
4379 		*released = changeset.bytes_changed;
4380 out:
4381 	extent_changeset_release(&changeset);
4382 	return ret;
4383 }
4384 
4385 /*
4386  * Free a reserved space range from io_tree and related qgroups
4387  *
4388  * Should be called when a range of pages gets invalidated before reaching
4389  * disk, or for the error cleanup case.
4390  * If @reserved is given, only the reserved range in [@start, @start + @len)
4391  * will be freed.
4392  *
4393  * For data written to disk, use btrfs_qgroup_release_data().
4394  *
4395  * NOTE: This function may sleep for memory allocation.
4396  */
4397 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
4398 			   struct extent_changeset *reserved,
4399 			   u64 start, u64 len, u64 *freed)
4400 {
4401 	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
4402 }
4403 
4404 /*
4405  * Release a reserved space range from io_tree only.
4406  *
4407  * Should be called when a range of pages get written to disk and corresponding
4408  * FILE_EXTENT is inserted into corresponding root.
4409  *
4410  * Since the qgroup accounting framework only updates qgroup numbers at
4411  * commit_transaction() time, the reserved space shouldn't be freed from the
4412  * related qgroups.
4413  *
4414  * But we should release the range from the io_tree, to allow further writes
4415  * to be COWed.
4416  *
4417  * NOTE: This function may sleep for memory allocation.
4418  */
4419 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
4420 {
4421 	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
4422 }
4423 
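/*
 * Track per-root totals of outstanding meta reservations, so that a later
 * free can be clamped to what was actually reserved (see
 * sub_root_meta_rsv()) and cannot underflow the qgroup counters.
 */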
4424 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4425 			      enum btrfs_qgroup_rsv_type type)
4426 {
4427 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4428 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4429 		return;
4430 	if (num_bytes == 0)
4431 		return;
4432 
4433 	spin_lock(&root->qgroup_meta_rsv_lock);
4434 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
4435 		root->qgroup_meta_rsv_prealloc += num_bytes;
4436 	else
4437 		root->qgroup_meta_rsv_pertrans += num_bytes;
4438 	spin_unlock(&root->qgroup_meta_rsv_lock);
4439 }
4440 
4441 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4442 			     enum btrfs_qgroup_rsv_type type)
4443 {
4444 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4445 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4446 		return 0;
4447 	if (num_bytes == 0)
4448 		return 0;
4449 
4450 	spin_lock(&root->qgroup_meta_rsv_lock);
4451 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
4452 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
4453 				  num_bytes);
4454 		root->qgroup_meta_rsv_prealloc -= num_bytes;
4455 	} else {
4456 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
4457 				  num_bytes);
4458 		root->qgroup_meta_rsv_pertrans -= num_bytes;
4459 	}
4460 	spin_unlock(&root->qgroup_meta_rsv_lock);
4461 	return num_bytes;
4462 }
4463 
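/*
 * Reserve @num_bytes of metadata space of @type for @root.  @num_bytes must
 * be nodesize aligned.  The amount is also added to the per-root counters
 * so that a later free cannot underflow them.
 */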
4464 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4465 			      enum btrfs_qgroup_rsv_type type, bool enforce)
4466 {
4467 	struct btrfs_fs_info *fs_info = root->fs_info;
4468 	int ret;
4469 
4470 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4471 	    !is_fstree(btrfs_root_id(root)) || num_bytes == 0)
4472 		return 0;
4473 
4474 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4475 	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
4476 	ret = qgroup_reserve(root, num_bytes, enforce, type);
4477 	if (ret < 0)
4478 		return ret;
4479 	/*
4480 	 * Record what we have reserved into the root.
4481 	 *
4482 	 * This is to avoid a quota disabled->enabled underflow: in that case
4483 	 * we may try to free space we haven't reserved (since quota was
4484 	 * disabled), so record what we reserved into the root and ensure a
4485 	 * later release won't underflow this number.
4486 	 */
4487 	add_root_meta_rsv(root, num_bytes, type);
4488 	return ret;
4489 }
4490 
4491 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4492 				enum btrfs_qgroup_rsv_type type, bool enforce,
4493 				bool noflush)
4494 {
4495 	int ret;
4496 
4497 	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4498 	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4499 		return ret;
4500 
4501 	ret = try_flush_qgroup(root);
4502 	if (ret < 0)
4503 		return ret;
4504 	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4505 }
4506 
4507 /*
4508  * Per-transaction meta reservations should all be freed at transaction
4509  * commit time.
4510  */
4511 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4512 {
4513 	struct btrfs_fs_info *fs_info = root->fs_info;
4514 
4515 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4516 	    !is_fstree(btrfs_root_id(root)))
4517 		return;
4518 
4519 	/* TODO: Update trace point to handle such free */
4520 	trace_qgroup_meta_free_all_pertrans(root);
4521 	/* Special value -1 means to free all reserved space */
4522 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
4523 				  BTRFS_QGROUP_RSV_META_PERTRANS);
4524 }
4525 
4526 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4527 			      enum btrfs_qgroup_rsv_type type)
4528 {
4529 	struct btrfs_fs_info *fs_info = root->fs_info;
4530 
4531 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4532 	    !is_fstree(btrfs_root_id(root)))
4533 		return;
4534 
4535 	/*
4536 	 * Reservations for META_PREALLOC can happen before quota is enabled,
4537 	 * which can lead to underflow.
4538 	 * Here we ensure we only free what we really have reserved.
4539 	 */
4540 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4541 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4542 	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4543 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
4544 }
4545 
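/*
 * Walk the qgroup hierarchy of @ref_root and move @num_bytes of reserved
 * space from META_PREALLOC to META_PERTRANS.  On a read-only filesystem
 * only the META_PREALLOC side is released.
 */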
4546 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4547 				int num_bytes)
4548 {
4549 	struct btrfs_qgroup *qgroup;
4550 	LIST_HEAD(qgroup_list);
4551 
4552 	if (num_bytes == 0)
4553 		return;
4554 	if (!fs_info->quota_root)
4555 		return;
4556 
4557 	spin_lock(&fs_info->qgroup_lock);
4558 	qgroup = find_qgroup_rb(fs_info, ref_root);
4559 	if (!qgroup)
4560 		goto out;
4561 
4562 	qgroup_iterator_add(&qgroup_list, qgroup);
4563 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
4564 		struct btrfs_qgroup_list *glist;
4565 
4566 		qgroup_rsv_release(fs_info, qgroup, num_bytes,
4567 				BTRFS_QGROUP_RSV_META_PREALLOC);
4568 		if (!sb_rdonly(fs_info->sb))
4569 			qgroup_rsv_add(fs_info, qgroup, num_bytes,
4570 				       BTRFS_QGROUP_RSV_META_PERTRANS);
4571 
4572 		list_for_each_entry(glist, &qgroup->groups, next_group)
4573 			qgroup_iterator_add(&qgroup_list, glist->group);
4574 	}
4575 out:
4576 	qgroup_iterator_clean(&qgroup_list);
4577 	spin_unlock(&fs_info->qgroup_lock);
4578 }
4579 
4580 /*
4581  * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
4582  *
4583  * This is called when a preallocated meta reservation needs to be used,
4584  * normally after a btrfs_join_transaction() call.
4585  */
4586 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4587 {
4588 	struct btrfs_fs_info *fs_info = root->fs_info;
4589 
4590 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4591 	    !is_fstree(btrfs_root_id(root)))
4592 		return;
4593 	/* Same as btrfs_qgroup_free_meta_prealloc() */
4594 	num_bytes = sub_root_meta_rsv(root, num_bytes,
4595 				      BTRFS_QGROUP_RSV_META_PREALLOC);
4596 	trace_qgroup_meta_convert(root, num_bytes);
4597 	qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
4598 	if (!sb_rdonly(fs_info->sb))
4599 		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
4600 }
4601 
4602 /*
4603  * Check for leaked qgroup reserved space, normally at inode destruction
4604  * time.
4605  */
4606 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4607 {
4608 	struct extent_changeset changeset;
4609 	struct ulist_node *unode;
4610 	struct ulist_iterator iter;
4611 	int ret;
4612 
4613 	extent_changeset_init(&changeset);
4614 	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4615 			EXTENT_QGROUP_RESERVED, &changeset);
4616 
4617 	WARN_ON(ret < 0);
4618 	if (WARN_ON(changeset.bytes_changed)) {
4619 		ULIST_ITER_INIT(&iter);
4620 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4621 			btrfs_warn(inode->root->fs_info,
4622 		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4623 				btrfs_ino(inode), unode->val, unode->aux);
4624 		}
4625 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4626 				btrfs_root_id(inode->root),
4627 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4628 
4629 	}
4630 	extent_changeset_release(&changeset);
4631 }
4632 
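/* Initialize the swapped blocks tracking of a root, one rb-tree per level. */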
4633 void btrfs_qgroup_init_swapped_blocks(
4634 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4635 {
4636 	int i;
4637 
4638 	spin_lock_init(&swapped_blocks->lock);
4639 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4640 		swapped_blocks->blocks[i] = RB_ROOT;
4641 	swapped_blocks->swapped = false;
4642 }
4643 
4644 /*
4645  * Delete all swapped block records of @root.
4646  * Every record here means we skipped a full subtree scan for qgroup.
4647  *
4648  * Called when committing a transaction.
4649  */
4650 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4651 {
4652 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4653 	int i;
4654 
4655 	swapped_blocks = &root->swapped_blocks;
4656 
4657 	spin_lock(&swapped_blocks->lock);
4658 	if (!swapped_blocks->swapped)
4659 		goto out;
4660 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4661 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4662 		struct btrfs_qgroup_swapped_block *entry;
4663 		struct btrfs_qgroup_swapped_block *next;
4664 
4665 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4666 						     node)
4667 			kfree(entry);
4668 		swapped_blocks->blocks[i] = RB_ROOT;
4669 	}
4670 	swapped_blocks->swapped = false;
4671 out:
4672 	spin_unlock(&swapped_blocks->lock);
4673 }
4674 
4675 /*
4676  * Add subtree roots record into @subvol_root.
4677  *
4678  * @subvol_root:	tree root of the subvolume tree that got swapped
4679  * @bg:			block group under balance
4680  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4681  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4682  *			BOTH POINTERS ARE BEFORE TREE SWAP
4683  * @last_snapshot:	last snapshot generation of the subvolume tree
4684  */
4685 int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
4686 		struct btrfs_block_group *bg,
4687 		struct extent_buffer *subvol_parent, int subvol_slot,
4688 		struct extent_buffer *reloc_parent, int reloc_slot,
4689 		u64 last_snapshot)
4690 {
4691 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4692 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4693 	struct btrfs_qgroup_swapped_block *block;
4694 	struct rb_node **cur;
4695 	struct rb_node *parent = NULL;
4696 	int level = btrfs_header_level(subvol_parent) - 1;
4697 	int ret = 0;
4698 
4699 	if (!btrfs_qgroup_full_accounting(fs_info))
4700 		return 0;
4701 
4702 	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4703 	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4704 		btrfs_err_rl(fs_info,
4705 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4706 			__func__,
4707 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4708 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4709 		return -EUCLEAN;
4710 	}
4711 
4712 	block = kmalloc(sizeof(*block), GFP_NOFS);
4713 	if (!block) {
4714 		ret = -ENOMEM;
4715 		goto out;
4716 	}
4717 
4718 	/*
4719 	 * @reloc_parent/slot is still before swap, while @block is going to
4720 	 * record the bytenr after swap, so we do the swap here.
4721 	 */
4722 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4723 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4724 							     reloc_slot);
4725 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4726 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4727 							    subvol_slot);
4728 	block->last_snapshot = last_snapshot;
4729 	block->level = level;
4730 
4731 	/*
4732 	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
4733 	 * no one else can modify tree blocks, thus the qgroup numbers will not
4734 	 * change no matter the value of trace_leaf.
4735 	 */
4736 	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4737 		block->trace_leaf = true;
4738 	else
4739 		block->trace_leaf = false;
4740 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4741 
4742 	/* Insert @block into @blocks */
4743 	spin_lock(&blocks->lock);
4744 	cur = &blocks->blocks[level].rb_node;
4745 	while (*cur) {
4746 		struct btrfs_qgroup_swapped_block *entry;
4747 
4748 		parent = *cur;
4749 		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
4750 				 node);
4751 
4752 		if (entry->subvol_bytenr < block->subvol_bytenr) {
4753 			cur = &(*cur)->rb_left;
4754 		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
4755 			cur = &(*cur)->rb_right;
4756 		} else {
4757 			if (entry->subvol_generation !=
4758 					block->subvol_generation ||
4759 			    entry->reloc_bytenr != block->reloc_bytenr ||
4760 			    entry->reloc_generation !=
4761 					block->reloc_generation) {
4762 				/*
4763 				 * Duplicated but mismatch entry found.
4764 				 * Shouldn't happen.
4765 				 *
4766 				 * Marking qgroup inconsistent should be enough
4767 				 * for end users.
4768 				 */
4769 				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4770 				ret = -EEXIST;
4771 			}
4772 			kfree(block);
4773 			goto out_unlock;
4774 		}
4775 	}
4776 	rb_link_node(&block->node, parent, cur);
4777 	rb_insert_color(&block->node, &blocks->blocks[level]);
4778 	blocks->swapped = true;
4779 out_unlock:
4780 	spin_unlock(&blocks->lock);
4781 out:
4782 	if (ret < 0)
4783 		qgroup_mark_inconsistent(fs_info);
4784 	return ret;
4785 }
4786 
4787 /*
4788  * Check if the tree block is a subtree root, and if so do the needed
4789  * delayed subtree trace for qgroup.
4790  *
4791  * This is called during btrfs_cow_block().
4792  */
4793 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4794 					 struct btrfs_root *root,
4795 					 struct extent_buffer *subvol_eb)
4796 {
4797 	struct btrfs_fs_info *fs_info = root->fs_info;
4798 	struct btrfs_tree_parent_check check = { 0 };
4799 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4800 	struct btrfs_qgroup_swapped_block *block;
4801 	struct extent_buffer *reloc_eb = NULL;
4802 	struct rb_node *node;
4803 	bool found = false;
4804 	bool swapped = false;
4805 	int level = btrfs_header_level(subvol_eb);
4806 	int ret = 0;
4807 	int i;
4808 
4809 	if (!btrfs_qgroup_full_accounting(fs_info))
4810 		return 0;
4811 	if (!is_fstree(btrfs_root_id(root)) || !root->reloc_root)
4812 		return 0;
4813 
4814 	spin_lock(&blocks->lock);
4815 	if (!blocks->swapped) {
4816 		spin_unlock(&blocks->lock);
4817 		return 0;
4818 	}
4819 	node = blocks->blocks[level].rb_node;
4820 
4821 	while (node) {
4822 		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4823 		if (block->subvol_bytenr < subvol_eb->start) {
4824 			node = node->rb_left;
4825 		} else if (block->subvol_bytenr > subvol_eb->start) {
4826 			node = node->rb_right;
4827 		} else {
4828 			found = true;
4829 			break;
4830 		}
4831 	}
4832 	if (!found) {
4833 		spin_unlock(&blocks->lock);
4834 		goto out;
4835 	}
4836 	/* Found one, remove it from @blocks first and update blocks->swapped */
4837 	rb_erase(&block->node, &blocks->blocks[level]);
4838 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4839 		if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
4840 			swapped = true;
4841 			break;
4842 		}
4843 	}
4844 	blocks->swapped = swapped;
4845 	spin_unlock(&blocks->lock);
4846 
4847 	check.level = block->level;
4848 	check.transid = block->reloc_generation;
4849 	check.has_first_key = true;
4850 	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4851 
4852 	/* Read out reloc subtree root */
4853 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4854 	if (IS_ERR(reloc_eb)) {
4855 		ret = PTR_ERR(reloc_eb);
4856 		reloc_eb = NULL;
4857 		goto free_out;
4858 	}
4859 	if (!extent_buffer_uptodate(reloc_eb)) {
4860 		ret = -EIO;
4861 		goto free_out;
4862 	}
4863 
4864 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4865 			block->last_snapshot, block->trace_leaf);
4866 free_out:
4867 	kfree(block);
4868 	free_extent_buffer(reloc_eb);
4869 out:
4870 	if (ret < 0) {
4871 		btrfs_err_rl(fs_info,
4872 			     "failed to account subtree at bytenr %llu: %d",
4873 			     subvol_eb->start, ret);
4874 		qgroup_mark_inconsistent(fs_info);
4875 	}
4876 	return ret;
4877 }
4878 
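/*
 * Free all qgroup extent records attached to @trans, including their
 * old_roots ulists, and destroy the xarray holding them.
 */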
4879 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4880 {
4881 	struct btrfs_qgroup_extent_record *entry;
4882 	unsigned long index;
4883 
4884 	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
4885 		ulist_free(entry->old_roots);
4886 		kfree(entry);
4887 	}
4888 	xa_destroy(&trans->delayed_refs.dirty_extents);
4889 }
4890 
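
/*
 * Apply a simple quota delta: adjust the rfer/excl counters of the qgroup
 * owning @delta->root and of all its ancestor qgroups by @delta->num_bytes,
 * in the direction given by @delta->is_inc.
 */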
4891 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
4892 			      const struct btrfs_squota_delta *delta)
4893 {
4894 	int ret;
4895 	struct btrfs_qgroup *qgroup;
4896 	struct btrfs_qgroup *qg;
4897 	LIST_HEAD(qgroup_list);
4898 	u64 root = delta->root;
4899 	u64 num_bytes = delta->num_bytes;
4900 	const int sign = (delta->is_inc ? 1 : -1);
4901 
4902 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4903 		return 0;
4904 
4905 	if (!is_fstree(root))
4906 		return 0;
4907 
4908 	/* If the extent predates enabling quotas, don't count it. */
4909 	if (delta->generation < fs_info->qgroup_enable_gen)
4910 		return 0;
4911 
4912 	spin_lock(&fs_info->qgroup_lock);
4913 	qgroup = find_qgroup_rb(fs_info, root);
4914 	if (!qgroup) {
4915 		ret = -ENOENT;
4916 		goto out;
4917 	}
4918 
4919 	ret = 0;
4920 	qgroup_iterator_add(&qgroup_list, qgroup);
4921 	list_for_each_entry(qg, &qgroup_list, iterator) {
4922 		struct btrfs_qgroup_list *glist;
4923 
4924 		qg->excl += num_bytes * sign;
4925 		qg->rfer += num_bytes * sign;
4926 		qgroup_dirty(fs_info, qg);
4927 
4928 		list_for_each_entry(glist, &qg->groups, next_group)
4929 			qgroup_iterator_add(&qgroup_list, glist->group);
4930 	}
4931 	qgroup_iterator_clean(&qgroup_list);
4932 
4933 out:
4934 	spin_unlock(&fs_info->qgroup_lock);
4935 	return ret;
4936 }
4937