xref: /linux/fs/btrfs/qgroup.c (revision c9cfc122f03711a5124b4aafab3211cf4d35a2ac)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 STRATO.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/workqueue.h>
13 #include <linux/btrfs.h>
14 #include <linux/sched/mm.h>
15 
16 #include "ctree.h"
17 #include "transaction.h"
18 #include "disk-io.h"
19 #include "locking.h"
20 #include "ulist.h"
21 #include "backref.h"
22 #include "extent_io.h"
23 #include "qgroup.h"
24 #include "block-group.h"
25 #include "sysfs.h"
26 #include "tree-mod-log.h"
27 #include "fs.h"
28 #include "accessors.h"
29 #include "extent-tree.h"
30 #include "root-tree.h"
31 #include "tree-checker.h"
32 
33 enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
34 {
35 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
36 		return BTRFS_QGROUP_MODE_DISABLED;
37 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
38 		return BTRFS_QGROUP_MODE_SIMPLE;
39 	return BTRFS_QGROUP_MODE_FULL;
40 }
41 
42 bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
43 {
44 	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
45 }
46 
47 bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
48 {
49 	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
50 }
51 
52 /*
53  * Helpers to access qgroup reservation
54  *
55  * Callers should ensure the lock context and type are valid
56  */
57 
58 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
59 {
60 	u64 ret = 0;
61 	int i;
62 
63 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
64 		ret += qgroup->rsv.values[i];
65 
66 	return ret;
67 }
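
/*
 * A minimal sketch of what the total above covers, assuming the
 * BTRFS_QGROUP_RSV_* types named in qgroup_rsv_type_str() below:
 *
 *	total = rsv.values[BTRFS_QGROUP_RSV_DATA] +
 *		rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] +
 *		rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC];
 */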
68 
69 #ifdef CONFIG_BTRFS_DEBUG
70 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
71 {
72 	if (type == BTRFS_QGROUP_RSV_DATA)
73 		return "data";
74 	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
75 		return "meta_pertrans";
76 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
77 		return "meta_prealloc";
78 	return NULL;
79 }
80 #endif
81 
82 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
83 			   struct btrfs_qgroup *qgroup, u64 num_bytes,
84 			   enum btrfs_qgroup_rsv_type type)
85 {
86 	trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
87 	qgroup->rsv.values[type] += num_bytes;
88 }
89 
90 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
91 			       struct btrfs_qgroup *qgroup, u64 num_bytes,
92 			       enum btrfs_qgroup_rsv_type type)
93 {
94 	trace_btrfs_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
95 	if (qgroup->rsv.values[type] >= num_bytes) {
96 		qgroup->rsv.values[type] -= num_bytes;
97 		return;
98 	}
99 #ifdef CONFIG_BTRFS_DEBUG
100 	WARN_RATELIMIT(1,
101 		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
102 		qgroup->qgroupid, qgroup_rsv_type_str(type),
103 		qgroup->rsv.values[type], num_bytes);
104 #endif
105 	qgroup->rsv.values[type] = 0;
106 }
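
/*
 * Note the clamp above: releasing more bytes than are currently reserved
 * is a bug (it warns under CONFIG_BTRFS_DEBUG), but it is made safe by
 * zeroing the counter instead of letting the unsigned value wrap around.
 */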
107 
108 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
109 				     struct btrfs_qgroup *dest,
110 				     const struct btrfs_qgroup *src)
111 {
112 	int i;
113 
114 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
115 		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
116 }
117 
118 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
119 					 struct btrfs_qgroup *dest,
120 					 const struct btrfs_qgroup *src)
121 {
122 	int i;
123 
124 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
125 		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
126 }
127 
128 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
129 					   int mod)
130 {
131 	if (qg->old_refcnt < seq)
132 		qg->old_refcnt = seq;
133 	qg->old_refcnt += mod;
134 }
135 
136 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
137 					   int mod)
138 {
139 	if (qg->new_refcnt < seq)
140 		qg->new_refcnt = seq;
141 	qg->new_refcnt += mod;
142 }
143 
144 static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
145 {
146 	if (qg->old_refcnt < seq)
147 		return 0;
148 	return qg->old_refcnt - seq;
149 }
150 
151 static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
152 {
153 	if (qg->new_refcnt < seq)
154 		return 0;
155 	return qg->new_refcnt - seq;
156 }
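
/*
 * A worked example of the seq-based refcounts above: with seq == 100 and
 * qg->old_refcnt still below seq (i.e. untouched in this round), a first
 * update with mod == 1 clamps old_refcnt to 100 and bumps it to 101, a
 * second one bumps it to 102, so btrfs_qgroup_get_old_refcnt() returns
 * 102 - 100 = 2, the number of references counted since @seq.
 */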
157 
158 static int
159 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
160 		   int init_flags);
161 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
162 
163 static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node)
164 {
165 	const u64 *qgroupid = key;
166 	const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node);
167 
168 	if (qgroup->qgroupid < *qgroupid)
169 		return -1;
170 	else if (qgroup->qgroupid > *qgroupid)
171 		return 1;
172 
173 	return 0;
174 }
175 
176 /* must be called with qgroup_ioctl_lock held */
177 static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
178 					   u64 qgroupid)
179 {
180 	struct rb_node *node;
181 
182 	node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp);
183 	return rb_entry_safe(node, struct btrfs_qgroup, node);
184 }
185 
186 static int btrfs_qgroup_qgroupid_cmp(struct rb_node *new, const struct rb_node *existing)
187 {
188 	const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node);
189 
190 	return btrfs_qgroup_qgroupid_key_cmp(&new_qgroup->qgroupid, existing);
191 }
192 
193 /*
194  * Add qgroup to the filesystem's qgroup tree.
195  *
196  * Must be called with qgroup_lock held and @prealloc preallocated.
197  *
198  * Ownership of @prealloc is transferred to this function, thus the
199  * caller should no longer touch @prealloc.
200  */
201 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
202 					  struct btrfs_qgroup *prealloc,
203 					  u64 qgroupid)
204 {
205 	struct rb_node *node;
206 
207 	/* Caller must have pre-allocated @prealloc. */
208 	ASSERT(prealloc);
209 
210 	prealloc->qgroupid = qgroupid;
211 	node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp);
212 	if (node) {
213 		kfree(prealloc);
214 		return rb_entry(node, struct btrfs_qgroup, node);
215 	}
216 
217 	INIT_LIST_HEAD(&prealloc->groups);
218 	INIT_LIST_HEAD(&prealloc->members);
219 	INIT_LIST_HEAD(&prealloc->dirty);
220 	INIT_LIST_HEAD(&prealloc->iterator);
221 	INIT_LIST_HEAD(&prealloc->nested_iterator);
222 
223 	return prealloc;
224 }
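
/*
 * A minimal sketch of the prealloc pattern above, mirroring
 * btrfs_create_qgroup(): allocate outside the spinlock, hand ownership
 * over under it, then forget the pointer (it is consumed even when the
 * qgroupid already exists in the tree):
 *
 *	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
 *	if (!prealloc)
 *		return -ENOMEM;
 *	spin_lock(&fs_info->qgroup_lock);
 *	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
 *	spin_unlock(&fs_info->qgroup_lock);
 *	prealloc = NULL;
 */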
225 
226 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
227 {
228 	struct btrfs_qgroup_list *list;
229 
230 	list_del(&qgroup->dirty);
231 	while (!list_empty(&qgroup->groups)) {
232 		list = list_first_entry(&qgroup->groups,
233 					struct btrfs_qgroup_list, next_group);
234 		list_del(&list->next_group);
235 		list_del(&list->next_member);
236 		kfree(list);
237 	}
238 
239 	while (!list_empty(&qgroup->members)) {
240 		list = list_first_entry(&qgroup->members,
241 					struct btrfs_qgroup_list, next_member);
242 		list_del(&list->next_group);
243 		list_del(&list->next_member);
244 		kfree(list);
245 	}
246 }
247 
248 /* must be called with qgroup_lock held */
249 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
250 {
251 	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
252 
253 	if (!qgroup)
254 		return -ENOENT;
255 
256 	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
257 	__del_qgroup_rb(qgroup);
258 	return 0;
259 }
260 
261 /*
262  * Add relation specified by two qgroups.
263  *
264  * Must be called with qgroup_lock held, the ownership of @prealloc is
265  * transferred to this function and caller should not touch it anymore.
266  *
267  * Return: 0        on success
268  *         -ENOENT  if one of the qgroups is NULL
269  *         <0       other errors
270  */
271 static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
272 			     struct btrfs_qgroup *member,
273 			     struct btrfs_qgroup *parent)
274 {
275 	if (!member || !parent) {
276 		kfree(prealloc);
277 		return -ENOENT;
278 	}
279 
280 	prealloc->group = parent;
281 	prealloc->member = member;
282 	list_add_tail(&prealloc->next_group, &member->groups);
283 	list_add_tail(&prealloc->next_member, &parent->members);
284 
285 	return 0;
286 }
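
/*
 * After a successful call, @prealloc links the pair from both sides: it
 * sits on @member->groups via ->next_group and on @parent->members via
 * ->next_member, which is why teardown must unlink both lists (see
 * __del_qgroup_rb() and del_relation_rb()).
 */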
287 
288 /*
289  * Add relation specified by two qgroup ids.
290  *
291  * Must be called with qgroup_lock held.
292  *
293  * Return: 0        on success
294  *         -ENOENT  if one of the ids does not exist
295  *         <0       other errors
296  */
297 static int add_relation_rb(struct btrfs_fs_info *fs_info,
298 			   struct btrfs_qgroup_list *prealloc,
299 			   u64 memberid, u64 parentid)
300 {
301 	struct btrfs_qgroup *member;
302 	struct btrfs_qgroup *parent;
303 
304 	member = find_qgroup_rb(fs_info, memberid);
305 	parent = find_qgroup_rb(fs_info, parentid);
306 
307 	return __add_relation_rb(prealloc, member, parent);
308 }
309 
310 /* Must be called with qgroup_lock held */
311 static int del_relation_rb(struct btrfs_fs_info *fs_info,
312 			   u64 memberid, u64 parentid)
313 {
314 	struct btrfs_qgroup *member;
315 	struct btrfs_qgroup *parent;
316 	struct btrfs_qgroup_list *list;
317 
318 	member = find_qgroup_rb(fs_info, memberid);
319 	parent = find_qgroup_rb(fs_info, parentid);
320 	if (!member || !parent)
321 		return -ENOENT;
322 
323 	list_for_each_entry(list, &member->groups, next_group) {
324 		if (list->group == parent) {
325 			list_del(&list->next_group);
326 			list_del(&list->next_member);
327 			kfree(list);
328 			return 0;
329 		}
330 	}
331 	return -ENOENT;
332 }
333 
334 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
335 int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
336 			       u64 rfer, u64 excl)
337 {
338 	struct btrfs_qgroup *qgroup;
339 
340 	qgroup = find_qgroup_rb(fs_info, qgroupid);
341 	if (!qgroup)
342 		return -EINVAL;
343 	if (qgroup->rfer != rfer || qgroup->excl != excl)
344 		return -EINVAL;
345 	return 0;
346 }
347 #endif
348 
349 __printf(2, 3)
350 static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info, const char *fmt, ...)
351 {
352 	const u64 old_flags = fs_info->qgroup_flags;
353 
354 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
355 		return;
356 	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
357 				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
358 				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
359 	if (!(old_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
360 		struct va_format vaf;
361 		va_list args;
362 
363 		va_start(args, fmt);
364 		vaf.fmt = fmt;
365 		vaf.va = &args;
366 
367 		btrfs_warn_rl(fs_info, "qgroup marked inconsistent, %pV", &vaf);
368 		va_end(args);
369 	}
370 }
371 
372 static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
373 				   struct extent_buffer *leaf, int slot,
374 				   struct btrfs_qgroup_status_item *ptr)
375 {
376 	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
377 	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
378 	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
379 }
380 
381 /*
382  * The full config is read in one go; this is only called from open_ctree().
383  * It doesn't use any locking, as at this point we're still single-threaded.
384  */
385 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
386 {
387 	struct btrfs_key key;
388 	struct btrfs_key found_key;
389 	struct btrfs_root *quota_root = fs_info->quota_root;
390 	struct btrfs_path *path = NULL;
391 	struct extent_buffer *l;
392 	int slot;
393 	int ret = 0;
394 	u64 flags = 0;
395 	u64 rescan_progress = 0;
396 
397 	if (!fs_info->quota_root)
398 		return 0;
399 
400 	path = btrfs_alloc_path();
401 	if (!path) {
402 		ret = -ENOMEM;
403 		goto out;
404 	}
405 
406 	ret = btrfs_sysfs_add_qgroups(fs_info);
407 	if (ret < 0)
408 		goto out;
409 	/* default this to quota off, in case no status key is found */
410 	fs_info->qgroup_flags = 0;
411 
412 	/*
413 	 * pass 1: read status, all qgroup infos and limits
414 	 */
415 	key.objectid = 0;
416 	key.type = 0;
417 	key.offset = 0;
418 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
419 	if (ret)
420 		goto out;
421 
422 	while (1) {
423 		struct btrfs_qgroup *qgroup;
424 
425 		slot = path->slots[0];
426 		l = path->nodes[0];
427 		btrfs_item_key_to_cpu(l, &found_key, slot);
428 
429 		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
430 			struct btrfs_qgroup_status_item *ptr;
431 
432 			ptr = btrfs_item_ptr(l, slot,
433 					     struct btrfs_qgroup_status_item);
434 
435 			if (btrfs_qgroup_status_version(l, ptr) !=
436 			    BTRFS_QGROUP_STATUS_VERSION) {
437 				btrfs_err(fs_info,
438 				 "old qgroup version, quota disabled");
439 				goto out;
440 			}
441 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
442 			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
443 				qgroup_read_enable_gen(fs_info, l, slot, ptr);
444 			else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation)
445 				qgroup_mark_inconsistent(fs_info, "qgroup generation mismatch");
446 			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
447 			goto next1;
448 		}
449 
450 		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
451 		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
452 			goto next1;
453 
454 		qgroup = find_qgroup_rb(fs_info, found_key.offset);
455 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
456 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY))
457 			qgroup_mark_inconsistent(fs_info, "inconsistent qgroup config");
458 		if (!qgroup) {
459 			struct btrfs_qgroup *prealloc;
460 			struct btrfs_root *tree_root = fs_info->tree_root;
461 
462 			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
463 			if (!prealloc) {
464 				ret = -ENOMEM;
465 				goto out;
466 			}
467 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
468 			/*
469 			 * If a qgroup exists for a subvolume ID, it is possible
470 			 * that the subvolume has been deleted, in which case
471 			 * reusing that ID would lead to incorrect accounting.
472 			 *
473 			 * Ensure that we skip any such subvol ids.
474 			 *
475 			 * We don't need to lock because this is only called
476 			 * during mount before we start doing things like creating
477 			 * subvolumes.
478 			 */
479 			if (btrfs_is_fstree(qgroup->qgroupid) &&
480 			    qgroup->qgroupid > tree_root->free_objectid)
481 				/*
482 				 * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
483 				 * as it will get checked on the next call to
484 				 * btrfs_get_free_objectid.
485 				 */
486 				tree_root->free_objectid = qgroup->qgroupid + 1;
487 		}
488 		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
489 		if (ret < 0)
490 			goto out;
491 
492 		switch (found_key.type) {
493 		case BTRFS_QGROUP_INFO_KEY: {
494 			struct btrfs_qgroup_info_item *ptr;
495 
496 			ptr = btrfs_item_ptr(l, slot,
497 					     struct btrfs_qgroup_info_item);
498 			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
499 			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
500 			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
501 			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
502 			/* generation currently unused */
503 			break;
504 		}
505 		case BTRFS_QGROUP_LIMIT_KEY: {
506 			struct btrfs_qgroup_limit_item *ptr;
507 
508 			ptr = btrfs_item_ptr(l, slot,
509 					     struct btrfs_qgroup_limit_item);
510 			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
511 			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
512 			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
513 			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
514 			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
515 			break;
516 		}
517 		}
518 next1:
519 		ret = btrfs_next_item(quota_root, path);
520 		if (ret < 0)
521 			goto out;
522 		if (ret)
523 			break;
524 	}
525 	btrfs_release_path(path);
526 
527 	/*
528 	 * pass 2: read all qgroup relations
529 	 */
530 	key.objectid = 0;
531 	key.type = BTRFS_QGROUP_RELATION_KEY;
532 	key.offset = 0;
533 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
534 	if (ret)
535 		goto out;
536 	while (1) {
537 		struct btrfs_qgroup_list *list = NULL;
538 
539 		slot = path->slots[0];
540 		l = path->nodes[0];
541 		btrfs_item_key_to_cpu(l, &found_key, slot);
542 
543 		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
544 			goto next2;
545 
546 		if (found_key.objectid > found_key.offset) {
547 			/* parent <- member, not needed to build config */
548 			/* FIXME should we omit the key completely? */
549 			goto next2;
550 		}
551 
552 		list = kzalloc(sizeof(*list), GFP_KERNEL);
553 		if (!list) {
554 			ret = -ENOMEM;
555 			goto out;
556 		}
557 		ret = add_relation_rb(fs_info, list, found_key.objectid,
558 				      found_key.offset);
559 		list = NULL;
560 		if (ret == -ENOENT) {
561 			btrfs_warn(fs_info,
562 				"orphan qgroup relation 0x%llx->0x%llx",
563 				found_key.objectid, found_key.offset);
564 			ret = 0;	/* ignore the error */
565 		}
566 		if (ret)
567 			goto out;
568 next2:
569 		ret = btrfs_next_item(quota_root, path);
570 		if (ret < 0)
571 			goto out;
572 		if (ret)
573 			break;
574 	}
575 out:
576 	btrfs_free_path(path);
577 	fs_info->qgroup_flags |= flags;
578 	if (ret >= 0) {
579 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
580 			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
581 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
582 			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
583 	} else {
584 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
585 		btrfs_sysfs_del_qgroups(fs_info);
586 	}
587 
588 	return ret < 0 ? ret : 0;
589 }
590 
591 /*
592  * Called in close_ctree() when quota is still enabled.  This verifies we don't
593  * leak some reserved space.
594  *
595  * Return false if no reserved space is left.
596  * Return true if some reserved space is leaked.
597  */
598 bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
599 {
600 	struct rb_node *node;
601 	bool ret = false;
602 
603 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
604 		return ret;
605 	/*
606 	 * Since we're unmounting, there is no race and no need to grab qgroup
607 	 * lock.  We don't go post-order here, to provide a more user-friendly
608 	 * sorted result.
609 	 */
610 	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
611 		struct btrfs_qgroup *qgroup;
612 		int i;
613 
614 		qgroup = rb_entry(node, struct btrfs_qgroup, node);
615 		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
616 			if (qgroup->rsv.values[i]) {
617 				ret = true;
618 				btrfs_warn(fs_info,
619 		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
620 				   btrfs_qgroup_level(qgroup->qgroupid),
621 				   btrfs_qgroup_subvolid(qgroup->qgroupid),
622 				   i, qgroup->rsv.values[i]);
623 			}
624 		}
625 	}
626 	return ret;
627 }
628 
629 /*
630  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
631  * the first two of which are in single-threaded paths.
632  */
633 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
634 {
635 	struct rb_node *n;
636 	struct btrfs_qgroup *qgroup;
637 
638 	/*
639 	 * btrfs_quota_disable() can be called concurrently with
640 	 * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the
641 	 * lock.
642 	 */
643 	spin_lock(&fs_info->qgroup_lock);
644 	while ((n = rb_first(&fs_info->qgroup_tree))) {
645 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
646 		rb_erase(n, &fs_info->qgroup_tree);
647 		__del_qgroup_rb(qgroup);
648 		spin_unlock(&fs_info->qgroup_lock);
649 		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
650 		kfree(qgroup);
651 		spin_lock(&fs_info->qgroup_lock);
652 	}
653 	spin_unlock(&fs_info->qgroup_lock);
654 
655 	btrfs_sysfs_del_qgroups(fs_info);
656 }
657 
658 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
659 				    u64 dst)
660 {
661 	int ret;
662 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
663 	struct btrfs_path *path;
664 	struct btrfs_key key;
665 
666 	path = btrfs_alloc_path();
667 	if (!path)
668 		return -ENOMEM;
669 
670 	key.objectid = src;
671 	key.type = BTRFS_QGROUP_RELATION_KEY;
672 	key.offset = dst;
673 
674 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
675 	btrfs_free_path(path);
676 	return ret;
677 }
678 
679 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
680 				    u64 dst)
681 {
682 	int ret;
683 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
684 	struct btrfs_path *path;
685 	struct btrfs_key key;
686 
687 	path = btrfs_alloc_path();
688 	if (!path)
689 		return -ENOMEM;
690 
691 	key.objectid = src;
692 	key.type = BTRFS_QGROUP_RELATION_KEY;
693 	key.offset = dst;
694 
695 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
696 	if (ret < 0)
697 		goto out;
698 
699 	if (ret > 0) {
700 		ret = -ENOENT;
701 		goto out;
702 	}
703 
704 	ret = btrfs_del_item(trans, quota_root, path);
705 out:
706 	btrfs_free_path(path);
707 	return ret;
708 }
709 
710 static int add_qgroup_item(struct btrfs_trans_handle *trans,
711 			   struct btrfs_root *quota_root, u64 qgroupid)
712 {
713 	int ret;
714 	struct btrfs_path *path;
715 	struct btrfs_qgroup_info_item *qgroup_info;
716 	struct btrfs_qgroup_limit_item *qgroup_limit;
717 	struct extent_buffer *leaf;
718 	struct btrfs_key key;
719 
720 	if (btrfs_is_testing(quota_root->fs_info))
721 		return 0;
722 
723 	path = btrfs_alloc_path();
724 	if (!path)
725 		return -ENOMEM;
726 
727 	key.objectid = 0;
728 	key.type = BTRFS_QGROUP_INFO_KEY;
729 	key.offset = qgroupid;
730 
731 	/*
732 	 * Avoid a transaction abort by catching -EEXIST here. In that
733 	 * case, we proceed by re-initializing the existing structure
734 	 * on disk.
735 	 */
736 
737 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
738 				      sizeof(*qgroup_info));
739 	if (ret && ret != -EEXIST)
740 		goto out;
741 
742 	leaf = path->nodes[0];
743 	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
744 				 struct btrfs_qgroup_info_item);
745 	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
746 	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
747 	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
748 	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
749 	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
750 
751 	btrfs_release_path(path);
752 
753 	key.type = BTRFS_QGROUP_LIMIT_KEY;
754 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
755 				      sizeof(*qgroup_limit));
756 	if (ret && ret != -EEXIST)
757 		goto out;
758 
759 	leaf = path->nodes[0];
760 	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
761 				  struct btrfs_qgroup_limit_item);
762 	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
763 	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
764 	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
765 	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
766 	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
767 
768 	ret = 0;
769 out:
770 	btrfs_free_path(path);
771 	return ret;
772 }
773 
774 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
775 {
776 	int ret;
777 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
778 	struct btrfs_path *path;
779 	struct btrfs_key key;
780 
781 	path = btrfs_alloc_path();
782 	if (!path)
783 		return -ENOMEM;
784 
785 	key.objectid = 0;
786 	key.type = BTRFS_QGROUP_INFO_KEY;
787 	key.offset = qgroupid;
788 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
789 	if (ret < 0)
790 		goto out;
791 
792 	if (ret > 0) {
793 		ret = -ENOENT;
794 		goto out;
795 	}
796 
797 	ret = btrfs_del_item(trans, quota_root, path);
798 	if (ret)
799 		goto out;
800 
801 	btrfs_release_path(path);
802 
803 	key.type = BTRFS_QGROUP_LIMIT_KEY;
804 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
805 	if (ret < 0)
806 		goto out;
807 
808 	if (ret > 0) {
809 		ret = -ENOENT;
810 		goto out;
811 	}
812 
813 	ret = btrfs_del_item(trans, quota_root, path);
814 
815 out:
816 	btrfs_free_path(path);
817 	return ret;
818 }
819 
820 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
821 				    struct btrfs_qgroup *qgroup)
822 {
823 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
824 	struct btrfs_path *path;
825 	struct btrfs_key key;
826 	struct extent_buffer *l;
827 	struct btrfs_qgroup_limit_item *qgroup_limit;
828 	int ret;
829 	int slot;
830 
831 	key.objectid = 0;
832 	key.type = BTRFS_QGROUP_LIMIT_KEY;
833 	key.offset = qgroup->qgroupid;
834 
835 	path = btrfs_alloc_path();
836 	if (!path)
837 		return -ENOMEM;
838 
839 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
840 	if (ret > 0)
841 		ret = -ENOENT;
842 
843 	if (ret)
844 		goto out;
845 
846 	l = path->nodes[0];
847 	slot = path->slots[0];
848 	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
849 	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
850 	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
851 	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
852 	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
853 	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
854 out:
855 	btrfs_free_path(path);
856 	return ret;
857 }
858 
859 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
860 				   struct btrfs_qgroup *qgroup)
861 {
862 	struct btrfs_fs_info *fs_info = trans->fs_info;
863 	struct btrfs_root *quota_root = fs_info->quota_root;
864 	struct btrfs_path *path;
865 	struct btrfs_key key;
866 	struct extent_buffer *l;
867 	struct btrfs_qgroup_info_item *qgroup_info;
868 	int ret;
869 	int slot;
870 
871 	if (btrfs_is_testing(fs_info))
872 		return 0;
873 
874 	key.objectid = 0;
875 	key.type = BTRFS_QGROUP_INFO_KEY;
876 	key.offset = qgroup->qgroupid;
877 
878 	path = btrfs_alloc_path();
879 	if (!path)
880 		return -ENOMEM;
881 
882 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
883 	if (ret > 0)
884 		ret = -ENOENT;
885 
886 	if (ret)
887 		goto out;
888 
889 	l = path->nodes[0];
890 	slot = path->slots[0];
891 	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
892 	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
893 	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
894 	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
895 	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
896 	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
897 out:
898 	btrfs_free_path(path);
899 	return ret;
900 }
901 
902 static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
903 {
904 	struct btrfs_fs_info *fs_info = trans->fs_info;
905 	struct btrfs_root *quota_root = fs_info->quota_root;
906 	struct btrfs_path *path;
907 	struct btrfs_key key;
908 	struct extent_buffer *l;
909 	struct btrfs_qgroup_status_item *ptr;
910 	int ret;
911 	int slot;
912 
913 	key.objectid = 0;
914 	key.type = BTRFS_QGROUP_STATUS_KEY;
915 	key.offset = 0;
916 
917 	path = btrfs_alloc_path();
918 	if (!path)
919 		return -ENOMEM;
920 
921 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
922 	if (ret > 0)
923 		ret = -ENOENT;
924 
925 	if (ret)
926 		goto out;
927 
928 	l = path->nodes[0];
929 	slot = path->slots[0];
930 	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
931 	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
932 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
933 	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
934 	btrfs_set_qgroup_status_rescan(l, ptr,
935 				fs_info->qgroup_rescan_progress.objectid);
936 out:
937 	btrfs_free_path(path);
938 	return ret;
939 }
940 
941 /*
942  * called with qgroup_lock held
943  */
944 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
945 				  struct btrfs_root *root)
946 {
947 	struct btrfs_path *path;
948 	struct btrfs_key key;
949 	struct extent_buffer *leaf = NULL;
950 	int ret;
951 	int nr = 0;
952 
953 	path = btrfs_alloc_path();
954 	if (!path)
955 		return -ENOMEM;
956 
957 	key.objectid = 0;
958 	key.type = 0;
959 	key.offset = 0;
960 
961 	while (1) {
962 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
963 		if (ret < 0)
964 			goto out;
965 		leaf = path->nodes[0];
966 		nr = btrfs_header_nritems(leaf);
967 		if (!nr)
968 			break;
969 		/*
970 		 * Delete the leaves one by one,
971 		 * since the whole tree is going
972 		 * to be deleted.
973 		 */
974 		path->slots[0] = 0;
975 		ret = btrfs_del_items(trans, root, path, 0, nr);
976 		if (ret)
977 			goto out;
978 
979 		btrfs_release_path(path);
980 	}
981 	ret = 0;
982 out:
983 	btrfs_free_path(path);
984 	return ret;
985 }
986 
987 int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
988 		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
989 {
990 	struct btrfs_root *quota_root;
991 	struct btrfs_root *tree_root = fs_info->tree_root;
992 	struct btrfs_path *path = NULL;
993 	struct btrfs_qgroup_status_item *ptr;
994 	struct extent_buffer *leaf;
995 	struct btrfs_key key;
996 	struct btrfs_key found_key;
997 	struct btrfs_qgroup *qgroup = NULL;
998 	struct btrfs_qgroup *prealloc = NULL;
999 	struct btrfs_trans_handle *trans = NULL;
1000 	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
1001 	int ret = 0;
1002 	int slot;
1003 
1004 	/*
1005 	 * We need to have subvol_sem write locked, to prevent races between
1006 	 * concurrent tasks trying to enable quotas, because we will unlock
1007 	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
1008 	 * and before setting BTRFS_FS_QUOTA_ENABLED.
1009 	 */
1010 	lockdep_assert_held_write(&fs_info->subvol_sem);
1011 
1012 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
1013 		btrfs_err(fs_info,
1014 			  "qgroups are currently unsupported in extent tree v2");
1015 		return -EINVAL;
1016 	}
1017 
1018 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1019 	if (fs_info->quota_root)
1020 		goto out;
1021 
1022 	ret = btrfs_sysfs_add_qgroups(fs_info);
1023 	if (ret < 0)
1024 		goto out;
1025 
1026 	/*
1027 	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
1028 	 * avoid lock acquisition inversion problems (reported by lockdep) between
1029 	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
1030 	 * start a transaction.
1031 	 * After we started the transaction lock qgroup_ioctl_lock again and
1032 	 * check if someone else created the quota root in the meanwhile. If so,
1033 	 * just return success and release the transaction handle.
1034 	 *
1035 	 * Also we don't need to worry about someone else calling
1036 	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
1037 	 * that function returns 0 (success) when the sysfs entries already exist.
1038 	 */
1039 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1040 
1041 	/*
1042 	 * 1 for quota root item
1043 	 * 1 for BTRFS_QGROUP_STATUS item
1044 	 *
1045 	 * We would also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
1046 	 * per subvolume. However those are not currently reserved since it
1047 	 * would be a lot of overkill.
1048 	 */
1049 	trans = btrfs_start_transaction(tree_root, 2);
1050 
1051 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1052 	if (IS_ERR(trans)) {
1053 		ret = PTR_ERR(trans);
1054 		trans = NULL;
1055 		goto out;
1056 	}
1057 
1058 	if (fs_info->quota_root)
1059 		goto out;
1060 
1061 	/*
1062 	 * initially create the quota tree
1063 	 */
1064 	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
1065 	if (IS_ERR(quota_root)) {
1066 		ret = PTR_ERR(quota_root);
1067 		btrfs_abort_transaction(trans, ret);
1068 		goto out;
1069 	}
1070 
1071 	path = btrfs_alloc_path();
1072 	if (unlikely(!path)) {
1073 		ret = -ENOMEM;
1074 		btrfs_abort_transaction(trans, ret);
1075 		goto out_free_root;
1076 	}
1077 
1078 	key.objectid = 0;
1079 	key.type = BTRFS_QGROUP_STATUS_KEY;
1080 	key.offset = 0;
1081 
1082 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
1083 				      sizeof(*ptr));
1084 	if (unlikely(ret)) {
1085 		btrfs_abort_transaction(trans, ret);
1086 		goto out_free_path;
1087 	}
1088 
1089 	leaf = path->nodes[0];
1090 	ptr = btrfs_item_ptr(leaf, path->slots[0],
1091 				 struct btrfs_qgroup_status_item);
1092 	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
1093 	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
1094 	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
1095 	if (simple) {
1096 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1097 		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
1098 		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
1099 	} else {
1100 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1101 	}
1102 	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
1103 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
1104 	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
1105 
1106 	key.objectid = 0;
1107 	key.type = BTRFS_ROOT_REF_KEY;
1108 	key.offset = 0;
1109 
1110 	btrfs_release_path(path);
1111 	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
1112 	if (ret > 0)
1113 		goto out_add_root;
1114 	if (unlikely(ret < 0)) {
1115 		btrfs_abort_transaction(trans, ret);
1116 		goto out_free_path;
1117 	}
1118 
1119 	while (1) {
1120 		slot = path->slots[0];
1121 		leaf = path->nodes[0];
1122 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1123 
1124 		if (found_key.type == BTRFS_ROOT_REF_KEY) {
1125 
1126 			/* Release locks on tree_root before we access quota_root */
1127 			btrfs_release_path(path);
1128 
1129 			/* We should not have a stray @prealloc pointer. */
1130 			ASSERT(prealloc == NULL);
1131 			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1132 			if (unlikely(!prealloc)) {
1133 				ret = -ENOMEM;
1134 				btrfs_abort_transaction(trans, ret);
1135 				goto out_free_path;
1136 			}
1137 
1138 			ret = add_qgroup_item(trans, quota_root,
1139 					      found_key.offset);
1140 			if (unlikely(ret)) {
1141 				btrfs_abort_transaction(trans, ret);
1142 				goto out_free_path;
1143 			}
1144 
1145 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
1146 			prealloc = NULL;
1147 			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1148 			if (unlikely(ret < 0)) {
1149 				btrfs_abort_transaction(trans, ret);
1150 				goto out_free_path;
1151 			}
1152 			ret = btrfs_search_slot_for_read(tree_root, &found_key,
1153 							 path, 1, 0);
1154 			if (unlikely(ret < 0)) {
1155 				btrfs_abort_transaction(trans, ret);
1156 				goto out_free_path;
1157 			}
1158 			if (ret > 0) {
1159 				/*
1160 				 * Shouldn't happen, but in case it does we
1161 				 * don't need to do the btrfs_next_item, just
1162 				 * continue.
1163 				 */
1164 				continue;
1165 			}
1166 		}
1167 		ret = btrfs_next_item(tree_root, path);
1168 		if (unlikely(ret < 0)) {
1169 			btrfs_abort_transaction(trans, ret);
1170 			goto out_free_path;
1171 		}
1172 		if (ret)
1173 			break;
1174 	}
1175 
1176 out_add_root:
1177 	btrfs_release_path(path);
1178 	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1179 	if (unlikely(ret)) {
1180 		btrfs_abort_transaction(trans, ret);
1181 		goto out_free_path;
1182 	}
1183 
1184 	ASSERT(prealloc == NULL);
1185 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1186 	if (!prealloc) {
1187 		ret = -ENOMEM;
1188 		goto out_free_path;
1189 	}
1190 	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
1191 	prealloc = NULL;
1192 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1193 	if (unlikely(ret < 0)) {
1194 		btrfs_abort_transaction(trans, ret);
1195 		goto out_free_path;
1196 	}
1197 
1198 	fs_info->qgroup_enable_gen = trans->transid;
1199 
1200 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1201 	/*
1202 	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
1203 	 * a deadlock with tasks concurrently doing other qgroup operations, such
1204 	 * adding/removing qgroups or adding/deleting qgroup relations for example,
1205 	 * because all qgroup operations first start or join a transaction and then
1206 	 * lock the qgroup_ioctl_lock mutex.
1207 	 * We are safe from a concurrent task trying to enable quotas, by calling
1208 	 * this function, since we are serialized by fs_info->subvol_sem.
1209 	 */
1210 	ret = btrfs_commit_transaction(trans);
1211 	trans = NULL;
1212 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1213 	if (ret)
1214 		goto out_free_path;
1215 
1216 	/*
1217 	 * Set quota enabled flag after committing the transaction, to avoid
1218 	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1219 	 * creation.
1220 	 */
1221 	spin_lock(&fs_info->qgroup_lock);
1222 	fs_info->quota_root = quota_root;
1223 	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1224 	spin_unlock(&fs_info->qgroup_lock);
1225 
1226 	/* Skip rescan for simple qgroups. */
1227 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
1228 		goto out_free_path;
1229 
1230 	ret = qgroup_rescan_init(fs_info, 0, 1);
1231 	if (!ret) {
1232 		qgroup_rescan_zero_tracking(fs_info);
1233 		fs_info->qgroup_rescan_running = true;
1234 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
1235 				 &fs_info->qgroup_rescan_work);
1236 	} else {
1237 		/*
1238 		 * We have set both BTRFS_FS_QUOTA_ENABLED and
1239 		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
1240 		 * -EINPROGRESS. That can happen because someone started the
1241 		 * rescan worker by calling quota rescan ioctl before we
1242 		 * attempted to initialize the rescan worker. Failure due to
1243 		 * quotas disabled in the meanwhile is not possible, because
1244 		 * we are holding a write lock on fs_info->subvol_sem, which
1245 		 * is also acquired when disabling quotas.
1246 		 * Ignore such error, and any other error would need to undo
1247 		 * everything we did in the transaction we just committed.
1248 		 */
1249 		ASSERT(ret == -EINPROGRESS);
1250 		ret = 0;
1251 	}
1252 
1253 out_free_path:
1254 	btrfs_free_path(path);
1255 out_free_root:
1256 	if (ret)
1257 		btrfs_put_root(quota_root);
1258 out:
1259 	if (ret)
1260 		btrfs_sysfs_del_qgroups(fs_info);
1261 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1262 	if (ret && trans)
1263 		btrfs_end_transaction(trans);
1264 	else if (trans)
1265 		ret = btrfs_end_transaction(trans);
1266 	kfree(prealloc);
1267 	return ret;
1268 }
1269 
1270 /*
1271  * It is possible to have outstanding ordered extents which reserved bytes
1272  * before we disabled. We need to fully flush delalloc, ordered extents, and a
1273  * commit to ensure that we don't leak such reservations, only to have them
1274  * come back if we re-enable.
1275  *
1276  * - enable simple quotas
1277  * - reserve space
1278  * - release it, store rsv_bytes in OE
1279  * - disable quotas
1280  * - enable simple quotas (qgroup rsv are all 0)
1281  * - OE finishes
1282  * - run delayed refs
1283  * - free rsv_bytes, resulting in miscounting or even underflow
1284  */
1285 static int flush_reservations(struct btrfs_fs_info *fs_info)
1286 {
1287 	int ret;
1288 
1289 	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
1290 	if (ret)
1291 		return ret;
1292 	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
1293 
1294 	return btrfs_commit_current_transaction(fs_info->tree_root);
1295 }
1296 
1297 int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1298 {
1299 	struct btrfs_root *quota_root = NULL;
1300 	struct btrfs_trans_handle *trans = NULL;
1301 	int ret = 0;
1302 
1303 	/*
1304 	 * We need to have subvol_sem write locked to prevent races with
1305 	 * snapshot creation.
1306 	 */
1307 	lockdep_assert_held_write(&fs_info->subvol_sem);
1308 
1309 	/*
1310 	 * Relocation will mess with backrefs, so make sure we have the
1311 	 * cleaner_mutex held to protect us from relocate.
1312 	 */
1313 	lockdep_assert_held(&fs_info->cleaner_mutex);
1314 
1315 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1316 	if (!fs_info->quota_root)
1317 		goto out;
1318 
1319 	/*
1320 	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
1321 	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
1322 	 * to lock that mutex while holding a transaction handle and the rescan
1323 	 * worker needs to commit a transaction.
1324 	 */
1325 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1326 
1327 	/*
1328 	 * Request qgroup rescan worker to complete and wait for it. This wait
1329 	 * must be done before transaction start for quota disable since it may
1330 	 * deadlock with transaction by the qgroup rescan worker.
1331 	 */
1332 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1333 	btrfs_qgroup_wait_for_completion(fs_info, false);
1334 
1335 	/*
1336 	 * We have nothing held here and no trans handle, just return the error
1337 	 * if there is one and set back the quota enabled bit since we didn't
1338 	 * actually disable quotas.
1339 	 */
1340 	ret = flush_reservations(fs_info);
1341 	if (ret) {
1342 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1343 		return ret;
1344 	}
1345 
1346 	/*
1347 	 * 1 For the root item
1348 	 *
1349 	 * We should also reserve enough items for the quota tree deletion in
1350 	 * btrfs_clean_quota_tree but this is not done.
1351 	 *
1352 	 * Also, we must always start a transaction without holding the mutex
1353 	 * qgroup_ioctl_lock, see btrfs_quota_enable().
1354 	 */
1355 	trans = btrfs_start_transaction(fs_info->tree_root, 1);
1356 
1357 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1358 	if (IS_ERR(trans)) {
1359 		ret = PTR_ERR(trans);
1360 		trans = NULL;
1361 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1362 		goto out;
1363 	}
1364 
1365 	if (!fs_info->quota_root)
1366 		goto out;
1367 
1368 	spin_lock(&fs_info->qgroup_lock);
1369 	quota_root = fs_info->quota_root;
1370 	fs_info->quota_root = NULL;
1371 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1372 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1373 	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1374 	spin_unlock(&fs_info->qgroup_lock);
1375 
1376 	btrfs_free_qgroup_config(fs_info);
1377 
1378 	ret = btrfs_clean_quota_tree(trans, quota_root);
1379 	if (unlikely(ret)) {
1380 		btrfs_abort_transaction(trans, ret);
1381 		goto out;
1382 	}
1383 
1384 	ret = btrfs_del_root(trans, &quota_root->root_key);
1385 	if (unlikely(ret)) {
1386 		btrfs_abort_transaction(trans, ret);
1387 		goto out;
1388 	}
1389 
1390 	spin_lock(&fs_info->trans_lock);
1391 	list_del(&quota_root->dirty_list);
1392 	spin_unlock(&fs_info->trans_lock);
1393 
1394 	btrfs_tree_lock(quota_root->node);
1395 	btrfs_clear_buffer_dirty(trans, quota_root->node);
1396 	btrfs_tree_unlock(quota_root->node);
1397 	ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
1398 				    quota_root->node, 0, 1);
1399 
1400 	if (ret < 0)
1401 		btrfs_abort_transaction(trans, ret);
1402 
1403 out:
1404 	btrfs_put_root(quota_root);
1405 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1406 	if (ret && trans)
1407 		btrfs_end_transaction(trans);
1408 	else if (trans)
1409 		ret = btrfs_commit_transaction(trans);
1410 	return ret;
1411 }
1412 
1413 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1414 			 struct btrfs_qgroup *qgroup)
1415 {
1416 	if (list_empty(&qgroup->dirty))
1417 		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1418 }
1419 
1420 static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
1421 {
1422 	if (!list_empty(&qgroup->iterator))
1423 		return;
1424 
1425 	list_add_tail(&qgroup->iterator, head);
1426 }
1427 
1428 static void qgroup_iterator_clean(struct list_head *head)
1429 {
1430 	while (!list_empty(head)) {
1431 		struct btrfs_qgroup *qgroup;
1432 
1433 		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
1434 		list_del_init(&qgroup->iterator);
1435 	}
1436 }
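
/*
 * The iterator lists above allow a cycle-safe, allocation-free walk up
 * the qgroup hierarchy.  A minimal sketch of the pattern, as used by
 * __qgroup_excl_accounting() below (assuming qgroup_lock is held, and
 * with process() standing in for the caller's per-qgroup work):
 *
 *	LIST_HEAD(qgroup_list);
 *
 *	qgroup_iterator_add(&qgroup_list, qgroup);
 *	list_for_each_entry(qgroup, &qgroup_list, iterator) {
 *		struct btrfs_qgroup_list *glist;
 *
 *		process(qgroup);
 *		list_for_each_entry(glist, &qgroup->groups, next_group)
 *			qgroup_iterator_add(&qgroup_list, glist->group);
 *	}
 *	qgroup_iterator_clean(&qgroup_list);
 */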
1437 
1438 /*
1439  * The easy accounting: we're updating a qgroup relationship whose child
1440  * qgroup only has exclusive extents.
1441  *
1442  * In this case, all exclusive extents will also be exclusive for the parent,
1443  * so excl/rfer just get added/removed.
1444  *
1445  * The same goes for qgroup reservation space, which must also be added
1446  * to/removed from the parent.
1447  * Otherwise, when the child later releases reservation space, the parent
1448  * would underflow its reservation (in the relation-adding case).
1449  *
1450  * Caller should hold fs_info->qgroup_lock.
1451  */
1452 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
1453 				    struct btrfs_qgroup *src, int sign)
1454 {
1455 	struct btrfs_qgroup *qgroup;
1456 	LIST_HEAD(qgroup_list);
1457 	u64 num_bytes = src->excl;
1458 	u64 num_bytes_cmpr = src->excl_cmpr;
1459 	int ret = 0;
1460 
1461 	qgroup = find_qgroup_rb(fs_info, ref_root);
1462 	if (!qgroup)
1463 		goto out;
1464 
1465 	qgroup_iterator_add(&qgroup_list, qgroup);
1466 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
1467 		struct btrfs_qgroup_list *glist;
1468 
1469 		qgroup->rfer += sign * num_bytes;
1470 		qgroup->rfer_cmpr += sign * num_bytes_cmpr;
1471 
1472 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1473 		WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
1474 		qgroup->excl += sign * num_bytes;
1475 		qgroup->excl_cmpr += sign * num_bytes_cmpr;
1476 
1477 		if (sign > 0)
1478 			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1479 		else
1480 			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1481 		qgroup_dirty(fs_info, qgroup);
1482 
1483 		/* Append parent qgroups to @qgroup_list. */
1484 		list_for_each_entry(glist, &qgroup->groups, next_group)
1485 			qgroup_iterator_add(&qgroup_list, glist->group);
1486 	}
1487 	ret = 0;
1488 out:
1489 	qgroup_iterator_clean(&qgroup_list);
1490 	return ret;
1491 }
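
/*
 * For example: if the child qgroup has rfer == excl == 1 MiB, assigning
 * it under a parent (sign == 1) bumps the parent's (and every higher
 * ancestor's) rfer and excl by 1 MiB and propagates the child's
 * reservation upwards, while removing it (sign == -1) subtracts the same
 * amounts again.
 */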
1492 
1493 
1494 /*
1495  * Quick path for updating qgroup with only excl refs.
1496  *
1497  * In that case, just updating all parents will be enough.
1498  * Otherwise we need to do a full rescan.
1499  * Caller should also hold fs_info->qgroup_lock.
1500  *
1501  * Return 0 for a quick update, >0 if a full rescan is needed
1502  * (the INCONSISTENT flag is also set).
1503  * Return <0 for other errors.
1504  */
1505 static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1506 				   u64 src, u64 dst, int sign)
1507 {
1508 	struct btrfs_qgroup *qgroup;
1509 	int ret = 1;
1510 
1511 	qgroup = find_qgroup_rb(fs_info, src);
1512 	if (!qgroup)
1513 		goto out;
1514 	if (qgroup->excl == qgroup->rfer) {
1515 		ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
1516 		if (ret < 0)
1517 			goto out;
1518 		ret = 0;
1519 	}
1520 out:
1521 	if (ret)
1522 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1523 	return ret;
1524 }
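
/*
 * For example: a subvolume qgroup with no snapshots has rfer == excl
 * (every byte it references is exclusive to it), so assigning it to a
 * parent can take this quick path; once it shares extents with a
 * snapshot, rfer > excl and the relation change needs a full rescan
 * instead.
 */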
1525 
1526 /*
1527  * Add relation between @src and @dst qgroup. The @prealloc is allocated by the
1528  * callers and transferred here (either used or freed on error).
1529  */
1530 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
1531 			      struct btrfs_qgroup_list *prealloc)
1532 {
1533 	struct btrfs_fs_info *fs_info = trans->fs_info;
1534 	struct btrfs_qgroup *parent;
1535 	struct btrfs_qgroup *member;
1536 	struct btrfs_qgroup_list *list;
1537 	int ret = 0;
1538 
1539 	ASSERT(prealloc);
1540 
1541 	/* Check the level of src and dst first */
1542 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) {
1543 		kfree(prealloc);
1544 		return -EINVAL;
1545 	}
1546 
1547 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1548 	if (!fs_info->quota_root) {
1549 		ret = -ENOTCONN;
1550 		goto out;
1551 	}
1552 	member = find_qgroup_rb(fs_info, src);
1553 	parent = find_qgroup_rb(fs_info, dst);
1554 	if (!member || !parent) {
1555 		ret = -EINVAL;
1556 		goto out;
1557 	}
1558 
1559 	/* Check if such a qgroup relation exists first. */
1560 	list_for_each_entry(list, &member->groups, next_group) {
1561 		if (list->group == parent) {
1562 			ret = -EEXIST;
1563 			goto out;
1564 		}
1565 	}
1566 
1567 	ret = add_qgroup_relation_item(trans, src, dst);
1568 	if (ret)
1569 		goto out;
1570 
1571 	ret = add_qgroup_relation_item(trans, dst, src);
1572 	if (ret) {
1573 		del_qgroup_relation_item(trans, src, dst);
1574 		goto out;
1575 	}
1576 
1577 	spin_lock(&fs_info->qgroup_lock);
1578 	ret = __add_relation_rb(prealloc, member, parent);
1579 	prealloc = NULL;
1580 	if (ret < 0) {
1581 		spin_unlock(&fs_info->qgroup_lock);
1582 		goto out;
1583 	}
1584 	ret = quick_update_accounting(fs_info, src, dst, 1);
1585 	spin_unlock(&fs_info->qgroup_lock);
1586 out:
1587 	kfree(prealloc);
1588 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1589 	return ret;
1590 }
1591 
1592 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1593 				 u64 dst)
1594 {
1595 	struct btrfs_fs_info *fs_info = trans->fs_info;
1596 	struct btrfs_qgroup *parent;
1597 	struct btrfs_qgroup *member;
1598 	struct btrfs_qgroup_list *list;
1599 	bool found = false;
1600 	int ret = 0;
1601 	int ret2;
1602 
1603 	if (!fs_info->quota_root) {
1604 		ret = -ENOTCONN;
1605 		goto out;
1606 	}
1607 
1608 	member = find_qgroup_rb(fs_info, src);
1609 	parent = find_qgroup_rb(fs_info, dst);
1610 	/*
1611 	 * If the parent/member pair doesn't exist, then try to delete only
1612 	 * the dead relation items.
1613 	 */
1614 	if (!member || !parent)
1615 		goto delete_item;
1616 
1617 	/* check if such qgroup relation exist firstly */
1618 	/* Check if such a qgroup relation exists first. */
1619 		if (list->group == parent) {
1620 			found = true;
1621 			break;
1622 		}
1623 	}
1624 
1625 delete_item:
1626 	ret = del_qgroup_relation_item(trans, src, dst);
1627 	if (ret < 0 && ret != -ENOENT)
1628 		goto out;
1629 	ret2 = del_qgroup_relation_item(trans, dst, src);
1630 	if (ret2 < 0 && ret2 != -ENOENT)
1631 		goto out;
1632 
1633 	/* At least one deletion succeeded, return 0 */
1634 	if (!ret || !ret2)
1635 		ret = 0;
1636 
1637 	if (found) {
1638 		spin_lock(&fs_info->qgroup_lock);
1639 		del_relation_rb(fs_info, src, dst);
1640 		ret = quick_update_accounting(fs_info, src, dst, -1);
1641 		spin_unlock(&fs_info->qgroup_lock);
1642 	}
1643 out:
1644 	return ret;
1645 }
1646 
1647 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1648 			      u64 dst)
1649 {
1650 	struct btrfs_fs_info *fs_info = trans->fs_info;
1651 	int ret = 0;
1652 
1653 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1654 	ret = __del_qgroup_relation(trans, src, dst);
1655 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1656 
1657 	return ret;
1658 }
1659 
1660 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1661 {
1662 	struct btrfs_fs_info *fs_info = trans->fs_info;
1663 	struct btrfs_root *quota_root;
1664 	struct btrfs_qgroup *qgroup;
1665 	struct btrfs_qgroup *prealloc = NULL;
1666 	int ret = 0;
1667 
1668 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1669 	if (!fs_info->quota_root) {
1670 		ret = -ENOTCONN;
1671 		goto out;
1672 	}
1673 	quota_root = fs_info->quota_root;
1674 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1675 	if (qgroup) {
1676 		ret = -EEXIST;
1677 		goto out;
1678 	}
1679 
1680 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1681 	if (!prealloc) {
1682 		ret = -ENOMEM;
1683 		goto out;
1684 	}
1685 
1686 	ret = add_qgroup_item(trans, quota_root, qgroupid);
1687 	if (ret)
1688 		goto out;
1689 
1690 	spin_lock(&fs_info->qgroup_lock);
1691 	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
1692 	spin_unlock(&fs_info->qgroup_lock);
1693 	prealloc = NULL;
1694 
1695 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1696 out:
1697 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1698 	kfree(prealloc);
1699 	return ret;
1700 }
1701 
1702 /*
1703  * Return 0 if we cannot delete the qgroup (not empty, or has children, etc.).
1704  * Return >0 if we can delete the qgroup.
1705  * Return <0 for other errors during tree search.
1706  */
1707 static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
1708 {
1709 	struct btrfs_key key;
1710 	struct btrfs_path *path;
1711 	int ret;
1712 
1713 	/*
1714 	 * Squota would never be inconsistent, but there can still be cases
1715 	 * where a dropped subvolume still has qgroup numbers, and squota
1716 	 * relies on such qgroups for future accounting.
1717 	 *
1718 	 * So for squota, do not allow dropping any non-zero qgroup.
1719 	 */
1720 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
1721 	    (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
1722 		return 0;
1723 
1724 	/* For higher level qgroup, we can only delete it if it has no child. */
1725 	if (btrfs_qgroup_level(qgroup->qgroupid)) {
1726 		if (!list_empty(&qgroup->members))
1727 			return 0;
1728 		return 1;
1729 	}
1730 
1731 	/*
1732 	 * For a level-0 qgroup, we can only delete it if there is no subvolume
1733 	 * backing it.
1734 	 * This means that even if a subvolume is unlinked but not yet fully
1735 	 * dropped, we can not delete its qgroup.
1736 	 */
1737 	key.objectid = qgroup->qgroupid;
1738 	key.type = BTRFS_ROOT_ITEM_KEY;
1739 	key.offset = -1ULL;
1740 	path = btrfs_alloc_path();
1741 	if (!path)
1742 		return -ENOMEM;
1743 
1744 	ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
1745 	btrfs_free_path(path);
1746 	/*
1747 	 * The @ret from btrfs_find_root() exactly matches our definition for
1748 	 * the return value, thus can be returned directly.
1749 	 */
1750 	return ret;
1751 }
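
/*
 * Illustrative note: as used here with key.offset == -1ULL,
 * btrfs_find_root() returns 0 when a root item for @qgroupid still exists
 * (a subvolume, possibly unlinked but not fully dropped, still backs the
 * qgroup), 1 when no such root item is found, and <0 on search errors -
 * which is exactly the 0 / >0 / <0 contract documented above.
 */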
1752 
1753 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1754 {
1755 	struct btrfs_fs_info *fs_info = trans->fs_info;
1756 	struct btrfs_qgroup *qgroup;
1757 	struct btrfs_qgroup_list *list;
1758 	int ret = 0;
1759 
1760 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1761 	if (!fs_info->quota_root) {
1762 		ret = -ENOTCONN;
1763 		goto out;
1764 	}
1765 
1766 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1767 	if (!qgroup) {
1768 		ret = -ENOENT;
1769 		goto out;
1770 	}
1771 
1772 	ret = can_delete_qgroup(fs_info, qgroup);
1773 	if (ret < 0)
1774 		goto out;
1775 	if (ret == 0) {
1776 		ret = -EBUSY;
1777 		goto out;
1778 	}
1779 
1780 	/* Check if there are no children of this qgroup */
1781 	if (!list_empty(&qgroup->members)) {
1782 		ret = -EBUSY;
1783 		goto out;
1784 	}
1785 
1786 	ret = del_qgroup_item(trans, qgroupid);
1787 	if (ret && ret != -ENOENT)
1788 		goto out;
1789 
1790 	while (!list_empty(&qgroup->groups)) {
1791 		list = list_first_entry(&qgroup->groups,
1792 					struct btrfs_qgroup_list, next_group);
1793 		ret = __del_qgroup_relation(trans, qgroupid,
1794 					    list->group->qgroupid);
1795 		if (ret)
1796 			goto out;
1797 	}
1798 
1799 	spin_lock(&fs_info->qgroup_lock);
1800 	/*
1801 	 * Warn on reserved space. The qgroup should have no child nor
1802 	 * corresponding subvolume at this point.
1803 	 * Thus all its reserved space should be zero, regardless of whether
1804 	 * the qgroup is consistent or which mode it is in.
1805 	 */
1806 	if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
1807 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
1808 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
1809 		DEBUG_WARN();
1810 		btrfs_warn_rl(fs_info,
1811 "to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
1812 			      btrfs_qgroup_level(qgroup->qgroupid),
1813 			      btrfs_qgroup_subvolid(qgroup->qgroupid),
1814 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
1815 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
1816 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
1817 
1818 	}
1819 	/*
1820 	 * The same for rfer/excl numbers, but that's only if our qgroup is
1821 	 * consistent and if it's in regular qgroup mode.
1822 	 * For simple mode it's not as accurate thus we can hit non-zero values
1823 	 * very frequently.
1824 	 */
1825 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
1826 	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
1827 		if (qgroup->rfer || qgroup->excl ||
1828 		    qgroup->rfer_cmpr || qgroup->excl_cmpr) {
1829 			DEBUG_WARN();
1830 			qgroup_mark_inconsistent(fs_info,
1831 				"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
1832 				btrfs_qgroup_level(qgroup->qgroupid),
1833 				btrfs_qgroup_subvolid(qgroup->qgroupid),
1834 				qgroup->rfer, qgroup->rfer_cmpr,
1835 				qgroup->excl, qgroup->excl_cmpr);
1836 		}
1837 	}
1838 	del_qgroup_rb(fs_info, qgroupid);
1839 	spin_unlock(&fs_info->qgroup_lock);
1840 
1841 	/*
1842 	 * Remove the qgroup from sysfs now without holding the qgroup_lock
1843 	 * spinlock, since the sysfs_remove_group() function needs to take
1844 	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1845 	 */
1846 	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1847 	kfree(qgroup);
1848 out:
1849 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1850 	return ret;
1851 }
1852 
1853 int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
1854 {
1855 	struct btrfs_trans_handle *trans;
1856 	int ret;
1857 
1858 	if (!btrfs_is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) ||
1859 	    !fs_info->quota_root)
1860 		return 0;
1861 
1862 	/*
1863 	 * Commit current transaction to make sure all the rfer/excl numbers
1864 	 * get updated.
1865 	 */
1866 	ret = btrfs_commit_current_transaction(fs_info->quota_root);
1867 	if (ret < 0)
1868 		return ret;
1869 
1870 	/* Start new trans to delete the qgroup info and limit items. */
1871 	trans = btrfs_start_transaction(fs_info->quota_root, 2);
1872 	if (IS_ERR(trans))
1873 		return PTR_ERR(trans);
1874 	ret = btrfs_remove_qgroup(trans, subvolid);
1875 	btrfs_end_transaction(trans);
1876 	/*
1877 	 * Either it's squota and the subvolume still has numbers needed for
1878 	 * future accounting, in which case we can not delete it and just skip it.
1879 	 *
1880 	 * Or the qgroup was already removed by a qgroup rescan. In both cases
1881 	 * it is safe to ignore the error.
1882 	 */
1883 	if (ret == -EBUSY || ret == -ENOENT)
1884 		ret = 0;
1885 	return ret;
1886 }
1887 
1888 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1889 		       struct btrfs_qgroup_limit *limit)
1890 {
1891 	struct btrfs_fs_info *fs_info = trans->fs_info;
1892 	struct btrfs_qgroup *qgroup;
1893 	int ret = 0;
1894 	/* Sometimes we would want to clear the limit on this qgroup.
1895 	 * To meet this requirement, we treat -1 as a special value
1896 	 * which tells the kernel to clear the limit on this qgroup.
1897 	 */
1898 	const u64 CLEAR_VALUE = -1;
1899 
1900 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1901 	if (!fs_info->quota_root) {
1902 		ret = -ENOTCONN;
1903 		goto out;
1904 	}
1905 
1906 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1907 	if (!qgroup) {
1908 		ret = -ENOENT;
1909 		goto out;
1910 	}
1911 
1912 	spin_lock(&fs_info->qgroup_lock);
1913 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1914 		if (limit->max_rfer == CLEAR_VALUE) {
1915 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1916 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1917 			qgroup->max_rfer = 0;
1918 		} else {
1919 			qgroup->max_rfer = limit->max_rfer;
1920 		}
1921 	}
1922 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1923 		if (limit->max_excl == CLEAR_VALUE) {
1924 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1925 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1926 			qgroup->max_excl = 0;
1927 		} else {
1928 			qgroup->max_excl = limit->max_excl;
1929 		}
1930 	}
1931 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1932 		if (limit->rsv_rfer == CLEAR_VALUE) {
1933 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1934 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1935 			qgroup->rsv_rfer = 0;
1936 		} else {
1937 			qgroup->rsv_rfer = limit->rsv_rfer;
1938 		}
1939 	}
1940 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1941 		if (limit->rsv_excl == CLEAR_VALUE) {
1942 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1943 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1944 			qgroup->rsv_excl = 0;
1945 		} else {
1946 			qgroup->rsv_excl = limit->rsv_excl;
1947 		}
1948 	}
1949 	qgroup->lim_flags |= limit->flags;
1950 
1951 	spin_unlock(&fs_info->qgroup_lock);
1952 
1953 	ret = update_qgroup_limit_item(trans, qgroup);
1954 	if (ret)
1955 		qgroup_mark_inconsistent(fs_info, "qgroup item update error %d", ret);
1956 
1957 out:
1958 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1959 	return ret;
1960 }
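
/*
 * Illustrative example (not from the original source): from the caller's
 * side, clearing an existing limit uses the CLEAR_VALUE (-1) convention
 * handled above.  A minimal sketch:
 *
 *	struct btrfs_qgroup_limit lim = {
 *		.flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
 *		.max_rfer = (u64)-1,	// CLEAR_VALUE: drop the limit
 *	};
 *
 *	ret = btrfs_limit_qgroup(trans, qgroupid, &lim);
 *
 * With any other max_rfer value, the same call sets the limit instead.
 */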
1961 
1962 /*
1963  * Inform qgroup to trace one dirty extent, its info is recorded in @record.
1964  * So qgroup can account it at transaction committing time.
1965  *
1966  * No-lock version: the caller must hold the delayed ref lock and allocate the
1967  * memory, then call btrfs_qgroup_trace_extent_post() after exiting lock context.
1968  *
1969  * Return 0 for a successful insertion.
1970  * Return >0 for existing record, caller can free @record safely.
1971  * Return <0 for insertion failure, caller can free @record safely.
1972  */
1973 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1974 				     struct btrfs_delayed_ref_root *delayed_refs,
1975 				     struct btrfs_qgroup_extent_record *record,
1976 				     u64 bytenr)
1977 {
1978 	struct btrfs_qgroup_extent_record *existing, *ret;
1979 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
1980 
1981 	if (!btrfs_qgroup_full_accounting(fs_info))
1982 		return 1;
1983 
1984 #if BITS_PER_LONG == 32
1985 	if (bytenr >= MAX_LFS_FILESIZE) {
1986 		btrfs_err_rl(fs_info,
1987 "qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
1988 			     bytenr);
1989 		btrfs_err_32bit_limit(fs_info);
1990 		return -EOVERFLOW;
1991 	}
1992 #endif
1993 
1994 	trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
1995 
1996 	xa_lock(&delayed_refs->dirty_extents);
1997 	existing = xa_load(&delayed_refs->dirty_extents, index);
1998 	if (existing) {
1999 		if (record->data_rsv && !existing->data_rsv) {
2000 			existing->data_rsv = record->data_rsv;
2001 			existing->data_rsv_refroot = record->data_rsv_refroot;
2002 		}
2003 		xa_unlock(&delayed_refs->dirty_extents);
2004 		return 1;
2005 	}
2006 
2007 	ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
2008 	xa_unlock(&delayed_refs->dirty_extents);
2009 	if (xa_is_err(ret)) {
2010 		qgroup_mark_inconsistent(fs_info, "xarray insert error: %d", xa_err(ret));
2011 		return xa_err(ret);
2012 	}
2013 
2014 	return 0;
2015 }
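
/*
 * Worked example (illustrative): the dirty_extents xarray above is indexed
 * by bytenr >> sectorsize_bits, so at most one record exists per sector.
 * With a 4KiB sector size (sectorsize_bits == 12):
 *
 *	bytenr = 1073741824 (1GiB)  ->  index = 1073741824 >> 12 = 262144
 *
 * A second record for the same bytenr maps to the same index, hits the
 * xa_load() path above and returns 1, at most merging its data_rsv into
 * the already existing record.
 */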
2016 
2017 /*
2018  * Post handler after qgroup_trace_extent_nolock().
2019  *
2020  * NOTE: Current qgroup does the expensive backref walk at transaction
2021  * committing time with TRANS_STATE_COMMIT_DOING, this blocks incoming
2022  * new transaction.
2023  * This is designed to allow btrfs_find_all_roots() to get correct new_roots
2024  * result.
2025  *
2026  * However for old_roots there is no need to do backref walk at that time,
2027  * since we search commit roots to walk backref and result will always be
2028  * correct.
2029  *
2030  * Due to the nature of the no-lock version, we can't do the backref walk there.
2031  * So we must call btrfs_qgroup_trace_extent_post() after exiting
2032  * spinlock context.
2033  *
2034  * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
2035  * using current root, then we can move all expensive backref walk out of
2036  * transaction committing, but not now as qgroup accounting will be wrong again.
2037  */
2038 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
2039 				   struct btrfs_qgroup_extent_record *qrecord,
2040 				   u64 bytenr)
2041 {
2042 	struct btrfs_fs_info *fs_info = trans->fs_info;
2043 	struct btrfs_backref_walk_ctx ctx = {
2044 		.bytenr = bytenr,
2045 		.fs_info = fs_info,
2046 	};
2047 	int ret;
2048 
2049 	if (!btrfs_qgroup_full_accounting(fs_info))
2050 		return 0;
2051 	/*
2052 	 * We are always called in a context where we are already holding a
2053 	 * transaction handle. Often we are called when adding a data delayed
2054 	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
2055 	 * in which case we will be holding a write lock on extent buffer from a
2056 	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
2057 	 * acquire fs_info->commit_root_sem, because that is a higher level lock
2058 	 * that must be acquired before locking any extent buffers.
2059 	 *
2060 	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
2061 	 * but we can't pass it a non-NULL transaction handle, because otherwise
2062 	 * it would not use commit roots and would lock extent buffers, causing
2063 	 * a deadlock if it ends up trying to read lock the same extent buffer
2064 	 * that was previously write locked at btrfs_truncate_inode_items().
2065 	 *
2066 	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
2067 	 * explicitly tell it to not acquire the commit_root_sem - if we are
2068 	 * holding a transaction handle we don't need its protection.
2069 	 */
2070 	ASSERT(trans != NULL);
2071 
2072 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2073 		return 0;
2074 
2075 	ret = btrfs_find_all_roots(&ctx, true);
2076 	if (ret < 0) {
2077 		qgroup_mark_inconsistent(fs_info,
2078 				"error accounting new delayed refs extent: %d", ret);
2079 		return 0;
2080 	}
2081 
2082 	/*
2083 	 * Here we don't need to get the lock of
2084 	 * trans->transaction->delayed_refs, since the inserted qrecord won't
2085 	 * be deleted; only qrecord->node may be modified (by a new qrecord insert).
2086 	 *
2087 	 * So modifying qrecord->old_roots is safe here
2088 	 */
2089 	qrecord->old_roots = ctx.roots;
2090 	return 0;
2091 }
2092 
2093 /*
2094  * Inform qgroup to trace one dirty extent, specified by @bytenr and
2095  * @num_bytes.
2096  * So qgroup can account it at transaction commit time.
2097  *
2098  * Better encapsulated version, with memory allocation and backref walk for
2099  * commit roots.
2100  * So this can sleep.
2101  *
2102  * Return 0 if the operation is done.
2103  * Return <0 for error, like memory allocation failure or invalid parameter
2104  * (NULL trans)
2105  */
2106 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2107 			      u64 num_bytes)
2108 {
2109 	struct btrfs_fs_info *fs_info = trans->fs_info;
2110 	struct btrfs_qgroup_extent_record *record;
2111 	struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
2112 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2113 	int ret;
2114 
2115 	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
2116 		return 0;
2117 	record = kzalloc(sizeof(*record), GFP_NOFS);
2118 	if (!record)
2119 		return -ENOMEM;
2120 
2121 	if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
2122 		kfree(record);
2123 		return -ENOMEM;
2124 	}
2125 
2126 	record->num_bytes = num_bytes;
2127 
2128 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
2129 	if (ret) {
2130 		/* Clean up if insertion fails or item exists. */
2131 		xa_release(&delayed_refs->dirty_extents, index);
2132 		kfree(record);
2133 		return 0;
2134 	}
2135 	return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
2136 }
2137 
2138 /*
2139  * Inform qgroup to trace all leaf items of data
2140  *
2141  * Return 0 for success
2142  * Return <0 for error (ENOMEM)
2143  */
2144 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
2145 				  struct extent_buffer *eb)
2146 {
2147 	struct btrfs_fs_info *fs_info = trans->fs_info;
2148 	int nr = btrfs_header_nritems(eb);
2149 	int i, extent_type, ret;
2150 	struct btrfs_key key;
2151 	struct btrfs_file_extent_item *fi;
2152 	u64 bytenr, num_bytes;
2153 
2154 	/* We can be called directly from walk_up_proc() */
2155 	if (!btrfs_qgroup_full_accounting(fs_info))
2156 		return 0;
2157 
2158 	for (i = 0; i < nr; i++) {
2159 		btrfs_item_key_to_cpu(eb, &key, i);
2160 
2161 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2162 			continue;
2163 
2164 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2165 		/* Filter out non-qgroup-accountable extents. */
2166 		extent_type = btrfs_file_extent_type(eb, fi);
2167 
2168 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
2169 			continue;
2170 
2171 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
2172 		if (!bytenr)
2173 			continue;
2174 
2175 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
2176 
2177 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
2178 		if (ret)
2179 			return ret;
2180 	}
2181 	cond_resched();
2182 	return 0;
2183 }
2184 
2185 /*
2186  * Walk up the tree from the bottom, freeing leaves and any interior
2187  * nodes which have had all slots visited. If a node (leaf or
2188  * interior) is freed, the node above it will have it's slot
2189  * interior) is freed, the node above it will have its slot
2190  *
2191  * At the end of this function, we should have a path which has all
2192  * slots incremented to the next position for a search. If we need to
2193  * read a new node it will be NULL and the node above it will have the
2194  * correct slot selected for a later read.
2195  *
2196  * If we increment the root node's slot counter past the number of
2197  * elements, 1 is returned to signal completion of the search.
2198  */
2199 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
2200 {
2201 	int level = 0;
2202 	int nr, slot;
2203 	struct extent_buffer *eb;
2204 
2205 	if (root_level == 0)
2206 		return 1;
2207 
2208 	while (level <= root_level) {
2209 		eb = path->nodes[level];
2210 		nr = btrfs_header_nritems(eb);
2211 		path->slots[level]++;
2212 		slot = path->slots[level];
2213 		if (slot >= nr || level == 0) {
2214 			/*
2215 			 * Don't free the root - we will detect this
2216 			 * condition after our loop and return a
2217 			 * positive value for the caller to stop walking the tree.
2218 			 */
2219 			if (level != root_level) {
2220 				btrfs_tree_unlock_rw(eb, path->locks[level]);
2221 				path->locks[level] = 0;
2222 
2223 				free_extent_buffer(eb);
2224 				path->nodes[level] = NULL;
2225 				path->slots[level] = 0;
2226 			}
2227 		} else {
2228 			/*
2229 			 * We have a valid slot to walk back down
2230 			 * from. Stop here so the caller can process these
2231 			 * new nodes.
2232 			 */
2233 			break;
2234 		}
2235 
2236 		level++;
2237 	}
2238 
2239 	eb = path->nodes[root_level];
2240 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
2241 		return 1;
2242 
2243 	return 0;
2244 }
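
/*
 * Illustrative usage sketch (hypothetical helper names; the real caller is
 * btrfs_qgroup_trace_subtree() below): the walk alternates between
 * descending to a leaf, processing it, and calling adjust_slots_upwards()
 * to pick the next sibling branch:
 *
 *	while (true) {
 *		walk_down_to_leaf(path);	// fills path->nodes[]
 *		process_leaf(path->nodes[0]);
 *		if (adjust_slots_upwards(path, root_level))
 *			break;			// whole tree visited
 *		// path->slots[] now select the next unvisited branch
 *	}
 */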
2245 
2246 /*
2247  * Helper function to trace a subtree tree block swap.
2248  *
2249  * The swap will happen in highest tree block, but there may be a lot of
2250  * tree blocks involved.
2251  *
2252  * For example:
2253  *  OO = Old tree blocks
2254  *  NN = New tree blocks allocated during balance
2255  *
2256  *           File tree (257)                  Reloc tree for 257
2257  * L2              OO                                NN
2258  *               /    \                            /    \
2259  * L1          OO      OO (a)                    OO      NN (a)
2260  *            / \     / \                       / \     / \
2261  * L0       OO   OO OO   OO                   OO   OO NN   NN
2262  *                  (b)  (c)                          (b)  (c)
2263  *
2264  * When calling qgroup_trace_extent_swap(), we will pass:
2265  * @src_eb = OO(a)
2266  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2267  * @dst_level = 0
2268  * @root_level = 1
2269  *
2270  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2271  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2272  *
2273  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2274  *
2275  * 1) Tree search from @src_eb
2276  *    It should act as a simplified btrfs_search_slot().
2277  *    The key for search can be extracted from @dst_path->nodes[dst_level]
2278  *    (first key).
2279  *
2280  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2281  *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
2282  *    They should be marked during previous (@dst_level = 1) iteration.
2283  *
2284  * 3) Mark file extents in leaves dirty
2285  *    We don't have a good way to pick out only the new file extents.
2286  *    So we still follow the old method by scanning all file extents in
2287  *    the leaf.
2288  *
2289  * This function can free us from keeping two paths, thus later we only need
2290  * to care about how to iterate all new tree blocks in the reloc tree.
2291  */
2292 static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
2293 				    struct extent_buffer *src_eb,
2294 				    struct btrfs_path *dst_path,
2295 				    int dst_level, int root_level,
2296 				    bool trace_leaf)
2297 {
2298 	struct btrfs_key key;
2299 	struct btrfs_path *src_path;
2300 	struct btrfs_fs_info *fs_info = trans->fs_info;
2301 	u32 nodesize = fs_info->nodesize;
2302 	int cur_level = root_level;
2303 	int ret;
2304 
2305 	BUG_ON(dst_level > root_level);
2306 	/* Level mismatch */
2307 	if (btrfs_header_level(src_eb) != root_level)
2308 		return -EINVAL;
2309 
2310 	src_path = btrfs_alloc_path();
2311 	if (!src_path) {
2312 		ret = -ENOMEM;
2313 		goto out;
2314 	}
2315 
2316 	if (dst_level)
2317 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2318 	else
2319 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2320 
2321 	/* For src_path */
2322 	refcount_inc(&src_eb->refs);
2323 	src_path->nodes[root_level] = src_eb;
2324 	src_path->slots[root_level] = dst_path->slots[root_level];
2325 	src_path->locks[root_level] = 0;
2326 
2327 	/* A simplified version of btrfs_search_slot() */
2328 	while (cur_level >= dst_level) {
2329 		struct btrfs_key src_key;
2330 		struct btrfs_key dst_key;
2331 
2332 		if (src_path->nodes[cur_level] == NULL) {
2333 			struct extent_buffer *eb;
2334 			int parent_slot;
2335 
2336 			eb = src_path->nodes[cur_level + 1];
2337 			parent_slot = src_path->slots[cur_level + 1];
2338 
2339 			eb = btrfs_read_node_slot(eb, parent_slot);
2340 			if (IS_ERR(eb)) {
2341 				ret = PTR_ERR(eb);
2342 				goto out;
2343 			}
2344 
2345 			src_path->nodes[cur_level] = eb;
2346 
2347 			btrfs_tree_read_lock(eb);
2348 			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2349 		}
2350 
2351 		src_path->slots[cur_level] = dst_path->slots[cur_level];
2352 		if (cur_level) {
2353 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2354 					&dst_key, dst_path->slots[cur_level]);
2355 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2356 					&src_key, src_path->slots[cur_level]);
2357 		} else {
2358 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2359 					&dst_key, dst_path->slots[cur_level]);
2360 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2361 					&src_key, src_path->slots[cur_level]);
2362 		}
2363 		/* Content mismatch, something went wrong */
2364 		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2365 			ret = -ENOENT;
2366 			goto out;
2367 		}
2368 		cur_level--;
2369 	}
2370 
2371 	/*
2372 	 * Now both @dst_path and @src_path have been populated, record the tree
2373 	 * blocks for qgroup accounting.
2374 	 */
2375 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2376 					nodesize);
2377 	if (ret < 0)
2378 		goto out;
2379 	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2380 					nodesize);
2381 	if (ret < 0)
2382 		goto out;
2383 
2384 	/* Record leaf file extents */
2385 	if (dst_level == 0 && trace_leaf) {
2386 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2387 		if (ret < 0)
2388 			goto out;
2389 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2390 	}
2391 out:
2392 	btrfs_free_path(src_path);
2393 	return ret;
2394 }
2395 
2396 /*
2397  * Helper function to do recursive generation-aware depth-first search, to
2398  * locate all new tree blocks in a subtree of reloc tree.
2399  *
2400  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2401  *         reloc tree
2402  * L2         NN (a)
2403  *          /    \
2404  * L1    OO        NN (b)
2405  *      /  \      /  \
2406  * L0  OO  OO    OO  NN
2407  *               (c) (d)
2408  * If we pass:
2409  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2410  * @cur_level = 1
2411  * @root_level = 1
2412  *
2413  * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to
2414  * trace the above tree blocks along with their counterparts in the file tree.
2415  * During the search, old tree blocks like OO(c) will be skipped, as the tree
2416  * block swap won't affect OO(c).
2417  */
2418 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
2419 					   struct extent_buffer *src_eb,
2420 					   struct btrfs_path *dst_path,
2421 					   int cur_level, int root_level,
2422 					   u64 last_snapshot, bool trace_leaf)
2423 {
2424 	struct btrfs_fs_info *fs_info = trans->fs_info;
2425 	struct extent_buffer *eb;
2426 	bool need_cleanup = false;
2427 	int ret = 0;
2428 	int i;
2429 
2430 	/* Level sanity check */
2431 	if (unlikely(cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2432 		     root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2433 		     root_level < cur_level)) {
2434 		btrfs_err_rl(fs_info,
2435 			"%s: bad levels, cur_level=%d root_level=%d",
2436 			__func__, cur_level, root_level);
2437 		return -EUCLEAN;
2438 	}
2439 
2440 	/* Read the tree block if needed */
2441 	if (dst_path->nodes[cur_level] == NULL) {
2442 		int parent_slot;
2443 		u64 child_gen;
2444 
2445 		/*
2446 		 * dst_path->nodes[root_level] must be initialized before
2447 		 * calling this function.
2448 		 */
2449 		if (unlikely(cur_level == root_level)) {
2450 			btrfs_err_rl(fs_info,
2451 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2452 				__func__, root_level, root_level, cur_level);
2453 			return -EUCLEAN;
2454 		}
2455 
2456 		/*
2457 		 * We need to get child blockptr/gen from parent before we can
2458 		 * read it.
2459 		 */
2460 		eb = dst_path->nodes[cur_level + 1];
2461 		parent_slot = dst_path->slots[cur_level + 1];
2462 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2463 
2464 		/* This node is old, no need to trace */
2465 		if (child_gen < last_snapshot)
2466 			goto out;
2467 
2468 		eb = btrfs_read_node_slot(eb, parent_slot);
2469 		if (IS_ERR(eb)) {
2470 			ret = PTR_ERR(eb);
2471 			goto out;
2472 		}
2473 
2474 		dst_path->nodes[cur_level] = eb;
2475 		dst_path->slots[cur_level] = 0;
2476 
2477 		btrfs_tree_read_lock(eb);
2478 		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2479 		need_cleanup = true;
2480 	}
2481 
2482 	/* Now record this tree block and its counterpart for qgroups */
2483 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2484 				       root_level, trace_leaf);
2485 	if (ret < 0)
2486 		goto cleanup;
2487 
2488 	eb = dst_path->nodes[cur_level];
2489 
2490 	if (cur_level > 0) {
2491 		/* Iterate all child tree blocks */
2492 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2493 			/* Skip old tree blocks as they won't be swapped */
2494 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2495 				continue;
2496 			dst_path->slots[cur_level] = i;
2497 
2498 			/* Recursive call (at most 7 times) */
2499 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2500 					dst_path, cur_level - 1, root_level,
2501 					last_snapshot, trace_leaf);
2502 			if (ret < 0)
2503 				goto cleanup;
2504 		}
2505 	}
2506 
2507 cleanup:
2508 	if (need_cleanup) {
2509 		/* Clean up */
2510 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2511 				     dst_path->locks[cur_level]);
2512 		free_extent_buffer(dst_path->nodes[cur_level]);
2513 		dst_path->nodes[cur_level] = NULL;
2514 		dst_path->slots[cur_level] = 0;
2515 		dst_path->locks[cur_level] = 0;
2516 	}
2517 out:
2518 	return ret;
2519 }
2520 
2521 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2522 				struct extent_buffer *src_eb,
2523 				struct extent_buffer *dst_eb,
2524 				u64 last_snapshot, bool trace_leaf)
2525 {
2526 	struct btrfs_fs_info *fs_info = trans->fs_info;
2527 	struct btrfs_path *dst_path = NULL;
2528 	int level;
2529 	int ret;
2530 
2531 	if (!btrfs_qgroup_full_accounting(fs_info))
2532 		return 0;
2533 
2534 	/* Wrong parameter order */
2535 	if (unlikely(btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb))) {
2536 		btrfs_err_rl(fs_info,
2537 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2538 			     btrfs_header_generation(src_eb),
2539 			     btrfs_header_generation(dst_eb));
2540 		return -EUCLEAN;
2541 	}
2542 
2543 	if (unlikely(!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb))) {
2544 		ret = -EIO;
2545 		goto out;
2546 	}
2547 
2548 	level = btrfs_header_level(dst_eb);
2549 	dst_path = btrfs_alloc_path();
2550 	if (!dst_path) {
2551 		ret = -ENOMEM;
2552 		goto out;
2553 	}
2554 	/* For dst_path */
2555 	refcount_inc(&dst_eb->refs);
2556 	dst_path->nodes[level] = dst_eb;
2557 	dst_path->slots[level] = 0;
2558 	dst_path->locks[level] = 0;
2559 
2560 	/* Do the generation-aware depth-first search */
2561 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2562 					      level, last_snapshot, trace_leaf);
2563 	if (ret < 0)
2564 		goto out;
2565 	ret = 0;
2566 
2567 out:
2568 	btrfs_free_path(dst_path);
2569 	if (ret < 0)
2570 		qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
2571 	return ret;
2572 }
2573 
2574 /*
2575  * Inform qgroup to trace a whole subtree, including all its child tree
2576  * blocks and data.
2577  * The root tree block is specified by @root_eb.
2578  *
2579  * Normally used by relocation (tree block swap) and subvolume deletion.
2580  *
2581  * Return 0 for success
2582  * Return <0 for error (ENOMEM or tree search error)
2583  */
2584 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2585 			       struct extent_buffer *root_eb,
2586 			       u64 root_gen, int root_level)
2587 {
2588 	struct btrfs_fs_info *fs_info = trans->fs_info;
2589 	int ret = 0;
2590 	int level;
2591 	u8 drop_subptree_thres;
2592 	struct extent_buffer *eb = root_eb;
2593 	struct btrfs_path *path = NULL;
2594 
2595 	ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
2596 	ASSERT(root_eb != NULL);
2597 
2598 	if (!btrfs_qgroup_full_accounting(fs_info))
2599 		return 0;
2600 
2601 	spin_lock(&fs_info->qgroup_lock);
2602 	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2603 	spin_unlock(&fs_info->qgroup_lock);
2604 
2605 	/*
2606 	 * This function only gets called for snapshot drop. If we hit a high
2607 	 * node here, it means we are going to change ownership for quite a lot
2608 	 * of extents, which will greatly slow down btrfs_commit_transaction().
2609 	 *
2610 	 * So if we find a high tree here, we just skip the accounting and
2611 	 * mark the qgroup inconsistent.
2612 	 */
2613 	if (root_level >= drop_subptree_thres) {
2614 		qgroup_mark_inconsistent(fs_info, "subtree level reached threshold");
2615 		return 0;
2616 	}
2617 
2618 	if (!extent_buffer_uptodate(root_eb)) {
2619 		struct btrfs_tree_parent_check check = {
2620 			.transid = root_gen,
2621 			.level = root_level
2622 		};
2623 
2624 		ret = btrfs_read_extent_buffer(root_eb, &check);
2625 		if (ret)
2626 			goto out;
2627 	}
2628 
2629 	if (root_level == 0) {
2630 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2631 		goto out;
2632 	}
2633 
2634 	path = btrfs_alloc_path();
2635 	if (!path)
2636 		return -ENOMEM;
2637 
2638 	/*
2639 	 * Walk down the tree.  Missing extent blocks are filled in as
2640 	 * we go. Metadata is accounted every time we read a new
2641 	 * extent block.
2642 	 *
2643 	 * When we reach a leaf, we account for file extent items in it,
2644 	 * walk back up the tree (adjusting slot pointers as we go)
2645 	 * and restart the search process.
2646 	 */
2647 	refcount_inc(&root_eb->refs);	/* For path */
2648 	path->nodes[root_level] = root_eb;
2649 	path->slots[root_level] = 0;
2650 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2651 walk_down:
2652 	level = root_level;
2653 	while (level >= 0) {
2654 		if (path->nodes[level] == NULL) {
2655 			int parent_slot;
2656 			u64 child_bytenr;
2657 
2658 			/*
2659 			 * We need to get child blockptr from parent before we
2660 			 * can read it.
2661 			 */
2662 			eb = path->nodes[level + 1];
2663 			parent_slot = path->slots[level + 1];
2664 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2665 
2666 			eb = btrfs_read_node_slot(eb, parent_slot);
2667 			if (IS_ERR(eb)) {
2668 				ret = PTR_ERR(eb);
2669 				goto out;
2670 			}
2671 
2672 			path->nodes[level] = eb;
2673 			path->slots[level] = 0;
2674 
2675 			btrfs_tree_read_lock(eb);
2676 			path->locks[level] = BTRFS_READ_LOCK;
2677 
2678 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2679 							fs_info->nodesize);
2680 			if (ret)
2681 				goto out;
2682 		}
2683 
2684 		if (level == 0) {
2685 			ret = btrfs_qgroup_trace_leaf_items(trans,
2686 							    path->nodes[level]);
2687 			if (ret)
2688 				goto out;
2689 
2690 			/* Nonzero return here means we completed our search */
2691 			ret = adjust_slots_upwards(path, root_level);
2692 			if (ret)
2693 				break;
2694 
2695 			/* Restart search with new slots */
2696 			goto walk_down;
2697 		}
2698 
2699 		level--;
2700 	}
2701 
2702 	ret = 0;
2703 out:
2704 	btrfs_free_path(path);
2705 
2706 	return ret;
2707 }
2708 
2709 static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
2710 {
2711 	if (!list_empty(&qgroup->nested_iterator))
2712 		return;
2713 
2714 	list_add_tail(&qgroup->nested_iterator, head);
2715 }
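
/*
 * Illustrative note: the list_empty() check above acts as an "already
 * visited" marker, because nested_iterator is presumably kept initialized
 * (INIT_LIST_HEAD at qgroup creation, list_del_init on cleanup below)
 * whenever the qgroup is not on an iteration list.  Adding a qgroup twice
 * is therefore a cheap no-op without any extra lookup structure:
 *
 *	qgroup_iterator_nested_add(&head, qg);	// linked onto head
 *	qgroup_iterator_nested_add(&head, qg);	// no-op, already linked
 */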
2716 
2717 static void qgroup_iterator_nested_clean(struct list_head *head)
2718 {
2719 	while (!list_empty(head)) {
2720 		struct btrfs_qgroup *qgroup;
2721 
2722 		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
2723 		list_del_init(&qgroup->nested_iterator);
2724 	}
2725 }
2726 
2727 #define UPDATE_NEW	0
2728 #define UPDATE_OLD	1
2729 /*
2730  * Walk all of the roots that point to the bytenr and adjust their refcnts.
2731  */
2732 static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2733 				 struct ulist *roots, struct list_head *qgroups,
2734 				 u64 seq, bool update_old)
2735 {
2736 	struct ulist_node *unode;
2737 	struct ulist_iterator uiter;
2738 	struct btrfs_qgroup *qg;
2739 
2740 	if (!roots)
2741 		return;
2742 	ULIST_ITER_INIT(&uiter);
2743 	while ((unode = ulist_next(roots, &uiter))) {
2744 		LIST_HEAD(tmp);
2745 
2746 		qg = find_qgroup_rb(fs_info, unode->val);
2747 		if (!qg)
2748 			continue;
2749 
2750 		qgroup_iterator_nested_add(qgroups, qg);
2751 		qgroup_iterator_add(&tmp, qg);
2752 		list_for_each_entry(qg, &tmp, iterator) {
2753 			struct btrfs_qgroup_list *glist;
2754 
2755 			if (update_old)
2756 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2757 			else
2758 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2759 
2760 			list_for_each_entry(glist, &qg->groups, next_group) {
2761 				qgroup_iterator_nested_add(qgroups, glist->group);
2762 				qgroup_iterator_add(&tmp, glist->group);
2763 			}
2764 		}
2765 		qgroup_iterator_clean(&tmp);
2766 	}
2767 }
2768 
2769 /*
2770  * Update qgroup rfer/excl counters.
2771  * Rfer update is easy, the code explains itself.
2772  *
2773  * Excl update is tricky, the update is split into 2 parts.
2774  * Part 1: Possible exclusive <-> sharing detect:
2775  *	|	A	|	!A	|
2776  *  -------------------------------------
2777  *  B	|	*	|	-	|
2778  *  -------------------------------------
2779  *  !B	|	+	|	**	|
2780  *  -------------------------------------
2781  *
2782  * Conditions:
2783  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2784  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2785  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2786  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2787  *
2788  * Results:
2789  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2790  * *: Definitely not changed.		**: Possible unchanged.
2791  *
2792  * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
2793  *
2794  * To make the logic clear, we first use condition A and B to split
2795  * combination into 4 results.
2796  *
2797  * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
2798  * them only one variant may be 0.
2799  *
2800  * Lastly, check result **. Since there are 2 variants that may be 0, split
2801  * them again (2x2).
2802  * But this time we don't need to consider other things; the code and logic
2803  * are easy to understand now.
2804  */
2805 static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
2806 				   struct list_head *qgroups, u64 nr_old_roots,
2807 				   u64 nr_new_roots, u64 num_bytes, u64 seq)
2808 {
2809 	struct btrfs_qgroup *qg;
2810 
2811 	list_for_each_entry(qg, qgroups, nested_iterator) {
2812 		u64 cur_new_count, cur_old_count;
2813 		bool dirty = false;
2814 
2815 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2816 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2817 
2818 		trace_btrfs_qgroup_update_counters(fs_info, qg, cur_old_count,
2819 						   cur_new_count);
2820 
2821 		/* Rfer update part */
2822 		if (cur_old_count == 0 && cur_new_count > 0) {
2823 			qg->rfer += num_bytes;
2824 			qg->rfer_cmpr += num_bytes;
2825 			dirty = true;
2826 		}
2827 		if (cur_old_count > 0 && cur_new_count == 0) {
2828 			qg->rfer -= num_bytes;
2829 			qg->rfer_cmpr -= num_bytes;
2830 			dirty = true;
2831 		}
2832 
2833 		/* Excl update part */
2834 		/* Exclusive/none -> shared case */
2835 		if (cur_old_count == nr_old_roots &&
2836 		    cur_new_count < nr_new_roots) {
2837 			/* Exclusive -> shared */
2838 			if (cur_old_count != 0) {
2839 				qg->excl -= num_bytes;
2840 				qg->excl_cmpr -= num_bytes;
2841 				dirty = true;
2842 			}
2843 		}
2844 
2845 		/* Shared -> exclusive/none case */
2846 		if (cur_old_count < nr_old_roots &&
2847 		    cur_new_count == nr_new_roots) {
2848 			/* Shared->exclusive */
2849 			if (cur_new_count != 0) {
2850 				qg->excl += num_bytes;
2851 				qg->excl_cmpr += num_bytes;
2852 				dirty = true;
2853 			}
2854 		}
2855 
2856 		/* Exclusive/none -> exclusive/none case */
2857 		if (cur_old_count == nr_old_roots &&
2858 		    cur_new_count == nr_new_roots) {
2859 			if (cur_old_count == 0) {
2860 				/* None -> exclusive/none */
2861 
2862 				if (cur_new_count != 0) {
2863 					/* None -> exclusive */
2864 					qg->excl += num_bytes;
2865 					qg->excl_cmpr += num_bytes;
2866 					dirty = true;
2867 				}
2868 				/* None -> none, nothing changed */
2869 			} else {
2870 				/* Exclusive -> exclusive/none */
2871 
2872 				if (cur_new_count == 0) {
2873 					/* Exclusive -> none */
2874 					qg->excl -= num_bytes;
2875 					qg->excl_cmpr -= num_bytes;
2876 					dirty = true;
2877 				}
2878 				/* Exclusive -> exclusive, nothing changed */
2879 			}
2880 		}
2881 
2882 		if (dirty)
2883 			qgroup_dirty(fs_info, qg);
2884 	}
2885 }
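
/*
 * Worked example (illustrative) for the table above: suppose an extent of
 * num_bytes was referenced by roots {A, B} before the change and by {A}
 * after it, so nr_old_roots = 2 and nr_new_roots = 1.  For qgroup A:
 *
 *	cur_old_count = 1 (< nr_old_roots)   -> condition A, shared before
 *	cur_new_count = 1 (== nr_new_roots)  -> condition !B, exclusive now
 *
 * That is the "+" cell: A's rfer is unchanged (referenced before and
 * after), while excl grows by num_bytes since A became the only
 * referencing root.  For qgroup B, cur_new_count == 0, so rfer and
 * rfer_cmpr drop by num_bytes and excl stays untouched (the extent was
 * never exclusive to B).
 */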
2886 
2887 /*
2888  * Check if @roots is potentially a list of fs tree roots.
2889  *
2890  * Return 0 for definitely not a fs/subvol tree roots ulist
2891  * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2892  *          one as well)
2893  */
2894 static int maybe_fs_roots(struct ulist *roots)
2895 {
2896 	struct ulist_node *unode;
2897 	struct ulist_iterator uiter;
2898 
2899 	/* Empty one, still possible for fs roots */
2900 	if (!roots || roots->nnodes == 0)
2901 		return 1;
2902 
2903 	ULIST_ITER_INIT(&uiter);
2904 	unode = ulist_next(roots, &uiter);
2905 	if (!unode)
2906 		return 1;
2907 
2908 	/*
2909 	 * If it contains fs tree roots, then it must belong to fs/subvol
2910 	 * trees.
2911 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2912 	 */
2913 	return btrfs_is_fstree(unode->val);
2914 }
2915 
2916 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2917 				u64 num_bytes, struct ulist *old_roots,
2918 				struct ulist *new_roots)
2919 {
2920 	struct btrfs_fs_info *fs_info = trans->fs_info;
2921 	LIST_HEAD(qgroups);
2922 	u64 seq;
2923 	u64 nr_new_roots = 0;
2924 	u64 nr_old_roots = 0;
2925 	int ret = 0;
2926 
2927 	/*
2928 	 * If quotas get disabled meanwhile, the resources need to be freed and
2929 	 * we can't just exit here.
2930 	 */
2931 	if (!btrfs_qgroup_full_accounting(fs_info) ||
2932 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2933 		goto out_free;
2934 
2935 	if (new_roots) {
2936 		if (!maybe_fs_roots(new_roots))
2937 			goto out_free;
2938 		nr_new_roots = new_roots->nnodes;
2939 	}
2940 	if (old_roots) {
2941 		if (!maybe_fs_roots(old_roots))
2942 			goto out_free;
2943 		nr_old_roots = old_roots->nnodes;
2944 	}
2945 
2946 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2947 	if (nr_old_roots == 0 && nr_new_roots == 0)
2948 		goto out_free;
2949 
2950 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2951 					num_bytes, nr_old_roots, nr_new_roots);
2952 
2953 	mutex_lock(&fs_info->qgroup_rescan_lock);
2954 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2955 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2956 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2957 			ret = 0;
2958 			goto out_free;
2959 		}
2960 	}
2961 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2962 
2963 	spin_lock(&fs_info->qgroup_lock);
2964 	seq = fs_info->qgroup_seq;
2965 
2966 	/* Update old refcnts using old_roots */
2967 	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
2968 
2969 	/* Update new refcnts using new_roots */
2970 	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
2971 
2972 	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
2973 			       num_bytes, seq);
2974 
2975 	/*
2976 	 * We're done using the iterator, release all its qgroups while holding
2977 	 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
2978 	 * and trigger use-after-free accesses to qgroups.
2979 	 */
2980 	qgroup_iterator_nested_clean(&qgroups);
2981 
2982 	/*
2983 	 * Bump qgroup_seq to avoid seq overlap
2984 	 */
2985 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2986 	spin_unlock(&fs_info->qgroup_lock);
2987 out_free:
2988 	ulist_free(old_roots);
2989 	ulist_free(new_roots);
2990 	return ret;
2991 }
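
/*
 * Illustrative note: the qgroup_seq bump above is what keeps rounds of
 * accounting from interfering.  The refcnt helpers effectively store
 * "seq + count" in each qgroup and read any stored value below the current
 * round's seq back as zero, so advancing the global seq by
 * max(nr_old_roots, nr_new_roots) + 1 guarantees that no tag written in
 * this round can be mistaken for a count in the next one.
 */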
2992 
2993 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2994 {
2995 	struct btrfs_fs_info *fs_info = trans->fs_info;
2996 	struct btrfs_qgroup_extent_record *record;
2997 	struct btrfs_delayed_ref_root *delayed_refs;
2998 	struct ulist *new_roots = NULL;
2999 	unsigned long index;
3000 	u64 num_dirty_extents = 0;
3001 	u64 qgroup_to_skip;
3002 	int ret = 0;
3003 
3004 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3005 		return 0;
3006 
3007 	delayed_refs = &trans->transaction->delayed_refs;
3008 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
3009 	xa_for_each(&delayed_refs->dirty_extents, index, record) {
3010 		const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits);
3011 
3012 		num_dirty_extents++;
3013 		trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
3014 
3015 		if (!ret && !(fs_info->qgroup_flags &
3016 			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
3017 			struct btrfs_backref_walk_ctx ctx = { 0 };
3018 
3019 			ctx.bytenr = bytenr;
3020 			ctx.fs_info = fs_info;
3021 
3022 			/*
3023 			 * Old roots should be searched when inserting qgroup
3024 			 * extent record.
3025 			 *
3026 			 * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
3027 			 * we may have some record inserted during
3028 			 * NO_ACCOUNTING (thus no old_roots populated), but
3029 			 * later we start rescan, which clears NO_ACCOUNTING,
3030 			 * leaving some inserted records without old_roots
3031 			 * populated.
3032 			 *
3033 			 * Those cases are rare and should not cause too much
3034 			 * time spent during commit_transaction().
3035 			 */
3036 			if (!record->old_roots) {
3037 				/* Search commit root to find old_roots */
3038 				ret = btrfs_find_all_roots(&ctx, false);
3039 				if (ret < 0)
3040 					goto cleanup;
3041 				record->old_roots = ctx.roots;
3042 				ctx.roots = NULL;
3043 			}
3044 
3045 			/*
3046 			 * Use BTRFS_SEQ_LAST as time_seq to do special search,
3047 			 * which doesn't lock the tree or delayed_refs and searches
3048 			 * the current root. It's safe inside commit_transaction().
3049 			 */
3050 			ctx.trans = trans;
3051 			ctx.time_seq = BTRFS_SEQ_LAST;
3052 			ret = btrfs_find_all_roots(&ctx, false);
3053 			if (ret < 0)
3054 				goto cleanup;
3055 			new_roots = ctx.roots;
3056 			if (qgroup_to_skip) {
3057 				ulist_del(new_roots, qgroup_to_skip, 0);
3058 				ulist_del(record->old_roots, qgroup_to_skip,
3059 					  0);
3060 			}
3061 			ret = btrfs_qgroup_account_extent(trans, bytenr,
3062 							  record->num_bytes,
3063 							  record->old_roots,
3064 							  new_roots);
3065 			record->old_roots = NULL;
3066 			new_roots = NULL;
3067 		}
3068 		/* Free the reserved data space */
3069 		btrfs_qgroup_free_refroot(fs_info,
3070 				record->data_rsv_refroot,
3071 				record->data_rsv,
3072 				BTRFS_QGROUP_RSV_DATA);
3073 cleanup:
3074 		ulist_free(record->old_roots);
3075 		ulist_free(new_roots);
3076 		new_roots = NULL;
3077 		xa_erase(&delayed_refs->dirty_extents, index);
3078 		kfree(record);
3079 
3080 	}
3081 	trace_btrfs_qgroup_num_dirty_extents(fs_info, trans->transid, num_dirty_extents);
3082 	return ret;
3083 }
3084 
3085 /*
3086  * Writes all changed qgroups to disk.
3087  * Called by the transaction commit path and the qgroup assign ioctl.
3088  */
3089 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
3090 {
3091 	struct btrfs_fs_info *fs_info = trans->fs_info;
3092 	int ret = 0;
3093 
3094 	/*
3095 	 * In case we are called from the qgroup assign ioctl, assert that we
3096 	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
3097 	 * disable operation (ioctl) and access a freed quota root.
3098 	 */
3099 	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
3100 		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
3101 
3102 	if (!fs_info->quota_root)
3103 		return ret;
3104 
3105 	spin_lock(&fs_info->qgroup_lock);
3106 	while (!list_empty(&fs_info->dirty_qgroups)) {
3107 		struct btrfs_qgroup *qgroup;
3108 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
3109 					  struct btrfs_qgroup, dirty);
3110 		list_del_init(&qgroup->dirty);
3111 		spin_unlock(&fs_info->qgroup_lock);
3112 		ret = update_qgroup_info_item(trans, qgroup);
3113 		if (ret)
3114 			qgroup_mark_inconsistent(fs_info,
3115 						 "qgroup info item update error %d", ret);
3116 		ret = update_qgroup_limit_item(trans, qgroup);
3117 		if (ret)
3118 			qgroup_mark_inconsistent(fs_info,
3119 						 "qgroup limit item update error %d", ret);
3120 		spin_lock(&fs_info->qgroup_lock);
3121 	}
3122 	if (btrfs_qgroup_enabled(fs_info))
3123 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
3124 	else
3125 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
3126 	spin_unlock(&fs_info->qgroup_lock);
3127 
3128 	ret = update_qgroup_status_item(trans);
3129 	if (ret)
3130 		qgroup_mark_inconsistent(fs_info,
3131 					 "qgroup status item update error %d", ret);
3132 
3133 	return ret;
3134 }
3135 
3136 int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
3137 			       struct btrfs_qgroup_inherit *inherit,
3138 			       size_t size)
3139 {
3140 	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
3141 		return -EOPNOTSUPP;
3142 	if (size < sizeof(*inherit) || size > PAGE_SIZE)
3143 		return -EINVAL;
3144 
3145 	/*
3146 	 * In the past we allowed btrfs_qgroup_inherit to specify to copy
3147 	 * rfer/excl numbers directly from other qgroups.  This behavior has
3148 	 * been disabled in userspace for a very long time, but here we should
3149  * also disable it in the kernel, as this behavior is known to mark qgroups
3150 	 * inconsistent, and a rescan would wipe out the changes anyway.
3151 	 *
3152 	 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies.
3153 	 */
3154 	if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
3155 		return -EINVAL;
3156 
3157 	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
3158 		return -EINVAL;
3159 
3160 	/*
3161 	 * Skip the inherit source qgroups check if qgroup is not enabled.
3162 	 * Qgroups can still be enabled later, causing problems, but in that case
3163 	 * btrfs_qgroup_inherit() would just ignore those invalid ones.
3164 	 */
3165 	if (!btrfs_qgroup_enabled(fs_info))
3166 		return 0;
3167 
3168 	/*
3169 	 * Now check all the remaining qgroups, they should all:
3170 	 *
3171 	 * - Exist
3172 	 * - Be higher level qgroups.
3173 	 */
3174 	for (int i = 0; i < inherit->num_qgroups; i++) {
3175 		struct btrfs_qgroup *qgroup;
3176 		u64 qgroupid = inherit->qgroups[i];
3177 
3178 		if (btrfs_qgroup_level(qgroupid) == 0)
3179 			return -EINVAL;
3180 
3181 		spin_lock(&fs_info->qgroup_lock);
3182 		qgroup = find_qgroup_rb(fs_info, qgroupid);
3183 		if (!qgroup) {
3184 			spin_unlock(&fs_info->qgroup_lock);
3185 			return -ENOENT;
3186 		}
3187 		spin_unlock(&fs_info->qgroup_lock);
3188 	}
3189 	return 0;
3190 }
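
/*
 * Illustrative example (not from the original source): a caller building an
 * inherit structure that passes the size check above must pass exactly
 * struct_size(inherit, qgroups, num_qgroups).  A minimal sketch for one
 * parent qgroup:
 *
 *	struct btrfs_qgroup_inherit *inherit;
 *	size_t size;
 *
 *	size = struct_size(inherit, qgroups, 1);
 *	inherit = kzalloc(size, GFP_KERNEL);
 *	if (!inherit)
 *		return -ENOMEM;
 *	inherit->num_qgroups = 1;
 *	inherit->qgroups[0] = (1ULL << 48) | 100;  // qgroup 1/100
 *	ret = btrfs_qgroup_check_inherit(fs_info, inherit, size);
 */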
3191 
3192 static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
3193 			       u64 inode_rootid,
3194 			       struct btrfs_qgroup_inherit **inherit)
3195 {
3196 	int i = 0;
3197 	u64 num_qgroups = 0;
3198 	struct btrfs_qgroup *inode_qg;
3199 	struct btrfs_qgroup_list *qg_list;
3200 	struct btrfs_qgroup_inherit *res;
3201 	size_t struct_sz;
3202 	u64 *qgids;
3203 
3204 	if (*inherit)
3205 		return -EEXIST;
3206 
3207 	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
3208 	if (!inode_qg)
3209 		return -ENOENT;
3210 
3211 	num_qgroups = list_count_nodes(&inode_qg->groups);
3212 
3213 	if (!num_qgroups)
3214 		return 0;
3215 
3216 	struct_sz = struct_size(res, qgroups, num_qgroups);
3217 	if (struct_sz == SIZE_MAX)
3218 		return -ERANGE;
3219 
3220 	res = kzalloc(struct_sz, GFP_NOFS);
3221 	if (!res)
3222 		return -ENOMEM;
3223 	res->num_qgroups = num_qgroups;
3224 	qgids = res->qgroups;
3225 
3226 	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
3227 		qgids[i++] = qg_list->group->qgroupid;
3228 
3229 	*inherit = res;
3230 	return 0;
3231 }
3232 
3233 /*
3234  * Check if we can skip rescan when inheriting qgroups.  If @src has a single
3235  * @parent, and that @parent is owning all its bytes exclusively, we can skip
3236  * the full rescan, by just adding nodesize to the @parent's excl/rfer.
3237  *
3238  * Return <0 for fatal errors (like srcid/parentid has no qgroup).
3239  * Return 0 if a quick inherit is done.
3240  * Return >0 if a quick inherit is not possible, and a full rescan is needed.
3241  */
3242 static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
3243 					 u64 srcid, u64 parentid)
3244 {
3245 	struct btrfs_qgroup *src;
3246 	struct btrfs_qgroup *parent;
3247 	struct btrfs_qgroup_list *list;
3248 	int nr_parents = 0;
3249 
3250 	src = find_qgroup_rb(fs_info, srcid);
3251 	if (!src)
3252 		return -ENOENT;
3253 	parent = find_qgroup_rb(fs_info, parentid);
3254 	if (!parent)
3255 		return -ENOENT;
3256 
3257 	/*
3258 	 * Source has no parent qgroup, but our new qgroup would have one.
3259 	 * Qgroup numbers would become inconsistent.
3260 	 */
3261 	if (list_empty(&src->groups))
3262 		return 1;
3263 
3264 	list_for_each_entry(list, &src->groups, next_group) {
3265 		/* The parent is not the same, quick update is not possible. */
3266 		if (list->group->qgroupid != parentid)
3267 			return 1;
3268 		nr_parents++;
3269 		/*
3270 		 * More than one parent qgroup, we can't be sure about accounting
3271 		 * consistency.
3272 		 */
3273 		if (nr_parents > 1)
3274 			return 1;
3275 	}
3276 
3277 	/*
3278 	 * The parent is not exclusively owning all its bytes.  We're not sure
3279 	 * if the source has any bytes not fully owned by the parent.
3280 	 */
3281 	if (parent->excl != parent->rfer)
3282 		return 1;
3283 
3284 	parent->excl += fs_info->nodesize;
3285 	parent->rfer += fs_info->nodesize;
3286 	return 0;
3287 }
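
/*
 * Worked example (illustrative): snapshotting a subvolume whose only
 * parent qgroup is 1/100, with the snapshot also placed under 1/100.  If
 * 1/100 has excl == rfer, every byte it references is owned exclusively by
 * its subtree, so the only genuinely new allocation the snapshot introduces
 * is its root node; bumping both counters by nodesize keeps 1/100 exact and
 * the full rescan can be skipped (the 0 return above).
 */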
3288 
3289 /*
3290  * Copy the accounting information between qgroups. This is necessary
3291  * when a snapshot or a subvolume is created. Throwing an error will
3292  * cause a transaction abort so we take extra care here to only error
3293  * when a readonly fs is a reasonable outcome.
3294  */
3295 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
3296 			 u64 objectid, u64 inode_rootid,
3297 			 struct btrfs_qgroup_inherit *inherit)
3298 {
3299 	int ret = 0;
3300 	u64 *i_qgroups;
3301 	bool committing = false;
3302 	struct btrfs_fs_info *fs_info = trans->fs_info;
3303 	struct btrfs_root *quota_root;
3304 	struct btrfs_qgroup *srcgroup;
3305 	struct btrfs_qgroup *dstgroup;
3306 	struct btrfs_qgroup *prealloc;
3307 	struct btrfs_qgroup_list **qlist_prealloc = NULL;
3308 	bool free_inherit = false;
3309 	bool need_rescan = false;
3310 	u32 level_size = 0;
3311 	u64 nums;
3312 
3313 	if (!btrfs_qgroup_enabled(fs_info))
3314 		return 0;
3315 
3316 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
3317 	if (!prealloc)
3318 		return -ENOMEM;
3319 
3320 	/*
3321 	 * There are only two callers of this function.
3322 	 *
3323 	 * One in create_subvol() in the ioctl context, which needs to hold
3324 	 * the qgroup_ioctl_lock.
3325 	 *
3326 	 * The other one in create_pending_snapshot() where no other qgroup
3327 	 * code can modify the fs as they all need to either start a new trans
3328 	 * or hold a trans handle, thus we don't need to hold
3329 	 * qgroup_ioctl_lock.
3330 	 * This avoids a long and complex lock chain and makes lockdep happy.
3331 	 */
3332 	spin_lock(&fs_info->trans_lock);
3333 	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
3334 		committing = true;
3335 	spin_unlock(&fs_info->trans_lock);
3336 
3337 	if (!committing)
3338 		mutex_lock(&fs_info->qgroup_ioctl_lock);
3339 
3340 	quota_root = fs_info->quota_root;
3341 	if (!quota_root) {
3342 		ret = -EINVAL;
3343 		goto out;
3344 	}
3345 
3346 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
3347 		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
3348 		if (ret)
3349 			goto out;
3350 		free_inherit = true;
3351 	}
3352 
3353 	if (inherit) {
3354 		i_qgroups = (u64 *)(inherit + 1);
3355 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
3356 		       2 * inherit->num_excl_copies;
3357 		for (int i = 0; i < nums; i++) {
3358 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
3359 
3360 			/*
3361 			 * Zero out invalid groups so we can ignore
3362 			 * them later.
3363 			 */
3364 			if (!srcgroup ||
3365 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
3366 				*i_qgroups = 0ULL;
3367 
3368 			++i_qgroups;
3369 		}
3370 	}
3371 
3372 	/*
3373 	 * create a tracking group for the subvol itself
3374 	 */
3375 	ret = add_qgroup_item(trans, quota_root, objectid);
3376 	if (ret)
3377 		goto out;
3378 
3379 	/*
3380 	 * add qgroup to all inherited groups
3381 	 */
3382 	if (inherit) {
3383 		i_qgroups = (u64 *)(inherit + 1);
3384 		for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
3385 			if (*i_qgroups == 0)
3386 				continue;
3387 			ret = add_qgroup_relation_item(trans, objectid,
3388 						       *i_qgroups);
3389 			if (ret && ret != -EEXIST)
3390 				goto out;
3391 			ret = add_qgroup_relation_item(trans, *i_qgroups,
3392 						       objectid);
3393 			if (ret && ret != -EEXIST)
3394 				goto out;
3395 		}
3396 		ret = 0;
3397 
3398 		qlist_prealloc = kcalloc(inherit->num_qgroups,
3399 					 sizeof(struct btrfs_qgroup_list *),
3400 					 GFP_NOFS);
3401 		if (!qlist_prealloc) {
3402 			ret = -ENOMEM;
3403 			goto out;
3404 		}
3405 		for (int i = 0; i < inherit->num_qgroups; i++) {
3406 			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
3407 						    GFP_NOFS);
3408 			if (!qlist_prealloc[i]) {
3409 				ret = -ENOMEM;
3410 				goto out;
3411 			}
3412 		}
3413 	}
3414 
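	/*
	 * All memory was allocated up front (prealloc, qlist_prealloc) because
	 * no allocation may happen while holding the qgroup spinlock.
	 */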
3415 	spin_lock(&fs_info->qgroup_lock);
3416 
3417 	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
3418 	prealloc = NULL;
3419 
3420 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
3421 		dstgroup->lim_flags = inherit->lim.flags;
3422 		dstgroup->max_rfer = inherit->lim.max_rfer;
3423 		dstgroup->max_excl = inherit->lim.max_excl;
3424 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
3425 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
3426 
3427 		qgroup_dirty(fs_info, dstgroup);
3428 	}
3429 
3430 	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
3431 		srcgroup = find_qgroup_rb(fs_info, srcid);
3432 		if (!srcgroup)
3433 			goto unlock;
3434 
3435 		/*
3436 		 * We call inherit after we clone the root in order to make sure
3437 		 * our counts don't go crazy, so at this point the only
3438 		 * difference between the two roots should be the root node.
3439 		 */
3440 		level_size = fs_info->nodesize;
3441 		dstgroup->rfer = srcgroup->rfer;
3442 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
3443 		dstgroup->excl = level_size;
3444 		dstgroup->excl_cmpr = level_size;
3445 		srcgroup->excl = level_size;
3446 		srcgroup->excl_cmpr = level_size;
3447 
3448 		/* inherit the limit info */
3449 		dstgroup->lim_flags = srcgroup->lim_flags;
3450 		dstgroup->max_rfer = srcgroup->max_rfer;
3451 		dstgroup->max_excl = srcgroup->max_excl;
3452 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3453 		dstgroup->rsv_excl = srcgroup->rsv_excl;
3454 
3455 		qgroup_dirty(fs_info, dstgroup);
3456 		qgroup_dirty(fs_info, srcgroup);
3457 
3458 		/*
3459 		 * If the source qgroup has a parent but the new one doesn't,
3460 		 * we need a full rescan.
3461 		 */
3462 		if (!inherit && !list_empty(&srcgroup->groups))
3463 			need_rescan = true;
3464 	}
3465 
3466 	if (!inherit)
3467 		goto unlock;
3468 
3469 	i_qgroups = (u64 *)(inherit + 1);
3470 	for (int i = 0; i < inherit->num_qgroups; i++) {
3471 		if (*i_qgroups) {
3472 			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
3473 					      *i_qgroups);
3474 			qlist_prealloc[i] = NULL;
3475 			if (ret)
3476 				goto unlock;
3477 		}
3478 		if (srcid) {
3479 			/* Check if we can do a quick inherit. */
3480 			ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups);
3481 			if (ret < 0)
3482 				goto unlock;
3483 			if (ret > 0)
3484 				need_rescan = true;
3485 			ret = 0;
3486 		}
3487 		++i_qgroups;
3488 	}
3489 
3490 	for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
3491 		struct btrfs_qgroup *src;
3492 		struct btrfs_qgroup *dst;
3493 
3494 		if (!i_qgroups[0] || !i_qgroups[1])
3495 			continue;
3496 
3497 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3498 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3499 
3500 		if (!src || !dst) {
3501 			ret = -EINVAL;
3502 			goto unlock;
3503 		}
3504 
3505 		dst->rfer = src->rfer - level_size;
3506 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3507 
3508 		/* Manually tweaking numbers certainly needs a rescan */
3509 		need_rescan = true;
3510 	}
3511 	for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
3512 		struct btrfs_qgroup *src;
3513 		struct btrfs_qgroup *dst;
3514 
3515 		if (!i_qgroups[0] || !i_qgroups[1])
3516 			continue;
3517 
3518 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3519 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3520 
3521 		if (!src || !dst) {
3522 			ret = -EINVAL;
3523 			goto unlock;
3524 		}
3525 
3526 		dst->excl = src->excl + level_size;
3527 		dst->excl_cmpr = src->excl_cmpr + level_size;
3528 		need_rescan = true;
3529 	}
3530 
3531 unlock:
3532 	spin_unlock(&fs_info->qgroup_lock);
3533 	if (!ret)
3534 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3535 out:
3536 	if (!committing)
3537 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3538 	if (need_rescan)
3539 		qgroup_mark_inconsistent(fs_info, "qgroup inherit needs a rescan");
3540 	if (qlist_prealloc) {
3541 		for (int i = 0; i < inherit->num_qgroups; i++)
3542 			kfree(qlist_prealloc[i]);
3543 		kfree(qlist_prealloc);
3544 	}
3545 	if (free_inherit)
3546 		kfree(inherit);
3547 	kfree(prealloc);
3548 	return ret;
3549 }
3550 
3551 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3552 {
3553 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3554 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3555 		return false;
3556 
3557 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3558 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3559 		return false;
3560 
3561 	return true;
3562 }
3563 
3564 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3565 			  enum btrfs_qgroup_rsv_type type)
3566 {
3567 	struct btrfs_qgroup *qgroup;
3568 	struct btrfs_fs_info *fs_info = root->fs_info;
3569 	u64 ref_root = btrfs_root_id(root);
3570 	int ret = 0;
3571 	LIST_HEAD(qgroup_list);
3572 
3573 	if (!btrfs_is_fstree(ref_root))
3574 		return 0;
3575 
3576 	if (num_bytes == 0)
3577 		return 0;
3578 
3579 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3580 	    capable(CAP_SYS_RESOURCE))
3581 		enforce = false;
3582 
3583 	spin_lock(&fs_info->qgroup_lock);
3584 	if (!fs_info->quota_root)
3585 		goto out;
3586 
3587 	qgroup = find_qgroup_rb(fs_info, ref_root);
3588 	if (!qgroup)
3589 		goto out;
3590 
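	/*
	 * Walk from the subvolume's own qgroup up through all of its
	 * ancestors, checking limits first so nothing is reserved if any
	 * level would exceed its quota.
	 */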
3591 	qgroup_iterator_add(&qgroup_list, qgroup);
3592 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3593 		struct btrfs_qgroup_list *glist;
3594 
3595 		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3596 			ret = -EDQUOT;
3597 			goto out;
3598 		}
3599 
3600 		list_for_each_entry(glist, &qgroup->groups, next_group)
3601 			qgroup_iterator_add(&qgroup_list, glist->group);
3602 	}
3603 
3604 	ret = 0;
3605 	/*
3606 	 * no limits exceeded, now record the reservation into all qgroups
3607 	 */
3608 	list_for_each_entry(qgroup, &qgroup_list, iterator)
3609 		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
3610 
3611 out:
3612 	qgroup_iterator_clean(&qgroup_list);
3613 	spin_unlock(&fs_info->qgroup_lock);
3614 	return ret;
3615 }
3616 
3617 /*
3618  * Free @num_bytes of reserved space with @type for a qgroup (normally a
3619  * level 0 qgroup).
3620  *
3621  * All higher level qgroups are handled too.
3622  *
3623  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3624  * This special case is only used for META_PERTRANS type.
3625  */
3626 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3627 			       u64 ref_root, u64 num_bytes,
3628 			       enum btrfs_qgroup_rsv_type type)
3629 {
3630 	struct btrfs_qgroup *qgroup;
3631 	LIST_HEAD(qgroup_list);
3632 
3633 	if (!btrfs_is_fstree(ref_root))
3634 		return;
3635 
3636 	if (num_bytes == 0)
3637 		return;
3638 
3639 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3640 		WARN(1, "%s: Invalid type to free", __func__);
3641 		return;
3642 	}
3643 	spin_lock(&fs_info->qgroup_lock);
3644 
3645 	if (!fs_info->quota_root)
3646 		goto out;
3647 
3648 	qgroup = find_qgroup_rb(fs_info, ref_root);
3649 	if (!qgroup)
3650 		goto out;
3651 
3652 	if (num_bytes == (u64)-1)
3653 		/*
3654 		 * We're freeing all pertrans rsv, get reserved value from
3655 		 * level 0 qgroup as real num_bytes to free.
3656 		 */
3657 		num_bytes = qgroup->rsv.values[type];
3658 
3659 	qgroup_iterator_add(&qgroup_list, qgroup);
3660 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3661 		struct btrfs_qgroup_list *glist;
3662 
3663 		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
3664 		list_for_each_entry(glist, &qgroup->groups, next_group) {
3665 			qgroup_iterator_add(&qgroup_list, glist->group);
3666 		}
3667 	}
3668 out:
3669 	qgroup_iterator_clean(&qgroup_list);
3670 	spin_unlock(&fs_info->qgroup_lock);
3671 }
3672 
3673 /*
3674  * Check if the leaf is the last leaf, which means all node pointers
3675  * are at their last position.
3676  */
3677 static bool is_last_leaf(struct btrfs_path *path)
3678 {
3679 	int i;
3680 
3681 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3682 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3683 			return false;
3684 	}
3685 	return true;
3686 }
3687 
3688 /*
3689  * Returns < 0 on error, 0 when more leaves are to be scanned,
3690  * and 1 when done.
3691  */
3692 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3693 			      struct btrfs_path *path)
3694 {
3695 	struct btrfs_fs_info *fs_info = trans->fs_info;
3696 	struct btrfs_root *extent_root;
3697 	struct btrfs_key found;
3698 	struct extent_buffer *scratch_leaf = NULL;
3699 	u64 num_bytes;
3700 	bool done;
3701 	int slot;
3702 	int ret;
3703 
3704 	if (!btrfs_qgroup_full_accounting(fs_info))
3705 		return 1;
3706 
3707 	mutex_lock(&fs_info->qgroup_rescan_lock);
3708 	extent_root = btrfs_extent_root(fs_info,
3709 				fs_info->qgroup_rescan_progress.objectid);
3710 	ret = btrfs_search_slot_for_read(extent_root,
3711 					 &fs_info->qgroup_rescan_progress,
3712 					 path, 1, 0);
3713 
3714 	btrfs_debug(fs_info,
3715 		"current progress key (%llu %u %llu), search_slot ret %d",
3716 		fs_info->qgroup_rescan_progress.objectid,
3717 		fs_info->qgroup_rescan_progress.type,
3718 		fs_info->qgroup_rescan_progress.offset, ret);
3719 
3720 	if (ret) {
3721 		/*
3722 		 * The rescan is about to end, we will not be scanning any
3723 		 * further blocks. We cannot unset the RESCAN flag here, because
3724 		 * we want to commit the transaction if everything went well.
3725 		 * To make the live accounting work in this phase, we set our
3726 		 * scan progress pointer such that every real extent objectid
3727 		 * will be smaller.
3728 		 */
3729 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3730 		btrfs_release_path(path);
3731 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3732 		return ret;
3733 	}
3734 	done = is_last_leaf(path);
3735 
3736 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3737 			      btrfs_header_nritems(path->nodes[0]) - 1);
3738 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3739 
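	/*
	 * Clone the leaf so the path and its tree locks can be released while
	 * each extent item is accounted below.
	 */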
3740 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3741 	if (!scratch_leaf) {
3742 		ret = -ENOMEM;
3743 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3744 		goto out;
3745 	}
3746 	slot = path->slots[0];
3747 	btrfs_release_path(path);
3748 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3749 
3750 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3751 		struct btrfs_backref_walk_ctx ctx = { 0 };
3752 
3753 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3754 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3755 		    found.type != BTRFS_METADATA_ITEM_KEY)
3756 			continue;
3757 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3758 			num_bytes = fs_info->nodesize;
3759 		else
3760 			num_bytes = found.offset;
3761 
3762 		ctx.bytenr = found.objectid;
3763 		ctx.fs_info = fs_info;
3764 
3765 		ret = btrfs_find_all_roots(&ctx, false);
3766 		if (ret < 0)
3767 			goto out;
3768 		/* For rescan, just pass old_roots as NULL */
3769 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3770 						  num_bytes, NULL, ctx.roots);
3771 		if (ret < 0)
3772 			goto out;
3773 	}
3774 out:
3775 	if (scratch_leaf)
3776 		free_extent_buffer(scratch_leaf);
3777 
3778 	if (done && !ret) {
3779 		ret = 1;
3780 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3781 	}
3782 	return ret;
3783 }
3784 
3785 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3786 {
3787 	if (btrfs_fs_closing(fs_info))
3788 		return true;
3789 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
3790 		return true;
3791 	if (!btrfs_qgroup_enabled(fs_info))
3792 		return true;
3793 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3794 		return true;
3795 	return false;
3796 }
3797 
3798 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3799 {
3800 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3801 						     qgroup_rescan_work);
3802 	struct btrfs_path *path;
3803 	struct btrfs_trans_handle *trans = NULL;
3804 	int ret = 0;
3805 	bool stopped = false;
3806 	bool did_leaf_rescans = false;
3807 
3808 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3809 		return;
3810 
3811 	path = btrfs_alloc_path();
3812 	if (!path) {
3813 		ret = -ENOMEM;
3814 		goto out;
3815 	}
3816 	/*
3817 	 * Rescan should only search the commit root; any later difference
3818 	 * is recorded by the regular qgroup accounting anyway.
3819 	 */
3820 	path->search_commit_root = 1;
3821 	path->skip_locking = 1;
3822 
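	/*
	 * Scan one leaf per transaction handle so the rescan never keeps a
	 * transaction open for long.
	 */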
3823 	while (!ret && !(stopped = rescan_should_stop(fs_info))) {
3824 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3825 		if (IS_ERR(trans)) {
3826 			ret = PTR_ERR(trans);
3827 			break;
3828 		}
3829 
3830 		ret = qgroup_rescan_leaf(trans, path);
3831 		did_leaf_rescans = true;
3832 
3833 		if (ret > 0)
3834 			btrfs_commit_transaction(trans);
3835 		else
3836 			btrfs_end_transaction(trans);
3837 	}
3838 
3839 out:
3840 	btrfs_free_path(path);
3841 
3842 	mutex_lock(&fs_info->qgroup_rescan_lock);
3843 	if (ret > 0 &&
3844 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3845 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3846 	} else if (ret < 0 || stopped) {
3847 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3848 	}
3849 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3850 
3851 	/*
3852 	 * Only update status, since the previous part has already updated the
3853 	 * qgroup info, and only if we did any actual work. This also prevents
3854 	 * race with a concurrent quota disable, which has already set
3855 	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3856 	 * btrfs_quota_disable().
3857 	 */
3858 	if (did_leaf_rescans) {
3859 		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3860 		if (IS_ERR(trans)) {
3861 			ret = PTR_ERR(trans);
3862 			trans = NULL;
3863 			btrfs_err(fs_info,
3864 				  "fail to start transaction for status update: %d",
3865 				  ret);
3866 		}
3867 	} else {
3868 		trans = NULL;
3869 	}
3870 
3871 	mutex_lock(&fs_info->qgroup_rescan_lock);
3872 	if (!stopped ||
3873 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3874 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3875 	if (trans) {
3876 		int ret2 = update_qgroup_status_item(trans);
3877 
3878 		if (ret2 < 0) {
3879 			ret = ret2;
3880 			btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
3881 		}
3882 	}
3883 	fs_info->qgroup_rescan_running = false;
3884 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3885 	complete_all(&fs_info->qgroup_rescan_completion);
3886 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3887 
3888 	if (!trans)
3889 		return;
3890 
3891 	btrfs_end_transaction(trans);
3892 
3893 	if (stopped) {
3894 		btrfs_info(fs_info, "qgroup scan paused");
3895 	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3896 		btrfs_info(fs_info, "qgroup scan cancelled");
3897 	} else if (ret >= 0) {
3898 		btrfs_info(fs_info, "qgroup scan completed%s",
3899 			ret > 0 ? " (inconsistency flag cleared)" : "");
3900 	} else {
3901 		btrfs_err(fs_info, "qgroup scan failed with %d", ret);
3902 	}
3903 }
3904 
3905 /*
3906  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3907  * memory required for the rescan context.
3908  */
3909 static int
3910 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3911 		   int init_flags)
3912 {
3913 	int ret = 0;
3914 
3915 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3916 		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
3917 		return -EINVAL;
3918 	}
3919 
3920 	if (!init_flags) {
3921 		/* we're resuming qgroup rescan at mount time */
3922 		if (!(fs_info->qgroup_flags &
3923 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3924 			btrfs_debug(fs_info,
3925 			"qgroup rescan init failed, qgroup rescan is not queued");
3926 			ret = -EINVAL;
3927 		} else if (!(fs_info->qgroup_flags &
3928 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3929 			btrfs_debug(fs_info,
3930 			"qgroup rescan init failed, qgroup is not enabled");
3931 			ret = -ENOTCONN;
3932 		}
3933 
3934 		if (ret)
3935 			return ret;
3936 	}
3937 
3938 	mutex_lock(&fs_info->qgroup_rescan_lock);
3939 
3940 	if (init_flags) {
3941 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3942 			ret = -EINPROGRESS;
3943 		} else if (!(fs_info->qgroup_flags &
3944 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3945 			btrfs_debug(fs_info,
3946 			"qgroup rescan init failed, qgroup is not enabled");
3947 			ret = -ENOTCONN;
3948 		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
3949 			/* Quota disable is in progress */
3950 			ret = -EBUSY;
3951 		}
3952 
3953 		if (ret) {
3954 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3955 			return ret;
3956 		}
3957 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3958 	}
3959 
3960 	memset(&fs_info->qgroup_rescan_progress, 0,
3961 		sizeof(fs_info->qgroup_rescan_progress));
3962 	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
3963 				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
3964 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3965 	init_completion(&fs_info->qgroup_rescan_completion);
3966 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3967 
3968 	btrfs_init_work(&fs_info->qgroup_rescan_work,
3969 			btrfs_qgroup_rescan_worker, NULL);
3970 	return 0;
3971 }
3972 
3973 static void
3974 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3975 {
3976 	struct rb_node *n;
3977 	struct btrfs_qgroup *qgroup;
3978 
3979 	spin_lock(&fs_info->qgroup_lock);
3980 	/* clear all current qgroup tracking information */
3981 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3982 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
3983 		qgroup->rfer = 0;
3984 		qgroup->rfer_cmpr = 0;
3985 		qgroup->excl = 0;
3986 		qgroup->excl_cmpr = 0;
3987 		qgroup_dirty(fs_info, qgroup);
3988 	}
3989 	spin_unlock(&fs_info->qgroup_lock);
3990 }
3991 
3992 int
3993 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3994 {
3995 	int ret = 0;
3996 
3997 	ret = qgroup_rescan_init(fs_info, 0, 1);
3998 	if (ret)
3999 		return ret;
4000 
4001 	/*
4002 	 * We have set the rescan_progress to 0, which means no more
4003 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
4004 	 * However, a btrfs_qgroup_account_ref call may already be past its
4005 	 * btrfs_find_all_roots call, in which case it would still do the
4006 	 * accounting.
4007 	 * To solve this, we commit the transaction, which ensures all
4008 	 * delayed refs are run, and only after that do we clear all
4009 	 * tracking information for a clean start.
4010 	 */
4011 
4012 	ret = btrfs_commit_current_transaction(fs_info->fs_root);
4013 	if (ret) {
4014 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
4015 		return ret;
4016 	}
4017 
4018 	qgroup_rescan_zero_tracking(fs_info);
4019 
4020 	mutex_lock(&fs_info->qgroup_rescan_lock);
4021 	/*
4022 	 * The rescan worker is only for full accounting qgroups, check if it's
4023 	 * enabled as it is pointless to queue it otherwise. A concurrent quota
4024 	 * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED.
4025 	 */
4026 	if (btrfs_qgroup_full_accounting(fs_info)) {
4027 		fs_info->qgroup_rescan_running = true;
4028 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4029 				 &fs_info->qgroup_rescan_work);
4030 	} else {
4031 		ret = -ENOTCONN;
4032 	}
4033 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4034 
4035 	return ret;
4036 }
4037 
4038 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
4039 				     bool interruptible)
4040 {
4041 	int running;
4042 	int ret = 0;
4043 
4044 	mutex_lock(&fs_info->qgroup_rescan_lock);
4045 	running = fs_info->qgroup_rescan_running;
4046 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4047 
4048 	if (!running)
4049 		return 0;
4050 
4051 	if (interruptible)
4052 		ret = wait_for_completion_interruptible(
4053 					&fs_info->qgroup_rescan_completion);
4054 	else
4055 		wait_for_completion(&fs_info->qgroup_rescan_completion);
4056 
4057 	return ret;
4058 }
4059 
4060 /*
4061  * This is only called from open_ctree() where we're still single threaded, thus
4062  * locking is omitted here.
4063  */
4064 void
4065 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
4066 {
4067 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4068 		mutex_lock(&fs_info->qgroup_rescan_lock);
4069 		fs_info->qgroup_rescan_running = true;
4070 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4071 				 &fs_info->qgroup_rescan_work);
4072 		mutex_unlock(&fs_info->qgroup_rescan_lock);
4073 	}
4074 }
4075 
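/*
 * Iterate an rbtree from @start, caching the next node before the loop body
 * runs so the current node may be erased safely.
 */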
4076 #define rbtree_iterate_from_safe(node, next, start)				\
4077        for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
4078 
4079 static int qgroup_unreserve_range(struct btrfs_inode *inode,
4080 				  struct extent_changeset *reserved, u64 start,
4081 				  u64 len)
4082 {
4083 	struct rb_node *node;
4084 	struct rb_node *next;
4085 	struct ulist_node *entry;
4086 	int ret = 0;
4087 
4088 	node = reserved->range_changed.root.rb_node;
4089 	if (!node)
4090 		return 0;
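	/*
	 * Binary search descent: when the loop exits, @entry points at the
	 * last node visited, which neighbors @start in the tree.
	 */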
4091 	while (node) {
4092 		entry = rb_entry(node, struct ulist_node, rb_node);
4093 		if (entry->val < start)
4094 			node = node->rb_right;
4095 		else
4096 			node = node->rb_left;
4097 	}
4098 
4099 	if (entry->val > start && rb_prev(&entry->rb_node))
4100 		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
4101 				 rb_node);
4102 
4103 	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
4104 		u64 entry_start;
4105 		u64 entry_end;
4106 		u64 entry_len;
4107 		int clear_ret;
4108 
4109 		entry = rb_entry(node, struct ulist_node, rb_node);
4110 		entry_start = entry->val;
4111 		entry_end = entry->aux;
4112 		entry_len = entry_end - entry_start + 1;
4113 
4114 		if (entry_start >= start + len)
4115 			break;
4116 		if (entry_start + entry_len <= start)
4117 			continue;
4118 		/*
4119 		 * Now the entry is in [start, start + len), revert the
4120 		 * EXTENT_QGROUP_RESERVED bit.
4121 		 */
4122 		clear_ret = btrfs_clear_extent_bit(&inode->io_tree, entry_start, entry_end,
4123 						   EXTENT_QGROUP_RESERVED, NULL);
4124 		if (!ret && clear_ret < 0)
4125 			ret = clear_ret;
4126 
4127 		ulist_del(&reserved->range_changed, entry->val, entry->aux);
4128 		if (likely(reserved->bytes_changed >= entry_len)) {
4129 			reserved->bytes_changed -= entry_len;
4130 		} else {
4131 			WARN_ON(1);
4132 			reserved->bytes_changed = 0;
4133 		}
4134 	}
4135 
4136 	return ret;
4137 }
4138 
4139 /*
4140  * Try to free some space for qgroup.
4141  *
4142  * For qgroup, there are only 3 ways to free qgroup space:
4143  * - Flush nodatacow write
4144  *   Any nodatacow write will free its reserved data space at run_delalloc_range().
4145  *   In theory, we should only flush nodatacow inodes, but it's not yet
4146  *   possible, so we need to flush the whole root.
4147  *
4148  * - Wait for ordered extents
4149  *   When ordered extents are finished, their reserved metadata is finally
4150  *   converted to per_trans status, which can be freed by a later
4151  *   transaction commit.
4152  *
4153  * - Commit transaction
4154  *   This would free the meta_per_trans space.
4155  *   In theory this shouldn't provide much space, but any extra qgroup
4156  *   space helps.
4157  */
4158 static int try_flush_qgroup(struct btrfs_root *root)
4159 {
4160 	int ret;
4161 
4162 	/* Can't hold an open transaction or we run the risk of deadlocking. */
4163 	ASSERT(current->journal_info == NULL);
4164 	if (WARN_ON(current->journal_info))
4165 		return 0;
4166 
4167 	/*
4168 	 * We don't want to run flush again and again, so if there is a running
4169 	 * one, we won't try to start a new flush, but exit directly.
4170 	 */
4171 	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
4172 		wait_event(root->qgroup_flush_wait,
4173 			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
4174 		return 0;
4175 	}
4176 
4177 	ret = btrfs_start_delalloc_snapshot(root, true);
4178 	if (ret < 0)
4179 		goto out;
4180 	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
4181 
4182 	/*
4183 	 * After waiting for ordered extents run delayed iputs in order to free
4184 	 * space from unlinked files before committing the current transaction,
4185 	 * as ordered extents may have been holding the last reference of an
4186 	 * inode and they add a delayed iput when they complete.
4187 	 */
4188 	btrfs_run_delayed_iputs(root->fs_info);
4189 	btrfs_wait_on_delayed_iputs(root->fs_info);
4190 
4191 	ret = btrfs_commit_current_transaction(root);
4192 out:
4193 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
4194 	wake_up(&root->qgroup_flush_wait);
4195 	return ret;
4196 }
4197 
4198 static int qgroup_reserve_data(struct btrfs_inode *inode,
4199 			struct extent_changeset **reserved_ret, u64 start,
4200 			u64 len)
4201 {
4202 	struct btrfs_root *root = inode->root;
4203 	struct extent_changeset *reserved;
4204 	bool new_reserved = false;
4205 	u64 orig_reserved;
4206 	u64 to_reserve;
4207 	int ret;
4208 
4209 	if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4210 	    !btrfs_is_fstree(btrfs_root_id(root)) || len == 0)
4211 		return 0;
4212 
4213 	/* @reserved parameter is mandatory for qgroup */
4214 	if (WARN_ON(!reserved_ret))
4215 		return -EINVAL;
4216 	if (!*reserved_ret) {
4217 		new_reserved = true;
4218 		*reserved_ret = extent_changeset_alloc();
4219 		if (!*reserved_ret)
4220 			return -ENOMEM;
4221 	}
4222 	reserved = *reserved_ret;
4223 	/* Record already reserved space */
4224 	orig_reserved = reserved->bytes_changed;
4225 	ret = btrfs_set_record_extent_bits(&inode->io_tree, start,
4226 					   start + len - 1, EXTENT_QGROUP_RESERVED,
4227 					   reserved);
4228 
4229 	/* Newly reserved space */
4230 	to_reserve = reserved->bytes_changed - orig_reserved;
4231 	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
4232 					to_reserve, QGROUP_RESERVE);
4233 	if (ret < 0)
4234 		goto out;
4235 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
4236 	if (ret < 0)
4237 		goto cleanup;
4238 
4239 	return ret;
4240 
4241 cleanup:
4242 	qgroup_unreserve_range(inode, reserved, start, len);
4243 out:
4244 	if (new_reserved) {
4245 		extent_changeset_free(reserved);
4246 		*reserved_ret = NULL;
4247 	}
4248 	return ret;
4249 }
4250 
4251 /*
4252  * Reserve qgroup space for range [start, start + len).
4253  *
4254  * This function will either reserve space from related qgroups or do nothing
4255  * if the range is already reserved.
4256  *
4257  * Return 0 for successful reservation.
4258  * Return <0 for error (including -EDQUOT).
4259  *
4260  * NOTE: This function may sleep for memory allocation, dirty page flushing and
4261  *	 transaction commit, so the caller must not hold any dirty page locked.
4262  */
4263 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
4264 			struct extent_changeset **reserved_ret, u64 start,
4265 			u64 len)
4266 {
4267 	int ret;
4268 
4269 	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
4270 	if (ret <= 0 && ret != -EDQUOT)
4271 		return ret;
4272 
4273 	ret = try_flush_qgroup(inode->root);
4274 	if (ret < 0)
4275 		return ret;
4276 	return qgroup_reserve_data(inode, reserved_ret, start, len);
4277 }
4278 
4279 /* Free ranges specified by @reserved, normally in error path */
4280 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
4281 				     struct extent_changeset *reserved,
4282 				     u64 start, u64 len, u64 *freed_ret)
4283 {
4284 	struct btrfs_root *root = inode->root;
4285 	struct ulist_node *unode;
4286 	struct ulist_iterator uiter;
4287 	struct extent_changeset changeset;
4288 	u64 freed = 0;
4289 	int ret;
4290 
4291 	extent_changeset_init(&changeset);
4292 	len = round_up(start + len, root->fs_info->sectorsize);
4293 	start = round_down(start, root->fs_info->sectorsize);
	len -= start;
4294 
4295 	ULIST_ITER_INIT(&uiter);
4296 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
4297 		u64 range_start = unode->val;
4298 		/* unode->aux is the inclusive end */
4299 		u64 range_len = unode->aux - range_start + 1;
4300 		u64 free_start;
4301 		u64 free_len;
4302 
4303 		extent_changeset_release(&changeset);
4304 
4305 		/* Only free range in range [start, start + len) */
4306 		if (range_start >= start + len ||
4307 		    range_start + range_len <= start)
4308 			continue;
4309 		free_start = max(range_start, start);
4310 		free_len = min(start + len, range_start + range_len) -
4311 			   free_start;
4312 		/*
4313 		 * TODO: Also modify reserved->ranges_reserved to reflect
4314 		 * the modification.
4315 		 *
4316 		 * However, as long as we free qgroup reserved space according
4317 		 * to EXTENT_QGROUP_RESERVED, we won't double free,
4318 		 * so there is no need to rush.
4319 		 */
4320 		ret = btrfs_clear_record_extent_bits(&inode->io_tree, free_start,
4321 						     free_start + free_len - 1,
4322 						     EXTENT_QGROUP_RESERVED,
4323 						     &changeset);
4324 		if (ret < 0)
4325 			goto out;
4326 		freed += changeset.bytes_changed;
4327 	}
4328 	btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
4329 				  BTRFS_QGROUP_RSV_DATA);
4330 	if (freed_ret)
4331 		*freed_ret = freed;
4332 	ret = 0;
4333 out:
4334 	extent_changeset_release(&changeset);
4335 	return ret;
4336 }
4337 
4338 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
4339 			struct extent_changeset *reserved, u64 start, u64 len,
4340 			u64 *released, int free)
4341 {
4342 	struct extent_changeset changeset;
4343 	int trace_op = QGROUP_RELEASE;
4344 	int ret;
4345 
4346 	if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
4347 		return btrfs_clear_record_extent_bits(&inode->io_tree, start,
4348 						      start + len - 1,
4349 						      EXTENT_QGROUP_RESERVED, NULL);
4350 	}
4351 
4352 	/* In release case, we shouldn't have @reserved */
4353 	WARN_ON(!free && reserved);
4354 	if (free && reserved)
4355 		return qgroup_free_reserved_data(inode, reserved, start, len, released);
4356 	extent_changeset_init(&changeset);
4357 	ret = btrfs_clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
4358 					     EXTENT_QGROUP_RESERVED, &changeset);
4359 	if (ret < 0)
4360 		goto out;
4361 
4362 	if (free)
4363 		trace_op = QGROUP_FREE;
4364 	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
4365 					changeset.bytes_changed, trace_op);
4366 	if (free)
4367 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4368 				btrfs_root_id(inode->root),
4369 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4370 	if (released)
4371 		*released = changeset.bytes_changed;
4372 out:
4373 	extent_changeset_release(&changeset);
4374 	return ret;
4375 }
4376 
4377 /*
4378  * Free a reserved space range from io_tree and related qgroups
4379  *
4380  * Should be called when a range of pages gets invalidated before reaching
4381  * disk, or for the error cleanup case.
4382  * If @reserved is given, only the reserved range in [@start, @start + @len)
4383  * will be freed.
4384  *
4385  * For data written to disk, use btrfs_qgroup_release_data().
4386  *
4387  * NOTE: This function may sleep for memory allocation.
4388  */
4389 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
4390 			   struct extent_changeset *reserved,
4391 			   u64 start, u64 len, u64 *freed)
4392 {
4393 	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
4394 }
4395 
4396 /*
4397  * Release a reserved space range from io_tree only.
4398  *
4399  * Should be called when a range of pages gets written to disk and the
4400  * corresponding FILE_EXTENT item is inserted into the corresponding root.
4401  *
4402  * Since the qgroup accounting framework only updates qgroup numbers at
4403  * commit_transaction() time, the reserved space shouldn't be freed from the
4404  * related qgroups yet.
4405  *
4406  * But we should release the range from the io_tree, to allow further writes
4407  * to be COWed.
4408  *
4409  * NOTE: This function may sleep for memory allocation.
4410  */
4411 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
4412 {
4413 	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
4414 }
4415 
4416 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4417 			      enum btrfs_qgroup_rsv_type type)
4418 {
4419 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4420 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4421 		return;
4422 	if (num_bytes == 0)
4423 		return;
4424 
4425 	spin_lock(&root->qgroup_meta_rsv_lock);
4426 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
4427 		root->qgroup_meta_rsv_prealloc += num_bytes;
4428 	else
4429 		root->qgroup_meta_rsv_pertrans += num_bytes;
4430 	spin_unlock(&root->qgroup_meta_rsv_lock);
4431 }
4432 
4433 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4434 			     enum btrfs_qgroup_rsv_type type)
4435 {
4436 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4437 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4438 		return 0;
4439 	if (num_bytes == 0)
4440 		return 0;
4441 
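	/*
	 * Clamp to what this root actually recorded, so a caller can never
	 * release more than was reserved (e.g. across a quota disable/enable
	 * cycle).
	 */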
4442 	spin_lock(&root->qgroup_meta_rsv_lock);
4443 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
4444 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
4445 				  num_bytes);
4446 		root->qgroup_meta_rsv_prealloc -= num_bytes;
4447 	} else {
4448 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
4449 				  num_bytes);
4450 		root->qgroup_meta_rsv_pertrans -= num_bytes;
4451 	}
4452 	spin_unlock(&root->qgroup_meta_rsv_lock);
4453 	return num_bytes;
4454 }
4455 
4456 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4457 			      enum btrfs_qgroup_rsv_type type, bool enforce)
4458 {
4459 	struct btrfs_fs_info *fs_info = root->fs_info;
4460 	int ret;
4461 
4462 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4463 	    !btrfs_is_fstree(btrfs_root_id(root)) || num_bytes == 0)
4464 		return 0;
4465 
4466 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4467 	trace_btrfs_qgroup_meta_reserve(root, (s64)num_bytes, type);
4468 	ret = qgroup_reserve(root, num_bytes, enforce, type);
4469 	if (ret < 0)
4470 		return ret;
4471 	/*
4472 	 * Record what we have reserved into the root, to avoid an underflow
4473 	 * across a quota disabled->enabled cycle.
4474 	 *
4475 	 * In that case, we may try to free space we haven't reserved
4476 	 * (since quota was disabled), so record what we reserved into the root
4477 	 * and ensure a later release won't underflow this number.
4478 	 */
4479 	add_root_meta_rsv(root, num_bytes, type);
4480 	return ret;
4481 }
4482 
4483 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4484 				enum btrfs_qgroup_rsv_type type, bool enforce,
4485 				bool noflush)
4486 {
4487 	int ret;
4488 
4489 	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4490 	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4491 		return ret;
4492 
4493 	ret = try_flush_qgroup(root);
4494 	if (ret < 0)
4495 		return ret;
4496 	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4497 }
4498 
4499 /*
4500  * Per-transaction meta reservations should all be freed at transaction
4501  * commit time.
4502  */
4503 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4504 {
4505 	struct btrfs_fs_info *fs_info = root->fs_info;
4506 
4507 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4508 	    !btrfs_is_fstree(btrfs_root_id(root)))
4509 		return;
4510 
4511 	/* TODO: Update trace point to handle such free */
4512 	trace_btrfs_qgroup_meta_free_all_pertrans(root);
4513 	/* Special value -1 means to free all reserved space */
4514 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
4515 				  BTRFS_QGROUP_RSV_META_PERTRANS);
4516 }
4517 
4518 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4519 			      enum btrfs_qgroup_rsv_type type)
4520 {
4521 	struct btrfs_fs_info *fs_info = root->fs_info;
4522 
4523 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4524 	    !btrfs_is_fstree(btrfs_root_id(root)))
4525 		return;
4526 
4527 	/*
4528 	 * A reservation for META_PREALLOC can happen before quota is enabled,
4529 	 * which can lead to underflow.
4530 	 * Here we ensure we only free what we have really reserved.
4531 	 */
4532 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4533 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4534 	trace_btrfs_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4535 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
4536 }
4537 
4538 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4539 				int num_bytes)
4540 {
4541 	struct btrfs_qgroup *qgroup;
4542 	LIST_HEAD(qgroup_list);
4543 
4544 	if (num_bytes == 0)
4545 		return;
4546 	if (!fs_info->quota_root)
4547 		return;
4548 
4549 	spin_lock(&fs_info->qgroup_lock);
4550 	qgroup = find_qgroup_rb(fs_info, ref_root);
4551 	if (!qgroup)
4552 		goto out;
4553 
4554 	qgroup_iterator_add(&qgroup_list, qgroup);
4555 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
4556 		struct btrfs_qgroup_list *glist;
4557 
4558 		qgroup_rsv_release(fs_info, qgroup, num_bytes,
4559 				BTRFS_QGROUP_RSV_META_PREALLOC);
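		/*
		 * On a read-only fs there is no transaction commit to free
		 * PERTRANS reservations, so drop the PREALLOC bytes instead of
		 * converting them.
		 */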
4560 		if (!sb_rdonly(fs_info->sb))
4561 			qgroup_rsv_add(fs_info, qgroup, num_bytes,
4562 				       BTRFS_QGROUP_RSV_META_PERTRANS);
4563 
4564 		list_for_each_entry(glist, &qgroup->groups, next_group)
4565 			qgroup_iterator_add(&qgroup_list, glist->group);
4566 	}
4567 out:
4568 	qgroup_iterator_clean(&qgroup_list);
4569 	spin_unlock(&fs_info->qgroup_lock);
4570 }
4571 
4572 /*
4573  * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS.
4574  *
4575  * This is called when preallocated meta reservation needs to be used.
4576  * Normally after btrfs_join_transaction() call.
4577  */
4578 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4579 {
4580 	struct btrfs_fs_info *fs_info = root->fs_info;
4581 
4582 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4583 	    !btrfs_is_fstree(btrfs_root_id(root)))
4584 		return;
4585 	/* Same as btrfs_qgroup_free_meta_prealloc() */
4586 	num_bytes = sub_root_meta_rsv(root, num_bytes,
4587 				      BTRFS_QGROUP_RSV_META_PREALLOC);
4588 	trace_btrfs_qgroup_meta_convert(root, num_bytes);
4589 	qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
4590 	if (!sb_rdonly(fs_info->sb))
4591 		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
4592 }
4593 
4594 /*
4595  * Check for leaked qgroup reserved space, normally at inode destruction
4596  * time.
4597  */
4598 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4599 {
4600 	struct extent_changeset changeset;
4601 	struct ulist_node *unode;
4602 	struct ulist_iterator iter;
4603 	int ret;
4604 
4605 	extent_changeset_init(&changeset);
4606 	ret = btrfs_clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4607 					     EXTENT_QGROUP_RESERVED, &changeset);
4608 
4609 	WARN_ON(ret < 0);
4610 	if (WARN_ON(changeset.bytes_changed)) {
4611 		ULIST_ITER_INIT(&iter);
4612 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4613 			btrfs_warn(inode->root->fs_info,
4614 		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4615 				btrfs_ino(inode), unode->val, unode->aux);
4616 		}
4617 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4618 				btrfs_root_id(inode->root),
4619 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4620 
4621 	}
4622 	extent_changeset_release(&changeset);
4623 }
4624 
4625 void btrfs_qgroup_init_swapped_blocks(
4626 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4627 {
4628 	int i;
4629 
4630 	spin_lock_init(&swapped_blocks->lock);
4631 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4632 		swapped_blocks->blocks[i] = RB_ROOT;
4633 	swapped_blocks->swapped = false;
4634 }
4635 
4636 /*
4637  * Delete all swapped block records of @root.
4638  * Every record here means we skipped a full subtree scan for qgroups.
4639  *
4640  * Gets called when committing a transaction.
4641  */
4642 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4643 {
4644 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4645 	int i;
4646 
4647 	swapped_blocks = &root->swapped_blocks;
4648 
4649 	spin_lock(&swapped_blocks->lock);
4650 	if (!swapped_blocks->swapped)
4651 		goto out;
4652 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4653 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4654 		struct btrfs_qgroup_swapped_block *entry;
4655 		struct btrfs_qgroup_swapped_block *next;
4656 
4657 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4658 						     node)
4659 			kfree(entry);
4660 		swapped_blocks->blocks[i] = RB_ROOT;
4661 	}
4662 	swapped_blocks->swapped = false;
4663 out:
4664 	spin_unlock(&swapped_blocks->lock);
4665 }
4666 
4667 static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node)
4668 {
4669 	const u64 *bytenr = key;
4670 	const struct btrfs_qgroup_swapped_block *block = rb_entry(node,
4671 					  struct btrfs_qgroup_swapped_block, node);
4672 
4673 	if (block->subvol_bytenr < *bytenr)
4674 		return -1;
4675 	else if (block->subvol_bytenr > *bytenr)
4676 		return 1;
4677 
4678 	return 0;
4679 }
4680 
4681 static int qgroup_swapped_block_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
4682 {
4683 	const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new,
4684 					      struct btrfs_qgroup_swapped_block, node);
4685 
4686 	return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing);
4687 }
4688 
4689 /*
4690  * Add a subtree root record into @subvol_root.
4691  *
4692  * @subvol_root:	tree root of the subvolume tree that got swapped
4693  * @bg:			block group under balance
4694  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4695  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4696  *			BOTH POINTERS ARE BEFORE TREE SWAP
4697  * @last_snapshot:	last snapshot generation of the subvolume tree
4698  */
4699 int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
4700 		struct btrfs_block_group *bg,
4701 		struct extent_buffer *subvol_parent, int subvol_slot,
4702 		struct extent_buffer *reloc_parent, int reloc_slot,
4703 		u64 last_snapshot)
4704 {
4705 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4706 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4707 	struct btrfs_qgroup_swapped_block *block;
4708 	struct rb_node *node;
4709 	int level = btrfs_header_level(subvol_parent) - 1;
4710 	int ret = 0;
4711 
4712 	if (!btrfs_qgroup_full_accounting(fs_info))
4713 		return 0;
4714 
4715 	if (unlikely(btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4716 		     btrfs_node_ptr_generation(reloc_parent, reloc_slot))) {
4717 		btrfs_err_rl(fs_info,
4718 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4719 			__func__,
4720 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4721 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4722 		return -EUCLEAN;
4723 	}
4724 
4725 	block = kmalloc(sizeof(*block), GFP_NOFS);
4726 	if (!block) {
4727 		ret = -ENOMEM;
4728 		goto out;
4729 	}
4730 
4731 	/*
4732 	 * @reloc_parent/slot is still before swap, while @block is going to
4733 	 * record the bytenr after swap, so we do the swap here.
4734 	 */
4735 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4736 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4737 							     reloc_slot);
4738 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4739 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4740 							    subvol_slot);
4741 	block->last_snapshot = last_snapshot;
4742 	block->level = level;
4743 
4744 	/*
4745 	 * If we have bg == NULL, we're called from btrfs_recover_relocation()
4746 	 * and no one else can modify tree blocks, thus the qgroup numbers will
4747 	 * not change no matter the value of trace_leaf.
4748 	 */
4749 	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4750 		block->trace_leaf = true;
4751 	else
4752 		block->trace_leaf = false;
4753 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4754 
4755 	/* Insert @block into @blocks */
4756 	spin_lock(&blocks->lock);
4757 	node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp);
4758 	if (node) {
4759 		struct btrfs_qgroup_swapped_block *entry;
4760 
4761 		entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4762 
4763 		if (entry->subvol_generation != block->subvol_generation ||
4764 		    entry->reloc_bytenr != block->reloc_bytenr ||
4765 		    entry->reloc_generation != block->reloc_generation) {
4766 			/*
4767 			 * Duplicate but mismatched entry found.  Shouldn't happen.
4768 			 * Marking qgroup inconsistent should be enough for end
4769 			 * users.
4770 			 */
4771 			DEBUG_WARN("duplicated but mismatched entry found");
4772 			ret = -EEXIST;
4773 		}
4774 		kfree(block);
4775 		goto out_unlock;
4776 	}
4777 	blocks->swapped = true;
4778 out_unlock:
4779 	spin_unlock(&blocks->lock);
4780 out:
4781 	if (ret < 0)
4782 		qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
4783 	return ret;
4784 }
4785 
4786 /*
4787  * Check if the tree block is a subtree root, and if so do the needed
4788  * delayed subtree trace for qgroup.
4789  *
4790  * This is called during btrfs_cow_block().
4791  */
4792 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4793 					 struct btrfs_root *root,
4794 					 struct extent_buffer *subvol_eb)
4795 {
4796 	struct btrfs_fs_info *fs_info = root->fs_info;
4797 	struct btrfs_tree_parent_check check = { 0 };
4798 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4799 	struct btrfs_qgroup_swapped_block *block;
4800 	struct extent_buffer *reloc_eb = NULL;
4801 	struct rb_node *node;
4802 	bool swapped = false;
4803 	int level = btrfs_header_level(subvol_eb);
4804 	int ret = 0;
4805 	int i;
4806 
4807 	if (!btrfs_qgroup_full_accounting(fs_info))
4808 		return 0;
4809 	if (!btrfs_is_fstree(btrfs_root_id(root)) || !root->reloc_root)
4810 		return 0;
4811 
4812 	spin_lock(&blocks->lock);
4813 	if (!blocks->swapped) {
4814 		spin_unlock(&blocks->lock);
4815 		return 0;
4816 	}
4817 	node = rb_find(&subvol_eb->start, &blocks->blocks[level],
4818 			qgroup_swapped_block_bytenr_key_cmp);
4819 	if (!node) {
4820 		spin_unlock(&blocks->lock);
4821 		goto out;
4822 	}
4823 	block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4824 
4825 	/* Found one, remove it from @blocks first and update blocks->swapped */
4826 	rb_erase(&block->node, &blocks->blocks[level]);
4827 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4828 		if (!RB_EMPTY_ROOT(&blocks->blocks[i])) {
4829 			swapped = true;
4830 			break;
4831 		}
4832 	}
4833 	blocks->swapped = swapped;
4834 	spin_unlock(&blocks->lock);
4835 
4836 	check.level = block->level;
4837 	check.transid = block->reloc_generation;
4838 	check.has_first_key = true;
4839 	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4840 
4841 	/* Read out reloc subtree root */
4842 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4843 	if (IS_ERR(reloc_eb)) {
4844 		ret = PTR_ERR(reloc_eb);
4845 		reloc_eb = NULL;
4846 		goto free_out;
4847 	}
4848 	if (unlikely(!extent_buffer_uptodate(reloc_eb))) {
4849 		ret = -EIO;
4850 		goto free_out;
4851 	}
4852 
4853 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4854 			block->last_snapshot, block->trace_leaf);
4855 free_out:
4856 	kfree(block);
4857 	free_extent_buffer(reloc_eb);
4858 out:
4859 	if (ret < 0) {
4860 		qgroup_mark_inconsistent(fs_info,
4861 				"failed to account subtree at bytenr %llu: %d",
4862 				subvol_eb->start, ret);
4863 	}
4864 	return ret;
4865 }
4866 
4867 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4868 {
4869 	struct btrfs_qgroup_extent_record *entry;
4870 	unsigned long index;
4871 
4872 	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
4873 		ulist_free(entry->old_roots);
4874 		kfree(entry);
4875 	}
4876 	xa_destroy(&trans->delayed_refs.dirty_extents);
4877 }
4878 
4879 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
4880 			      const struct btrfs_squota_delta *delta)
4881 {
4882 	int ret;
4883 	struct btrfs_qgroup *qgroup;
4884 	struct btrfs_qgroup *qg;
4885 	LIST_HEAD(qgroup_list);
4886 	u64 root = delta->root;
4887 	u64 num_bytes = delta->num_bytes;
4888 	const int sign = (delta->is_inc ? 1 : -1);
4889 
4890 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4891 		return 0;
4892 
4893 	if (!btrfs_is_fstree(root))
4894 		return 0;
4895 
4896 	/* If the extent predates enabling quotas, don't count it. */
4897 	if (delta->generation < fs_info->qgroup_enable_gen)
4898 		return 0;
4899 
4900 	spin_lock(&fs_info->qgroup_lock);
4901 	qgroup = find_qgroup_rb(fs_info, root);
4902 	if (!qgroup) {
4903 		ret = -ENOENT;
4904 		goto out;
4905 	}
4906 
4907 	ret = 0;
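	/*
	 * In simple quotas mode every extent is owned by exactly one
	 * subvolume, so rfer and excl move by the same signed amount up the
	 * hierarchy.
	 */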
4908 	qgroup_iterator_add(&qgroup_list, qgroup);
4909 	list_for_each_entry(qg, &qgroup_list, iterator) {
4910 		struct btrfs_qgroup_list *glist;
4911 
4912 		qg->excl += num_bytes * sign;
4913 		qg->rfer += num_bytes * sign;
4914 		qgroup_dirty(fs_info, qg);
4915 
4916 		list_for_each_entry(glist, &qg->groups, next_group)
4917 			qgroup_iterator_add(&qgroup_list, glist->group);
4918 	}
4919 	qgroup_iterator_clean(&qgroup_list);
4920 
4921 out:
4922 	spin_unlock(&fs_info->qgroup_lock);
4923 	return ret;
4924 }
4925