xref: /linux/fs/btrfs/qgroup.c (revision f92b71ffca8c7e45e194aecc85e31bd11582f4d2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 STRATO.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/workqueue.h>
13 #include <linux/btrfs.h>
14 #include <linux/sched/mm.h>
15 
16 #include "ctree.h"
17 #include "transaction.h"
18 #include "disk-io.h"
19 #include "locking.h"
20 #include "ulist.h"
21 #include "backref.h"
22 #include "extent_io.h"
23 #include "qgroup.h"
24 #include "block-group.h"
25 #include "sysfs.h"
26 #include "tree-mod-log.h"
27 #include "fs.h"
28 #include "accessors.h"
29 #include "extent-tree.h"
30 #include "root-tree.h"
31 #include "tree-checker.h"
32 
33 enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
34 {
35 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
36 		return BTRFS_QGROUP_MODE_DISABLED;
37 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
38 		return BTRFS_QGROUP_MODE_SIMPLE;
39 	return BTRFS_QGROUP_MODE_FULL;
40 }
41 
42 bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
43 {
44 	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
45 }
46 
47 bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
48 {
49 	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
50 }
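
/*
 * A sketch of how these mode predicates are typically used by callers in
 * this file (hypothetical snippet, not from the original source):
 * extent-by-extent accounting work is usually gated on full mode, while
 * simple quotas (squota) still pass btrfs_qgroup_enabled().
 *
 *	if (!btrfs_qgroup_full_accounting(fs_info))
 *		return 0;	// squota or disabled: skip full accounting
 */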
51 
52 /*
53  * Helpers to access qgroup reservation
54  *
55  * Callers should ensure the lock context and type are valid
56  */
57 
58 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
59 {
60 	u64 ret = 0;
61 	int i;
62 
63 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
64 		ret += qgroup->rsv.values[i];
65 
66 	return ret;
67 }
68 
69 #ifdef CONFIG_BTRFS_DEBUG
70 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
71 {
72 	if (type == BTRFS_QGROUP_RSV_DATA)
73 		return "data";
74 	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
75 		return "meta_pertrans";
76 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
77 		return "meta_prealloc";
78 	return NULL;
79 }
80 #endif
81 
82 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
83 			   struct btrfs_qgroup *qgroup, u64 num_bytes,
84 			   enum btrfs_qgroup_rsv_type type)
85 {
86 	trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
87 	qgroup->rsv.values[type] += num_bytes;
88 }
89 
90 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
91 			       struct btrfs_qgroup *qgroup, u64 num_bytes,
92 			       enum btrfs_qgroup_rsv_type type)
93 {
94 	trace_btrfs_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
95 	if (qgroup->rsv.values[type] >= num_bytes) {
96 		qgroup->rsv.values[type] -= num_bytes;
97 		return;
98 	}
99 #ifdef CONFIG_BTRFS_DEBUG
100 	WARN_RATELIMIT(1,
101 		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
102 		qgroup->qgroupid, qgroup_rsv_type_str(type),
103 		qgroup->rsv.values[type], num_bytes);
104 #endif
105 	qgroup->rsv.values[type] = 0;
106 }
107 
108 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
109 				     struct btrfs_qgroup *dest,
110 				     const struct btrfs_qgroup *src)
111 {
112 	int i;
113 
114 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
115 		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
116 }
117 
118 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
119 					 struct btrfs_qgroup *dest,
120 					 const struct btrfs_qgroup *src)
121 {
122 	int i;
123 
124 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
125 		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
126 }
127 
128 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
129 					   int mod)
130 {
131 	if (qg->old_refcnt < seq)
132 		qg->old_refcnt = seq;
133 	qg->old_refcnt += mod;
134 }
135 
136 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
137 					   int mod)
138 {
139 	if (qg->new_refcnt < seq)
140 		qg->new_refcnt = seq;
141 	qg->new_refcnt += mod;
142 }
143 
144 static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
145 {
146 	if (qg->old_refcnt < seq)
147 		return 0;
148 	return qg->old_refcnt - seq;
149 }
150 
151 static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
152 {
153 	if (qg->new_refcnt < seq)
154 		return 0;
155 	return qg->new_refcnt - seq;
156 }
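
/*
 * A minimal usage sketch for the seq-based refcnt helpers above, with a
 * hypothetical qgroup @qg and sequence number 100.  The sequence acts as
 * the zero point, so counts left over from an earlier (smaller) sequence
 * read back as zero without being reset one by one:
 *
 *	btrfs_qgroup_update_old_refcnt(qg, 100, 1);	// old_refcnt becomes 101
 *	btrfs_qgroup_get_old_refcnt(qg, 100);		// returns 1
 *	btrfs_qgroup_get_old_refcnt(qg, 200);		// a later run sees 0
 */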
157 
158 static int
159 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
160 		   int init_flags);
161 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
162 
163 static int btrfs_qgroup_qgroupid_key_cmp(const void *key, const struct rb_node *node)
164 {
165 	const u64 *qgroupid = key;
166 	const struct btrfs_qgroup *qgroup = rb_entry(node, struct btrfs_qgroup, node);
167 
168 	if (qgroup->qgroupid < *qgroupid)
169 		return -1;
170 	else if (qgroup->qgroupid > *qgroupid)
171 		return 1;
172 
173 	return 0;
174 }
175 
176 /* must be called with qgroup_ioctl_lock held */
177 static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
178 					   u64 qgroupid)
179 {
180 	struct rb_node *node;
181 
182 	node = rb_find(&qgroupid, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_key_cmp);
183 	return rb_entry_safe(node, struct btrfs_qgroup, node);
184 }
185 
186 static int btrfs_qgroup_qgroupid_cmp(struct rb_node *new, const struct rb_node *existing)
187 {
188 	const struct btrfs_qgroup *new_qgroup = rb_entry(new, struct btrfs_qgroup, node);
189 
190 	return btrfs_qgroup_qgroupid_key_cmp(&new_qgroup->qgroupid, existing);
191 }
192 
193 /*
194  * Add qgroup to the filesystem's qgroup tree.
195  *
196  * Must be called with qgroup_lock held and @prealloc preallocated.
197  *
198  * Ownership of @prealloc is transferred to this function, thus the
199  * caller should no longer touch @prealloc.
200  */
201 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
202 					  struct btrfs_qgroup *prealloc,
203 					  u64 qgroupid)
204 {
205 	struct rb_node *node;
206 
207 	/* Caller must have pre-allocated @prealloc. */
208 	ASSERT(prealloc);
209 
210 	prealloc->qgroupid = qgroupid;
211 	node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp);
212 	if (node) {
213 		kfree(prealloc);
214 		return rb_entry(node, struct btrfs_qgroup, node);
215 	}
216 
217 	INIT_LIST_HEAD(&prealloc->groups);
218 	INIT_LIST_HEAD(&prealloc->members);
219 	INIT_LIST_HEAD(&prealloc->dirty);
220 	INIT_LIST_HEAD(&prealloc->iterator);
221 	INIT_LIST_HEAD(&prealloc->nested_iterator);
222 
223 	return prealloc;
224 }
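
/*
 * A sketch of the expected calling pattern for add_qgroup_rb() (hypothetical
 * caller, matching how btrfs_create_qgroup() below uses it): the caller
 * allocates @prealloc up front and must not free it afterwards, since it is
 * either inserted into the tree or already freed here on collision.
 *
 *	struct btrfs_qgroup *prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
 *
 *	if (!prealloc)
 *		return -ENOMEM;
 *	spin_lock(&fs_info->qgroup_lock);
 *	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
 *	spin_unlock(&fs_info->qgroup_lock);
 */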
225 
226 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
227 {
228 	struct btrfs_qgroup_list *list;
229 
230 	list_del(&qgroup->dirty);
231 	while (!list_empty(&qgroup->groups)) {
232 		list = list_first_entry(&qgroup->groups,
233 					struct btrfs_qgroup_list, next_group);
234 		list_del(&list->next_group);
235 		list_del(&list->next_member);
236 		kfree(list);
237 	}
238 
239 	while (!list_empty(&qgroup->members)) {
240 		list = list_first_entry(&qgroup->members,
241 					struct btrfs_qgroup_list, next_member);
242 		list_del(&list->next_group);
243 		list_del(&list->next_member);
244 		kfree(list);
245 	}
246 }
247 
248 /* must be called with qgroup_lock held */
249 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
250 {
251 	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
252 
253 	if (!qgroup)
254 		return -ENOENT;
255 
256 	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
257 	__del_qgroup_rb(qgroup);
258 	return 0;
259 }
260 
261 /*
262  * Add relation specified by two qgroups.
263  *
264  * Must be called with qgroup_lock held; the ownership of @prealloc is
265  * transferred to this function and the caller should not touch it anymore.
266  *
267  * Return: 0        on success
268  *         -ENOENT  if one of the qgroups is NULL
269  *         <0       other errors
270  */
271 static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
272 			     struct btrfs_qgroup *member,
273 			     struct btrfs_qgroup *parent)
274 {
275 	if (!member || !parent) {
276 		kfree(prealloc);
277 		return -ENOENT;
278 	}
279 
280 	prealloc->group = parent;
281 	prealloc->member = member;
282 	list_add_tail(&prealloc->next_group, &member->groups);
283 	list_add_tail(&prealloc->next_member, &parent->members);
284 
285 	return 0;
286 }
287 
288 /*
289  * Add relation specified by two qgroup ids.
290  *
291  * Must be called with qgroup_lock held.
292  *
293  * Return: 0        on success
294  *         -ENOENT  if one of the ids does not exist
295  *         <0       other errors
296  */
297 static int add_relation_rb(struct btrfs_fs_info *fs_info,
298 			   struct btrfs_qgroup_list *prealloc,
299 			   u64 memberid, u64 parentid)
300 {
301 	struct btrfs_qgroup *member;
302 	struct btrfs_qgroup *parent;
303 
304 	member = find_qgroup_rb(fs_info, memberid);
305 	parent = find_qgroup_rb(fs_info, parentid);
306 
307 	return __add_relation_rb(prealloc, member, parent);
308 }
309 
310 /* Must be called with qgroup_lock held */
311 static int del_relation_rb(struct btrfs_fs_info *fs_info,
312 			   u64 memberid, u64 parentid)
313 {
314 	struct btrfs_qgroup *member;
315 	struct btrfs_qgroup *parent;
316 	struct btrfs_qgroup_list *list;
317 
318 	member = find_qgroup_rb(fs_info, memberid);
319 	parent = find_qgroup_rb(fs_info, parentid);
320 	if (!member || !parent)
321 		return -ENOENT;
322 
323 	list_for_each_entry(list, &member->groups, next_group) {
324 		if (list->group == parent) {
325 			list_del(&list->next_group);
326 			list_del(&list->next_member);
327 			kfree(list);
328 			return 0;
329 		}
330 	}
331 	return -ENOENT;
332 }
333 
334 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
335 int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
336 			       u64 rfer, u64 excl)
337 {
338 	struct btrfs_qgroup *qgroup;
339 
340 	qgroup = find_qgroup_rb(fs_info, qgroupid);
341 	if (!qgroup)
342 		return -EINVAL;
343 	if (qgroup->rfer != rfer || qgroup->excl != excl)
344 		return -EINVAL;
345 	return 0;
346 }
347 #endif
348 
349 __printf(2, 3)
350 static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info, const char *fmt, ...)
351 {
352 	const u64 old_flags = fs_info->qgroup_flags;
353 
354 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
355 		return;
356 	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
357 				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
358 				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
359 	if (!(old_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
360 		struct va_format vaf;
361 		va_list args;
362 
363 		va_start(args, fmt);
364 		vaf.fmt = fmt;
365 		vaf.va = &args;
366 
367 		btrfs_warn_rl(fs_info, "qgroup marked inconsistent, %pV", &vaf);
368 		va_end(args);
369 	}
370 }
371 
372 static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
373 				   struct extent_buffer *leaf, int slot,
374 				   struct btrfs_qgroup_status_item *ptr)
375 {
376 	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
377 	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
378 	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
379 }
380 
381 /*
382  * The full config is read in one go; this is only called from open_ctree().
383  * It doesn't use any locking, as at this point we're still single-threaded.
384  */
385 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
386 {
387 	struct btrfs_key key;
388 	struct btrfs_key found_key;
389 	struct btrfs_root *quota_root = fs_info->quota_root;
390 	struct btrfs_path *path = NULL;
391 	struct extent_buffer *l;
392 	int slot;
393 	int ret = 0;
394 	u64 flags = 0;
395 	u64 rescan_progress = 0;
396 
397 	if (!fs_info->quota_root)
398 		return 0;
399 
400 	path = btrfs_alloc_path();
401 	if (!path) {
402 		ret = -ENOMEM;
403 		goto out;
404 	}
405 
406 	ret = btrfs_sysfs_add_qgroups(fs_info);
407 	if (ret < 0)
408 		goto out;
409 	/* default this to quota off, in case no status key is found */
410 	fs_info->qgroup_flags = 0;
411 
412 	/*
413 	 * pass 1: read status, all qgroup infos and limits
414 	 */
415 	key.objectid = 0;
416 	key.type = 0;
417 	key.offset = 0;
418 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
419 	if (ret)
420 		goto out;
421 
422 	while (1) {
423 		struct btrfs_qgroup *qgroup;
424 
425 		slot = path->slots[0];
426 		l = path->nodes[0];
427 		btrfs_item_key_to_cpu(l, &found_key, slot);
428 
429 		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
430 			struct btrfs_qgroup_status_item *ptr;
431 
432 			ptr = btrfs_item_ptr(l, slot,
433 					     struct btrfs_qgroup_status_item);
434 
435 			if (btrfs_qgroup_status_version(l, ptr) !=
436 			    BTRFS_QGROUP_STATUS_VERSION) {
437 				btrfs_err(fs_info,
438 				 "old qgroup version, quota disabled");
439 				goto out;
440 			}
441 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
442 			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
443 				qgroup_read_enable_gen(fs_info, l, slot, ptr);
444 			else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation)
445 				qgroup_mark_inconsistent(fs_info, "qgroup generation mismatch");
446 			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
447 			goto next1;
448 		}
449 
450 		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
451 		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
452 			goto next1;
453 
454 		qgroup = find_qgroup_rb(fs_info, found_key.offset);
455 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
456 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY))
457 			qgroup_mark_inconsistent(fs_info, "inconsistent qgroup config");
458 		if (!qgroup) {
459 			struct btrfs_qgroup *prealloc;
460 			struct btrfs_root *tree_root = fs_info->tree_root;
461 
462 			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
463 			if (!prealloc) {
464 				ret = -ENOMEM;
465 				goto out;
466 			}
467 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
468 			/*
469 			 * If a qgroup exists for a subvolume ID, it is possible
470 			 * that the subvolume has been deleted, in which case
471 			 * reusing that ID would lead to incorrect accounting.
472 			 *
473 			 * Ensure that we skip any such subvol ids.
474 			 *
475 			 * We don't need to lock because this is only called
476 			 * during mount before we start doing things like creating
477 			 * subvolumes.
478 			 */
479 			if (btrfs_is_fstree(qgroup->qgroupid) &&
480 			    qgroup->qgroupid > tree_root->free_objectid)
481 				/*
482 				 * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
483 				 * as it will get checked on the next call to
484 				 * btrfs_get_free_objectid.
485 				 */
486 				tree_root->free_objectid = qgroup->qgroupid + 1;
487 		}
488 		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
489 		if (ret < 0)
490 			goto out;
491 
492 		switch (found_key.type) {
493 		case BTRFS_QGROUP_INFO_KEY: {
494 			struct btrfs_qgroup_info_item *ptr;
495 
496 			ptr = btrfs_item_ptr(l, slot,
497 					     struct btrfs_qgroup_info_item);
498 			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
499 			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
500 			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
501 			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
502 			/* generation currently unused */
503 			break;
504 		}
505 		case BTRFS_QGROUP_LIMIT_KEY: {
506 			struct btrfs_qgroup_limit_item *ptr;
507 
508 			ptr = btrfs_item_ptr(l, slot,
509 					     struct btrfs_qgroup_limit_item);
510 			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
511 			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
512 			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
513 			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
514 			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
515 			break;
516 		}
517 		}
518 next1:
519 		ret = btrfs_next_item(quota_root, path);
520 		if (ret < 0)
521 			goto out;
522 		if (ret)
523 			break;
524 	}
525 	btrfs_release_path(path);
526 
527 	/*
528 	 * pass 2: read all qgroup relations
529 	 */
530 	key.objectid = 0;
531 	key.type = BTRFS_QGROUP_RELATION_KEY;
532 	key.offset = 0;
533 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
534 	if (ret)
535 		goto out;
536 	while (1) {
537 		struct btrfs_qgroup_list *list = NULL;
538 
539 		slot = path->slots[0];
540 		l = path->nodes[0];
541 		btrfs_item_key_to_cpu(l, &found_key, slot);
542 
543 		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
544 			goto next2;
545 
546 		if (found_key.objectid > found_key.offset) {
547 			/* parent <- member, not needed to build config */
548 			/* FIXME should we omit the key completely? */
549 			goto next2;
550 		}
551 
552 		list = kzalloc(sizeof(*list), GFP_KERNEL);
553 		if (!list) {
554 			ret = -ENOMEM;
555 			goto out;
556 		}
557 		ret = add_relation_rb(fs_info, list, found_key.objectid,
558 				      found_key.offset);
559 		list = NULL;
560 		if (ret == -ENOENT) {
561 			btrfs_warn(fs_info,
562 				"orphan qgroup relation 0x%llx->0x%llx",
563 				found_key.objectid, found_key.offset);
564 			ret = 0;	/* ignore the error */
565 		}
566 		if (ret)
567 			goto out;
568 next2:
569 		ret = btrfs_next_item(quota_root, path);
570 		if (ret < 0)
571 			goto out;
572 		if (ret)
573 			break;
574 	}
575 out:
576 	btrfs_free_path(path);
577 	fs_info->qgroup_flags |= flags;
578 	if (ret >= 0) {
579 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
580 			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
581 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
582 			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
583 	} else {
584 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
585 		btrfs_sysfs_del_qgroups(fs_info);
586 	}
587 
588 	return ret < 0 ? ret : 0;
589 }
590 
591 /*
592  * Called in close_ctree() when quota is still enabled.  This verifies we don't
593  * leak any reserved space.
594  *
595  * Return false if no reserved space is left.
596  * Return true if some reserved space is leaked.
597  */
598 bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
599 {
600 	struct rb_node *node;
601 	bool ret = false;
602 
603 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
604 		return ret;
605 	/*
606 	 * Since we're unmounting, there is no race and no need to grab qgroup
607 	 * lock.  And here we don't go post-order, to provide a more
608 	 * user-friendly sorted result.
609 	 */
610 	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
611 		struct btrfs_qgroup *qgroup;
612 		int i;
613 
614 		qgroup = rb_entry(node, struct btrfs_qgroup, node);
615 		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
616 			if (qgroup->rsv.values[i]) {
617 				ret = true;
618 				btrfs_warn(fs_info,
619 		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
620 				   btrfs_qgroup_level(qgroup->qgroupid),
621 				   btrfs_qgroup_subvolid(qgroup->qgroupid),
622 				   i, qgroup->rsv.values[i]);
623 			}
624 		}
625 	}
626 	return ret;
627 }
628 
629 /*
630  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
631  * first two are in single-threaded paths.
632  */
633 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
634 {
635 	struct rb_node *n;
636 	struct btrfs_qgroup *qgroup;
637 
638 	/*
639 	 * btrfs_quota_disable() can be called concurrently with
640 	 * btrfs_qgroup_rescan() -> qgroup_rescan_zero_tracking(), so take the
641 	 * lock.
642 	 */
643 	spin_lock(&fs_info->qgroup_lock);
644 	while ((n = rb_first(&fs_info->qgroup_tree))) {
645 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
646 		rb_erase(n, &fs_info->qgroup_tree);
647 		__del_qgroup_rb(qgroup);
648 		spin_unlock(&fs_info->qgroup_lock);
649 		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
650 		kfree(qgroup);
651 		spin_lock(&fs_info->qgroup_lock);
652 	}
653 	spin_unlock(&fs_info->qgroup_lock);
654 
655 	btrfs_sysfs_del_qgroups(fs_info);
656 }
657 
658 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
659 				    u64 dst)
660 {
661 	int ret;
662 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
663 	struct btrfs_path *path;
664 	struct btrfs_key key;
665 
666 	path = btrfs_alloc_path();
667 	if (!path)
668 		return -ENOMEM;
669 
670 	key.objectid = src;
671 	key.type = BTRFS_QGROUP_RELATION_KEY;
672 	key.offset = dst;
673 
674 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
675 	btrfs_free_path(path);
676 	return ret;
677 }
678 
679 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
680 				    u64 dst)
681 {
682 	int ret;
683 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
684 	struct btrfs_path *path;
685 	struct btrfs_key key;
686 
687 	path = btrfs_alloc_path();
688 	if (!path)
689 		return -ENOMEM;
690 
691 	key.objectid = src;
692 	key.type = BTRFS_QGROUP_RELATION_KEY;
693 	key.offset = dst;
694 
695 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
696 	if (ret < 0)
697 		goto out;
698 
699 	if (ret > 0) {
700 		ret = -ENOENT;
701 		goto out;
702 	}
703 
704 	ret = btrfs_del_item(trans, quota_root, path);
705 out:
706 	btrfs_free_path(path);
707 	return ret;
708 }
709 
710 static int add_qgroup_item(struct btrfs_trans_handle *trans,
711 			   struct btrfs_root *quota_root, u64 qgroupid)
712 {
713 	int ret;
714 	struct btrfs_path *path;
715 	struct btrfs_qgroup_info_item *qgroup_info;
716 	struct btrfs_qgroup_limit_item *qgroup_limit;
717 	struct extent_buffer *leaf;
718 	struct btrfs_key key;
719 
720 	if (btrfs_is_testing(quota_root->fs_info))
721 		return 0;
722 
723 	path = btrfs_alloc_path();
724 	if (!path)
725 		return -ENOMEM;
726 
727 	key.objectid = 0;
728 	key.type = BTRFS_QGROUP_INFO_KEY;
729 	key.offset = qgroupid;
730 
731 	/*
732 	 * Avoid a transaction abort by catching -EEXIST here. In that
733 	 * case, we proceed by re-initializing the existing structure
734 	 * on disk.
735 	 */
736 
737 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
738 				      sizeof(*qgroup_info));
739 	if (ret && ret != -EEXIST)
740 		goto out;
741 
742 	leaf = path->nodes[0];
743 	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
744 				 struct btrfs_qgroup_info_item);
745 	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
746 	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
747 	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
748 	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
749 	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
750 
751 	btrfs_release_path(path);
752 
753 	key.type = BTRFS_QGROUP_LIMIT_KEY;
754 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
755 				      sizeof(*qgroup_limit));
756 	if (ret && ret != -EEXIST)
757 		goto out;
758 
759 	leaf = path->nodes[0];
760 	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
761 				  struct btrfs_qgroup_limit_item);
762 	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
763 	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
764 	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
765 	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
766 	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
767 
768 	ret = 0;
769 out:
770 	btrfs_free_path(path);
771 	return ret;
772 }
773 
774 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
775 {
776 	int ret;
777 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
778 	struct btrfs_path *path;
779 	struct btrfs_key key;
780 
781 	path = btrfs_alloc_path();
782 	if (!path)
783 		return -ENOMEM;
784 
785 	key.objectid = 0;
786 	key.type = BTRFS_QGROUP_INFO_KEY;
787 	key.offset = qgroupid;
788 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
789 	if (ret < 0)
790 		goto out;
791 
792 	if (ret > 0) {
793 		ret = -ENOENT;
794 		goto out;
795 	}
796 
797 	ret = btrfs_del_item(trans, quota_root, path);
798 	if (ret)
799 		goto out;
800 
801 	btrfs_release_path(path);
802 
803 	key.type = BTRFS_QGROUP_LIMIT_KEY;
804 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
805 	if (ret < 0)
806 		goto out;
807 
808 	if (ret > 0) {
809 		ret = -ENOENT;
810 		goto out;
811 	}
812 
813 	ret = btrfs_del_item(trans, quota_root, path);
814 
815 out:
816 	btrfs_free_path(path);
817 	return ret;
818 }
819 
820 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
821 				    struct btrfs_qgroup *qgroup)
822 {
823 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
824 	struct btrfs_path *path;
825 	struct btrfs_key key;
826 	struct extent_buffer *l;
827 	struct btrfs_qgroup_limit_item *qgroup_limit;
828 	int ret;
829 	int slot;
830 
831 	key.objectid = 0;
832 	key.type = BTRFS_QGROUP_LIMIT_KEY;
833 	key.offset = qgroup->qgroupid;
834 
835 	path = btrfs_alloc_path();
836 	if (!path)
837 		return -ENOMEM;
838 
839 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
840 	if (ret > 0)
841 		ret = -ENOENT;
842 
843 	if (ret)
844 		goto out;
845 
846 	l = path->nodes[0];
847 	slot = path->slots[0];
848 	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
849 	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
850 	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
851 	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
852 	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
853 	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
854 out:
855 	btrfs_free_path(path);
856 	return ret;
857 }
858 
859 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
860 				   struct btrfs_qgroup *qgroup)
861 {
862 	struct btrfs_fs_info *fs_info = trans->fs_info;
863 	struct btrfs_root *quota_root = fs_info->quota_root;
864 	struct btrfs_path *path;
865 	struct btrfs_key key;
866 	struct extent_buffer *l;
867 	struct btrfs_qgroup_info_item *qgroup_info;
868 	int ret;
869 	int slot;
870 
871 	if (btrfs_is_testing(fs_info))
872 		return 0;
873 
874 	key.objectid = 0;
875 	key.type = BTRFS_QGROUP_INFO_KEY;
876 	key.offset = qgroup->qgroupid;
877 
878 	path = btrfs_alloc_path();
879 	if (!path)
880 		return -ENOMEM;
881 
882 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
883 	if (ret > 0)
884 		ret = -ENOENT;
885 
886 	if (ret)
887 		goto out;
888 
889 	l = path->nodes[0];
890 	slot = path->slots[0];
891 	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
892 	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
893 	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
894 	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
895 	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
896 	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
897 out:
898 	btrfs_free_path(path);
899 	return ret;
900 }
901 
902 static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
903 {
904 	struct btrfs_fs_info *fs_info = trans->fs_info;
905 	struct btrfs_root *quota_root = fs_info->quota_root;
906 	struct btrfs_path *path;
907 	struct btrfs_key key;
908 	struct extent_buffer *l;
909 	struct btrfs_qgroup_status_item *ptr;
910 	int ret;
911 	int slot;
912 
913 	key.objectid = 0;
914 	key.type = BTRFS_QGROUP_STATUS_KEY;
915 	key.offset = 0;
916 
917 	path = btrfs_alloc_path();
918 	if (!path)
919 		return -ENOMEM;
920 
921 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
922 	if (ret > 0)
923 		ret = -ENOENT;
924 
925 	if (ret)
926 		goto out;
927 
928 	l = path->nodes[0];
929 	slot = path->slots[0];
930 	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
931 	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
932 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
933 	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
934 	btrfs_set_qgroup_status_rescan(l, ptr,
935 				fs_info->qgroup_rescan_progress.objectid);
936 out:
937 	btrfs_free_path(path);
938 	return ret;
939 }
940 
941 /*
942  * called with qgroup_lock held
943  */
944 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
945 				  struct btrfs_root *root)
946 {
947 	struct btrfs_path *path;
948 	struct btrfs_key key;
949 	struct extent_buffer *leaf = NULL;
950 	int ret;
951 	int nr = 0;
952 
953 	path = btrfs_alloc_path();
954 	if (!path)
955 		return -ENOMEM;
956 
957 	key.objectid = 0;
958 	key.type = 0;
959 	key.offset = 0;
960 
961 	while (1) {
962 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
963 		if (ret < 0)
964 			goto out;
965 		leaf = path->nodes[0];
966 		nr = btrfs_header_nritems(leaf);
967 		if (!nr)
968 			break;
969 		/*
970 		 * Delete the leaves one by one,
971 		 * since the whole tree is going
972 		 * to be deleted.
973 		 */
974 		path->slots[0] = 0;
975 		ret = btrfs_del_items(trans, root, path, 0, nr);
976 		if (ret)
977 			goto out;
978 
979 		btrfs_release_path(path);
980 	}
981 	ret = 0;
982 out:
983 	btrfs_free_path(path);
984 	return ret;
985 }
986 
987 int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
988 		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
989 {
990 	struct btrfs_root *quota_root;
991 	struct btrfs_root *tree_root = fs_info->tree_root;
992 	struct btrfs_path *path = NULL;
993 	struct btrfs_qgroup_status_item *ptr;
994 	struct extent_buffer *leaf;
995 	struct btrfs_key key;
996 	struct btrfs_key found_key;
997 	struct btrfs_qgroup *qgroup = NULL;
998 	struct btrfs_qgroup *prealloc = NULL;
999 	struct btrfs_trans_handle *trans = NULL;
1000 	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
1001 	int ret = 0;
1002 	int slot;
1003 
1004 	/*
1005 	 * We need to have subvol_sem write locked, to prevent races between
1006 	 * concurrent tasks trying to enable quotas, because we will unlock
1007 	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
1008 	 * and before setting BTRFS_FS_QUOTA_ENABLED.
1009 	 */
1010 	lockdep_assert_held_write(&fs_info->subvol_sem);
1011 
1012 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
1013 		btrfs_err(fs_info,
1014 			  "qgroups are currently unsupported in extent tree v2");
1015 		return -EINVAL;
1016 	}
1017 
1018 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1019 	if (fs_info->quota_root)
1020 		goto out;
1021 
1022 	ret = btrfs_sysfs_add_qgroups(fs_info);
1023 	if (ret < 0)
1024 		goto out;
1025 
1026 	/*
1027 	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
1028 	 * avoid lock acquisition inversion problems (reported by lockdep) between
1029 	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
1030 	 * start a transaction.
1031 	 * After we started the transaction lock qgroup_ioctl_lock again and
1032 	 * check if someone else created the quota root in the meanwhile. If so,
1033 	 * just return success and release the transaction handle.
1034 	 *
1035 	 * Also we don't need to worry about someone else calling
1036 	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
1037 	 * that function returns 0 (success) when the sysfs entries already exist.
1038 	 */
1039 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1040 
1041 	/*
1042 	 * 1 for quota root item
1043 	 * 1 for BTRFS_QGROUP_STATUS item
1044 	 *
1045 	 * Yet we also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
1046 	 * per subvolume. However those are not currently reserved since that
1047 	 * would be a lot of overkill.
1048 	 */
1049 	trans = btrfs_start_transaction(tree_root, 2);
1050 
1051 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1052 	if (IS_ERR(trans)) {
1053 		ret = PTR_ERR(trans);
1054 		trans = NULL;
1055 		goto out;
1056 	}
1057 
1058 	if (fs_info->quota_root)
1059 		goto out;
1060 
1061 	/*
1062 	 * initially create the quota tree
1063 	 */
1064 	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
1065 	if (IS_ERR(quota_root)) {
1066 		ret = PTR_ERR(quota_root);
1067 		btrfs_abort_transaction(trans, ret);
1068 		goto out;
1069 	}
1070 
1071 	path = btrfs_alloc_path();
1072 	if (!path) {
1073 		ret = -ENOMEM;
1074 		btrfs_abort_transaction(trans, ret);
1075 		goto out_free_root;
1076 	}
1077 
1078 	key.objectid = 0;
1079 	key.type = BTRFS_QGROUP_STATUS_KEY;
1080 	key.offset = 0;
1081 
1082 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
1083 				      sizeof(*ptr));
1084 	if (ret) {
1085 		btrfs_abort_transaction(trans, ret);
1086 		goto out_free_path;
1087 	}
1088 
1089 	leaf = path->nodes[0];
1090 	ptr = btrfs_item_ptr(leaf, path->slots[0],
1091 				 struct btrfs_qgroup_status_item);
1092 	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
1093 	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
1094 	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
1095 	if (simple) {
1096 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1097 		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
1098 		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
1099 	} else {
1100 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1101 	}
1102 	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
1103 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
1104 	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
1105 
1106 	key.objectid = 0;
1107 	key.type = BTRFS_ROOT_REF_KEY;
1108 	key.offset = 0;
1109 
1110 	btrfs_release_path(path);
1111 	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
1112 	if (ret > 0)
1113 		goto out_add_root;
1114 	if (ret < 0) {
1115 		btrfs_abort_transaction(trans, ret);
1116 		goto out_free_path;
1117 	}
1118 
1119 	while (1) {
1120 		slot = path->slots[0];
1121 		leaf = path->nodes[0];
1122 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1123 
1124 		if (found_key.type == BTRFS_ROOT_REF_KEY) {
1125 
1126 			/* Release locks on tree_root before we access quota_root */
1127 			btrfs_release_path(path);
1128 
1129 			/* We should not have a stray @prealloc pointer. */
1130 			ASSERT(prealloc == NULL);
1131 			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1132 			if (!prealloc) {
1133 				ret = -ENOMEM;
1134 				btrfs_abort_transaction(trans, ret);
1135 				goto out_free_path;
1136 			}
1137 
1138 			ret = add_qgroup_item(trans, quota_root,
1139 					      found_key.offset);
1140 			if (ret) {
1141 				btrfs_abort_transaction(trans, ret);
1142 				goto out_free_path;
1143 			}
1144 
1145 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
1146 			prealloc = NULL;
1147 			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1148 			if (ret < 0) {
1149 				btrfs_abort_transaction(trans, ret);
1150 				goto out_free_path;
1151 			}
1152 			ret = btrfs_search_slot_for_read(tree_root, &found_key,
1153 							 path, 1, 0);
1154 			if (ret < 0) {
1155 				btrfs_abort_transaction(trans, ret);
1156 				goto out_free_path;
1157 			}
1158 			if (ret > 0) {
1159 				/*
1160 				 * Shouldn't happen, but in case it does we
1161 				 * don't need to do the btrfs_next_item, just
1162 				 * continue.
1163 				 */
1164 				continue;
1165 			}
1166 		}
1167 		ret = btrfs_next_item(tree_root, path);
1168 		if (ret < 0) {
1169 			btrfs_abort_transaction(trans, ret);
1170 			goto out_free_path;
1171 		}
1172 		if (ret)
1173 			break;
1174 	}
1175 
1176 out_add_root:
1177 	btrfs_release_path(path);
1178 	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1179 	if (ret) {
1180 		btrfs_abort_transaction(trans, ret);
1181 		goto out_free_path;
1182 	}
1183 
1184 	ASSERT(prealloc == NULL);
1185 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1186 	if (!prealloc) {
1187 		ret = -ENOMEM;
1188 		goto out_free_path;
1189 	}
1190 	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
1191 	prealloc = NULL;
1192 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1193 	if (ret < 0) {
1194 		btrfs_abort_transaction(trans, ret);
1195 		goto out_free_path;
1196 	}
1197 
1198 	fs_info->qgroup_enable_gen = trans->transid;
1199 
1200 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1201 	/*
1202 	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
1203 	 * a deadlock with tasks concurrently doing other qgroup operations, such
1204 	 * as adding/removing qgroups or adding/deleting qgroup relations,
1205 	 * because all qgroup operations first start or join a transaction and then
1206 	 * lock the qgroup_ioctl_lock mutex.
1207 	 * We are safe from a concurrent task trying to enable quotas, by calling
1208 	 * this function, since we are serialized by fs_info->subvol_sem.
1209 	 */
1210 	ret = btrfs_commit_transaction(trans);
1211 	trans = NULL;
1212 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1213 	if (ret)
1214 		goto out_free_path;
1215 
1216 	/*
1217 	 * Set quota enabled flag after committing the transaction, to avoid
1218 	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1219 	 * creation.
1220 	 */
1221 	spin_lock(&fs_info->qgroup_lock);
1222 	fs_info->quota_root = quota_root;
1223 	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1224 	spin_unlock(&fs_info->qgroup_lock);
1225 
1226 	/* Skip rescan for simple qgroups. */
1227 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
1228 		goto out_free_path;
1229 
1230 	ret = qgroup_rescan_init(fs_info, 0, 1);
1231 	if (!ret) {
1232 	        qgroup_rescan_zero_tracking(fs_info);
1233 		fs_info->qgroup_rescan_running = true;
1234 	        btrfs_queue_work(fs_info->qgroup_rescan_workers,
1235 	                         &fs_info->qgroup_rescan_work);
1236 	} else {
1237 		/*
1238 		 * We have set both BTRFS_FS_QUOTA_ENABLED and
1239 		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
1240 		 * -EINPROGRESS. That can happen because someone started the
1241 		 * rescan worker by calling quota rescan ioctl before we
1242 		 * attempted to initialize the rescan worker. Failure due to
1243 		 * quotas disabled in the meanwhile is not possible, because
1244 		 * we are holding a write lock on fs_info->subvol_sem, which
1245 		 * is also acquired when disabling quotas.
1246 		 * Ignore such error, and any other error would need to undo
1247 		 * everything we did in the transaction we just committed.
1248 		 */
1249 		ASSERT(ret == -EINPROGRESS);
1250 		ret = 0;
1251 	}
1252 
1253 out_free_path:
1254 	btrfs_free_path(path);
1255 out_free_root:
1256 	if (ret)
1257 		btrfs_put_root(quota_root);
1258 out:
1259 	if (ret)
1260 		btrfs_sysfs_del_qgroups(fs_info);
1261 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1262 	if (ret && trans)
1263 		btrfs_end_transaction(trans);
1264 	else if (trans)
1265 		ret = btrfs_end_transaction(trans);
1266 	kfree(prealloc);
1267 	return ret;
1268 }
1269 
1270 /*
1271  * It is possible to have outstanding ordered extents which reserved bytes
1272  * before we disabled. We need to fully flush delalloc, ordered extents, and a
1273  * commit to ensure that we don't leak such reservations, only to have them
1274  * come back if we re-enable.
1275  *
1276  * - enable simple quotas
1277  * - reserve space
1278  * - release it, store rsv_bytes in OE
1279  * - disable quotas
1280  * - enable simple quotas (qgroup rsv are all 0)
1281  * - OE finishes
1282  * - run delayed refs
1283  * - free rsv_bytes, resulting in miscounting or even underflow
1284  */
1285 static int flush_reservations(struct btrfs_fs_info *fs_info)
1286 {
1287 	int ret;
1288 
1289 	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
1290 	if (ret)
1291 		return ret;
1292 	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
1293 
1294 	return btrfs_commit_current_transaction(fs_info->tree_root);
1295 }
1296 
1297 int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1298 {
1299 	struct btrfs_root *quota_root = NULL;
1300 	struct btrfs_trans_handle *trans = NULL;
1301 	int ret = 0;
1302 
1303 	/*
1304 	 * We need to have subvol_sem write locked to prevent races with
1305 	 * snapshot creation.
1306 	 */
1307 	lockdep_assert_held_write(&fs_info->subvol_sem);
1308 
1309 	/*
1310 	 * Relocation will mess with backrefs, so make sure we have the
1311 	 * cleaner_mutex held to protect us from relocate.
1312 	 */
1313 	lockdep_assert_held(&fs_info->cleaner_mutex);
1314 
1315 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1316 	if (!fs_info->quota_root)
1317 		goto out;
1318 
1319 	/*
1320 	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
1321 	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
1322 	 * to lock that mutex while holding a transaction handle and the rescan
1323 	 * worker needs to commit a transaction.
1324 	 */
1325 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1326 
1327 	/*
1328 	 * Request qgroup rescan worker to complete and wait for it. This wait
1329 	 * must be done before transaction start for quota disable since it may
1330 	 * deadlock with transaction by the qgroup rescan worker.
1331 	 */
1332 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1333 	btrfs_qgroup_wait_for_completion(fs_info, false);
1334 
1335 	/*
1336 	 * We have nothing held here and no trans handle, just return the error
1337 	 * if there is one and set back the quota enabled bit since we didn't
1338 	 * actually disable quotas.
1339 	 */
1340 	ret = flush_reservations(fs_info);
1341 	if (ret) {
1342 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1343 		return ret;
1344 	}
1345 
1346 	/*
1347 	 * 1 For the root item
1348 	 *
1349 	 * We should also reserve enough items for the quota tree deletion in
1350 	 * btrfs_clean_quota_tree but this is not done.
1351 	 *
1352 	 * Also, we must always start a transaction without holding the mutex
1353 	 * qgroup_ioctl_lock, see btrfs_quota_enable().
1354 	 */
1355 	trans = btrfs_start_transaction(fs_info->tree_root, 1);
1356 
1357 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1358 	if (IS_ERR(trans)) {
1359 		ret = PTR_ERR(trans);
1360 		trans = NULL;
1361 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1362 		goto out;
1363 	}
1364 
1365 	if (!fs_info->quota_root)
1366 		goto out;
1367 
1368 	spin_lock(&fs_info->qgroup_lock);
1369 	quota_root = fs_info->quota_root;
1370 	fs_info->quota_root = NULL;
1371 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1372 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1373 	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1374 	spin_unlock(&fs_info->qgroup_lock);
1375 
1376 	btrfs_free_qgroup_config(fs_info);
1377 
1378 	ret = btrfs_clean_quota_tree(trans, quota_root);
1379 	if (ret) {
1380 		btrfs_abort_transaction(trans, ret);
1381 		goto out;
1382 	}
1383 
1384 	ret = btrfs_del_root(trans, &quota_root->root_key);
1385 	if (ret) {
1386 		btrfs_abort_transaction(trans, ret);
1387 		goto out;
1388 	}
1389 
1390 	spin_lock(&fs_info->trans_lock);
1391 	list_del(&quota_root->dirty_list);
1392 	spin_unlock(&fs_info->trans_lock);
1393 
1394 	btrfs_tree_lock(quota_root->node);
1395 	btrfs_clear_buffer_dirty(trans, quota_root->node);
1396 	btrfs_tree_unlock(quota_root->node);
1397 	ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
1398 				    quota_root->node, 0, 1);
1399 
1400 	if (ret < 0)
1401 		btrfs_abort_transaction(trans, ret);
1402 
1403 out:
1404 	btrfs_put_root(quota_root);
1405 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1406 	if (ret && trans)
1407 		btrfs_end_transaction(trans);
1408 	else if (trans)
1409 		ret = btrfs_commit_transaction(trans);
1410 	return ret;
1411 }
1412 
1413 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1414 			 struct btrfs_qgroup *qgroup)
1415 {
1416 	if (list_empty(&qgroup->dirty))
1417 		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1418 }
1419 
1420 static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
1421 {
1422 	if (!list_empty(&qgroup->iterator))
1423 		return;
1424 
1425 	list_add_tail(&qgroup->iterator, head);
1426 }
1427 
1428 static void qgroup_iterator_clean(struct list_head *head)
1429 {
1430 	while (!list_empty(head)) {
1431 		struct btrfs_qgroup *qgroup;
1432 
1433 		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
1434 		list_del_init(&qgroup->iterator);
1435 	}
1436 }
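
/*
 * The two iterator helpers above form an allocation-free "visited set":
 * each qgroup embeds a list_head, and qgroup_iterator_add() skips qgroups
 * that are already linked, so every qgroup is queued at most once per walk
 * even when parent hierarchies converge.  A sketch of the walk pattern
 * (as used by __qgroup_excl_accounting() below):
 *
 *	LIST_HEAD(qgroup_list);
 *
 *	qgroup_iterator_add(&qgroup_list, qgroup);
 *	list_for_each_entry(cur, &qgroup_list, iterator) {
 *		... process @cur, then qgroup_iterator_add() its parents ...
 *	}
 *	qgroup_iterator_clean(&qgroup_list);
 */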
1437 
1438 /*
1439  * The easy accounting path: we're updating a qgroup relationship whose
1440  * child qgroup only has exclusive extents.
1441  *
1442  * In this case, all exclusive extents will also be exclusive for the
1443  * parent, so excl/rfer just get added/removed.
1444  *
1445  * The same applies to qgroup reservation space, which must also be
1446  * added/removed for the parent.
1447  * Otherwise, when the child later releases reservation space, the parent
1448  * would underflow its reservation (for the relationship adding case).
1449  *
1450  * Caller should hold fs_info->qgroup_lock.
1451  */
1452 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
1453 				    struct btrfs_qgroup *src, int sign)
1454 {
1455 	struct btrfs_qgroup *qgroup;
1456 	struct btrfs_qgroup *cur;
1457 	LIST_HEAD(qgroup_list);
1458 	u64 num_bytes = src->excl;
1459 	int ret = 0;
1460 
1461 	qgroup = find_qgroup_rb(fs_info, ref_root);
1462 	if (!qgroup)
1463 		goto out;
1464 
1465 	qgroup_iterator_add(&qgroup_list, qgroup);
1466 	list_for_each_entry(cur, &qgroup_list, iterator) {
1467 		struct btrfs_qgroup_list *glist;
1468 
1469 		qgroup->rfer += sign * num_bytes;
1470 		qgroup->rfer_cmpr += sign * num_bytes;
1471 
1472 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1473 		qgroup->excl += sign * num_bytes;
1474 		qgroup->excl_cmpr += sign * num_bytes;
1475 
1476 		if (sign > 0)
1477 			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1478 		else
1479 			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1480 		qgroup_dirty(fs_info, qgroup);
1481 
1482 		/* Append parent qgroups to @qgroup_list. */
1483 		list_for_each_entry(glist, &qgroup->groups, next_group)
1484 			qgroup_iterator_add(&qgroup_list, glist->group);
1485 	}
1486 	ret = 0;
1487 out:
1488 	qgroup_iterator_clean(&qgroup_list);
1489 	return ret;
1490 }
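
/*
 * A worked example with hypothetical numbers for the function above: if
 * child qgroup 0/257 has rfer == excl == 1M (all of its extents exclusive)
 * and is being added under parent 1/100 (sign == 1), then 1/100 gains 1M on
 * both rfer and excl, takes over 0/257's reservations, and the same delta
 * is propagated to every higher-level parent reached via the iterator list.
 */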
1491 
1492 
1493 /*
1494  * Quick path for updating a qgroup with only exclusive refs.
1495  *
1496  * In that case, just updating all parents is enough.
1497  * Otherwise we need to do a full rescan.
1498  * Caller should also hold fs_info->qgroup_lock.
1499  *
1500  * Return 0 for a quick update, >0 if a full rescan is needed
1501  * and the INCONSISTENT flag will be set.
1502  * Return <0 for other errors.
1503  */
1504 static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1505 				   u64 src, u64 dst, int sign)
1506 {
1507 	struct btrfs_qgroup *qgroup;
1508 	int ret = 1;
1509 
1510 	qgroup = find_qgroup_rb(fs_info, src);
1511 	if (!qgroup)
1512 		goto out;
1513 	if (qgroup->excl == qgroup->rfer) {
1514 		ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
1515 		if (ret < 0)
1516 			goto out;
1517 		ret = 0;
1518 	}
1519 out:
1520 	if (ret)
1521 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1522 	return ret;
1523 }
1524 
1525 /*
1526  * Add relation between @src and @dst qgroup. The @prealloc is allocated by the
1527  * callers and transferred here (either used or freed on error).
1528  */
1529 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
1530 			      struct btrfs_qgroup_list *prealloc)
1531 {
1532 	struct btrfs_fs_info *fs_info = trans->fs_info;
1533 	struct btrfs_qgroup *parent;
1534 	struct btrfs_qgroup *member;
1535 	struct btrfs_qgroup_list *list;
1536 	int ret = 0;
1537 
1538 	ASSERT(prealloc);
1539 
1540 	/* Check the level of src and dst first */
1541 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1542 		return -EINVAL;
1543 
1544 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1545 	if (!fs_info->quota_root) {
1546 		ret = -ENOTCONN;
1547 		goto out;
1548 	}
1549 	member = find_qgroup_rb(fs_info, src);
1550 	parent = find_qgroup_rb(fs_info, dst);
1551 	if (!member || !parent) {
1552 		ret = -EINVAL;
1553 		goto out;
1554 	}
1555 
1556 	/* Check if such a qgroup relation exists first. */
1557 	list_for_each_entry(list, &member->groups, next_group) {
1558 		if (list->group == parent) {
1559 			ret = -EEXIST;
1560 			goto out;
1561 		}
1562 	}
1563 
1564 	ret = add_qgroup_relation_item(trans, src, dst);
1565 	if (ret)
1566 		goto out;
1567 
1568 	ret = add_qgroup_relation_item(trans, dst, src);
1569 	if (ret) {
1570 		del_qgroup_relation_item(trans, src, dst);
1571 		goto out;
1572 	}
1573 
1574 	spin_lock(&fs_info->qgroup_lock);
1575 	ret = __add_relation_rb(prealloc, member, parent);
1576 	prealloc = NULL;
1577 	if (ret < 0) {
1578 		spin_unlock(&fs_info->qgroup_lock);
1579 		goto out;
1580 	}
1581 	ret = quick_update_accounting(fs_info, src, dst, 1);
1582 	spin_unlock(&fs_info->qgroup_lock);
1583 out:
1584 	kfree(prealloc);
1585 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1586 	return ret;
1587 }
1588 
1589 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1590 				 u64 dst)
1591 {
1592 	struct btrfs_fs_info *fs_info = trans->fs_info;
1593 	struct btrfs_qgroup *parent;
1594 	struct btrfs_qgroup *member;
1595 	struct btrfs_qgroup_list *list;
1596 	bool found = false;
1597 	int ret = 0;
1598 	int ret2;
1599 
1600 	if (!fs_info->quota_root) {
1601 		ret = -ENOTCONN;
1602 		goto out;
1603 	}
1604 
1605 	member = find_qgroup_rb(fs_info, src);
1606 	parent = find_qgroup_rb(fs_info, dst);
1607 	/*
1608 	 * If the parent/member pair doesn't exist, then try to delete the
1609 	 * dead relation items only.
1610 	 */
1611 	if (!member || !parent)
1612 		goto delete_item;
1613 
1614 	/* Check if such a qgroup relation exists first. */
1615 	list_for_each_entry(list, &member->groups, next_group) {
1616 		if (list->group == parent) {
1617 			found = true;
1618 			break;
1619 		}
1620 	}
1621 
1622 delete_item:
1623 	ret = del_qgroup_relation_item(trans, src, dst);
1624 	if (ret < 0 && ret != -ENOENT)
1625 		goto out;
1626 	ret2 = del_qgroup_relation_item(trans, dst, src);
1627 	if (ret2 < 0 && ret2 != -ENOENT)
1628 		goto out;
1629 
1630 	/* At least one deletion succeeded, return 0 */
1631 	if (!ret || !ret2)
1632 		ret = 0;
1633 
1634 	if (found) {
1635 		spin_lock(&fs_info->qgroup_lock);
1636 		del_relation_rb(fs_info, src, dst);
1637 		ret = quick_update_accounting(fs_info, src, dst, -1);
1638 		spin_unlock(&fs_info->qgroup_lock);
1639 	}
1640 out:
1641 	return ret;
1642 }
1643 
1644 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1645 			      u64 dst)
1646 {
1647 	struct btrfs_fs_info *fs_info = trans->fs_info;
1648 	int ret = 0;
1649 
1650 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1651 	ret = __del_qgroup_relation(trans, src, dst);
1652 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1653 
1654 	return ret;
1655 }
1656 
1657 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1658 {
1659 	struct btrfs_fs_info *fs_info = trans->fs_info;
1660 	struct btrfs_root *quota_root;
1661 	struct btrfs_qgroup *qgroup;
1662 	struct btrfs_qgroup *prealloc = NULL;
1663 	int ret = 0;
1664 
1665 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1666 	if (!fs_info->quota_root) {
1667 		ret = -ENOTCONN;
1668 		goto out;
1669 	}
1670 	quota_root = fs_info->quota_root;
1671 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1672 	if (qgroup) {
1673 		ret = -EEXIST;
1674 		goto out;
1675 	}
1676 
1677 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1678 	if (!prealloc) {
1679 		ret = -ENOMEM;
1680 		goto out;
1681 	}
1682 
1683 	ret = add_qgroup_item(trans, quota_root, qgroupid);
1684 	if (ret)
1685 		goto out;
1686 
1687 	spin_lock(&fs_info->qgroup_lock);
1688 	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
1689 	spin_unlock(&fs_info->qgroup_lock);
1690 	prealloc = NULL;
1691 
1692 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1693 out:
1694 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1695 	kfree(prealloc);
1696 	return ret;
1697 }
1698 
1699 /*
1700  * Return 0 if we cannot delete the qgroup (not empty, or it has children, etc).
1701  * Return >0 if we can delete the qgroup.
1702  * Return <0 for other errors during tree search.
1703  */
1704 static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
1705 {
1706 	struct btrfs_key key;
1707 	struct btrfs_path *path;
1708 	int ret;
1709 
1710 	/*
1711 	 * Squota would never be inconsistent, but there can still be cases
1712 	 * where a dropped subvolume still has qgroup numbers, and squota
1713 	 * relies on such qgroups for future accounting.
1714 	 *
1715 	 * So for squota, do not allow dropping any non-zero qgroup.
1716 	 */
1717 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
1718 	    (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
1719 		return 0;
1720 
1721 	/* For higher level qgroup, we can only delete it if it has no child. */
1722 	if (btrfs_qgroup_level(qgroup->qgroupid)) {
1723 		if (!list_empty(&qgroup->members))
1724 			return 0;
1725 		return 1;
1726 	}
1727 
1728 	/*
1729 	 * For a level-0 qgroup, we can only delete it if there is no subvolume
1730 	 * for it.
1731 	 * This means that even if a subvolume is unlinked but not yet fully
1732 	 * dropped, we cannot delete the qgroup.
1733 	 */
1734 	key.objectid = qgroup->qgroupid;
1735 	key.type = BTRFS_ROOT_ITEM_KEY;
1736 	key.offset = -1ULL;
1737 	path = btrfs_alloc_path();
1738 	if (!path)
1739 		return -ENOMEM;
1740 
1741 	ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
1742 	btrfs_free_path(path);
1743 	/*
1744 	 * The @ret from btrfs_find_root() exactly matches our definition for
1745 	 * the return value, thus can be returned directly.
1746 	 */
1747 	return ret;
1748 }
1749 
1750 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1751 {
1752 	struct btrfs_fs_info *fs_info = trans->fs_info;
1753 	struct btrfs_qgroup *qgroup;
1754 	struct btrfs_qgroup_list *list;
1755 	int ret = 0;
1756 
1757 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1758 	if (!fs_info->quota_root) {
1759 		ret = -ENOTCONN;
1760 		goto out;
1761 	}
1762 
1763 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1764 	if (!qgroup) {
1765 		ret = -ENOENT;
1766 		goto out;
1767 	}
1768 
1769 	ret = can_delete_qgroup(fs_info, qgroup);
1770 	if (ret < 0)
1771 		goto out;
1772 	if (ret == 0) {
1773 		ret = -EBUSY;
1774 		goto out;
1775 	}
1776 
1777 	/* Check if there are no children of this qgroup */
1778 	if (!list_empty(&qgroup->members)) {
1779 		ret = -EBUSY;
1780 		goto out;
1781 	}
1782 
1783 	ret = del_qgroup_item(trans, qgroupid);
1784 	if (ret && ret != -ENOENT)
1785 		goto out;
1786 
1787 	while (!list_empty(&qgroup->groups)) {
1788 		list = list_first_entry(&qgroup->groups,
1789 					struct btrfs_qgroup_list, next_group);
1790 		ret = __del_qgroup_relation(trans, qgroupid,
1791 					    list->group->qgroupid);
1792 		if (ret)
1793 			goto out;
1794 	}
1795 
1796 	spin_lock(&fs_info->qgroup_lock);
1797 	/*
1798 	 * Warn on reserved space. The qgroup should have no child nor
1799 	 * corresponding subvolume.
1800 	 * Thus its reserved space should all be zero, no matter whether the
1801 	 * qgroup is consistent or which mode it is in.
1802 	 */
1803 	if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
1804 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
1805 	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
1806 		DEBUG_WARN();
1807 		btrfs_warn_rl(fs_info,
1808 "to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
1809 			      btrfs_qgroup_level(qgroup->qgroupid),
1810 			      btrfs_qgroup_subvolid(qgroup->qgroupid),
1811 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
1812 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
1813 			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
1814 
1815 	}
1816 	/*
1817 	 * The same for rfer/excl numbers, but that's only if our qgroup is
1818 	 * consistent and if it's in regular qgroup mode.
1819 	 * For simple mode it's not as accurate, thus we can hit non-zero values
1820 	 * very frequently.
1821 	 */
1822 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
1823 	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
1824 		if (qgroup->rfer || qgroup->excl ||
1825 		    qgroup->rfer_cmpr || qgroup->excl_cmpr) {
1826 			DEBUG_WARN();
1827 			qgroup_mark_inconsistent(fs_info,
1828 				"to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
1829 				btrfs_qgroup_level(qgroup->qgroupid),
1830 				btrfs_qgroup_subvolid(qgroup->qgroupid),
1831 				qgroup->rfer, qgroup->rfer_cmpr,
1832 				qgroup->excl, qgroup->excl_cmpr);
1833 		}
1834 	}
1835 	del_qgroup_rb(fs_info, qgroupid);
1836 	spin_unlock(&fs_info->qgroup_lock);
1837 
1838 	/*
1839 	 * Remove the qgroup from sysfs now without holding the qgroup_lock
1840 	 * spinlock, since the sysfs_remove_group() function needs to take
1841 	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1842 	 */
1843 	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1844 	kfree(qgroup);
1845 out:
1846 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1847 	return ret;
1848 }
1849 
1850 int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
1851 {
1852 	struct btrfs_trans_handle *trans;
1853 	int ret;
1854 
1855 	if (!btrfs_is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) ||
1856 	    !fs_info->quota_root)
1857 		return 0;
1858 
1859 	/*
1860 	 * Commit current transaction to make sure all the rfer/excl numbers
1861 	 * get updated.
1862 	 */
1863 	ret = btrfs_commit_current_transaction(fs_info->quota_root);
1864 	if (ret < 0)
1865 		return ret;
1866 
1867 	/* Start new trans to delete the qgroup info and limit items. */
1868 	trans = btrfs_start_transaction(fs_info->quota_root, 2);
1869 	if (IS_ERR(trans))
1870 		return PTR_ERR(trans);
1871 	ret = btrfs_remove_qgroup(trans, subvolid);
1872 	btrfs_end_transaction(trans);
1873 	/*
1874 	 * It's squota and the subvolume still has numbers needed for future
1875 	 * accounting; in this case we cannot delete it, so just skip it.
1876 	 *
1877 	 * Or the qgroup has already been removed by a qgroup rescan. In both
1878 	 * cases we're safe to ignore the error.
1879 	 */
1880 	if (ret == -EBUSY || ret == -ENOENT)
1881 		ret = 0;
1882 	return ret;
1883 }
1884 
1885 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1886 		       struct btrfs_qgroup_limit *limit)
1887 {
1888 	struct btrfs_fs_info *fs_info = trans->fs_info;
1889 	struct btrfs_qgroup *qgroup;
1890 	int ret = 0;
1891 	/* Sometimes we want to clear the limit on this qgroup.
1892 	 * To meet this requirement, we treat -1 as a special value
1893 	 * which tells the kernel to clear the limit on this qgroup.
1894 	 */
1895 	const u64 CLEAR_VALUE = -1;
1896 
1897 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1898 	if (!fs_info->quota_root) {
1899 		ret = -ENOTCONN;
1900 		goto out;
1901 	}
1902 
1903 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1904 	if (!qgroup) {
1905 		ret = -ENOENT;
1906 		goto out;
1907 	}
1908 
1909 	spin_lock(&fs_info->qgroup_lock);
1910 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1911 		if (limit->max_rfer == CLEAR_VALUE) {
1912 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1913 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1914 			qgroup->max_rfer = 0;
1915 		} else {
1916 			qgroup->max_rfer = limit->max_rfer;
1917 		}
1918 	}
1919 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1920 		if (limit->max_excl == CLEAR_VALUE) {
1921 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1922 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1923 			qgroup->max_excl = 0;
1924 		} else {
1925 			qgroup->max_excl = limit->max_excl;
1926 		}
1927 	}
1928 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1929 		if (limit->rsv_rfer == CLEAR_VALUE) {
1930 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1931 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1932 			qgroup->rsv_rfer = 0;
1933 		} else {
1934 			qgroup->rsv_rfer = limit->rsv_rfer;
1935 		}
1936 	}
1937 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1938 		if (limit->rsv_excl == CLEAR_VALUE) {
1939 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1940 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1941 			qgroup->rsv_excl = 0;
1942 		} else {
1943 			qgroup->rsv_excl = limit->rsv_excl;
1944 		}
1945 	}
1946 	qgroup->lim_flags |= limit->flags;
1947 
1948 	spin_unlock(&fs_info->qgroup_lock);
1949 
1950 	ret = update_qgroup_limit_item(trans, qgroup);
1951 	if (ret)
1952 		qgroup_mark_inconsistent(fs_info, "qgroup item update error %d", ret);
1953 
1954 out:
1955 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1956 	return ret;
1957 }
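
/*
 * A minimal userspace sketch of the CLEAR_VALUE convention handled above.
 * clear_max_rfer() is a hypothetical helper and @fd is assumed to be open
 * on any file or directory inside the filesystem; passing -1 in max_rfer
 * with the matching flag set asks the kernel to drop that limit:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *
 *	static int clear_max_rfer(int fd, __u64 qgroupid)
 *	{
 *		struct btrfs_ioctl_qgroup_limit_args args = {
 *			.qgroupid = qgroupid,
 *			.lim = {
 *				.flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
 *				.max_rfer = (__u64)-1,
 *			},
 *		};
 *
 *		return ioctl(fd, BTRFS_IOC_QGROUP_LIMIT, &args);
 *	}
 */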
1958 
1959 /*
1960  * Inform qgroup to trace one dirty extent, its info is recorded in @record.
1961  * So qgroup can account it at transaction committing time.
1962  *
1963  * No lock version, the caller must hold the delayed ref lock and have allocated memory,
1964  * then call btrfs_qgroup_trace_extent_post() after exiting lock context.
1965  *
1966  * Return 0 for successful insertion
1967  * Return >0 for existing record, caller can free @record safely.
1968  * Return <0 for insertion failure, caller can free @record safely.
1969  */
1970 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
1971 				     struct btrfs_delayed_ref_root *delayed_refs,
1972 				     struct btrfs_qgroup_extent_record *record,
1973 				     u64 bytenr)
1974 {
1975 	struct btrfs_qgroup_extent_record *existing, *ret;
1976 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
1977 
1978 	if (!btrfs_qgroup_full_accounting(fs_info))
1979 		return 1;
1980 
1981 #if BITS_PER_LONG == 32
1982 	if (bytenr >= MAX_LFS_FILESIZE) {
1983 		btrfs_err_rl(fs_info,
1984 "qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
1985 			     bytenr);
1986 		btrfs_err_32bit_limit(fs_info);
1987 		return -EOVERFLOW;
1988 	}
1989 #endif
1990 
1991 	trace_btrfs_qgroup_trace_extent(fs_info, record, bytenr);
1992 
1993 	xa_lock(&delayed_refs->dirty_extents);
1994 	existing = xa_load(&delayed_refs->dirty_extents, index);
1995 	if (existing) {
1996 		if (record->data_rsv && !existing->data_rsv) {
1997 			existing->data_rsv = record->data_rsv;
1998 			existing->data_rsv_refroot = record->data_rsv_refroot;
1999 		}
2000 		xa_unlock(&delayed_refs->dirty_extents);
2001 		return 1;
2002 	}
2003 
2004 	ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
2005 	xa_unlock(&delayed_refs->dirty_extents);
2006 	if (xa_is_err(ret)) {
2007 		qgroup_mark_inconsistent(fs_info, "xarray insert error: %d", xa_err(ret));
2008 		return xa_err(ret);
2009 	}
2010 
2011 	return 0;
2012 }
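
/*
 * For reference, a worked example of the xarray keying used above (and
 * reversed in btrfs_qgroup_account_extents()), assuming a 4KiB sectorsize,
 * i.e. sectorsize_bits == 12:
 *
 *	bytenr = 1GiB = 0x40000000
 *	index  = bytenr >> 12 = 0x40000
 *	index << 12 = 0x40000000 = bytenr
 *
 * The round trip is lossless because extent bytenrs are always sectorsize
 * aligned, and dividing out the sectorsize helps keep the index inside the
 * unsigned long xarray range guarded by the 32-bit check above.
 */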
2013 
2014 /*
2015  * Post handler after qgroup_trace_extent_nolock().
2016  *
2017  * NOTE: Current qgroup does the expensive backref walk at transaction
2018  * committing time with TRANS_STATE_COMMIT_DOING, which blocks incoming
2019  * new transactions.
2020  * This is designed to allow btrfs_find_all_roots() to get correct new_roots
2021  * result.
2022  *
2023  * However for old_roots there is no need to do backref walk at that time,
2024  * since we search commit roots to walk backref and the result will always be
2025  * correct.
2026  *
2027  * Due to the lockless nature of the _nolock version, we can't do the
2028  * backref walk there. So we must call btrfs_qgroup_trace_extent_post()
2029  * after exiting the spinlock context.
2030  *
2031  * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
2032  * using current root, then we can move all expensive backref walk out of
2033  * transaction committing, but not now as qgroup accounting will be wrong again.
2034  */
2035 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
2036 				   struct btrfs_qgroup_extent_record *qrecord,
2037 				   u64 bytenr)
2038 {
2039 	struct btrfs_fs_info *fs_info = trans->fs_info;
2040 	struct btrfs_backref_walk_ctx ctx = {
2041 		.bytenr = bytenr,
2042 		.fs_info = fs_info,
2043 	};
2044 	int ret;
2045 
2046 	if (!btrfs_qgroup_full_accounting(fs_info))
2047 		return 0;
2048 	/*
2049 	 * We are always called in a context where we are already holding a
2050 	 * transaction handle. Often we are called when adding a data delayed
2051 	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
2052 	 * in which case we will be holding a write lock on extent buffer from a
2053 	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
2054 	 * acquire fs_info->commit_root_sem, because that is a higher level lock
2055 	 * that must be acquired before locking any extent buffers.
2056 	 *
2057 	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
2058 	 * but we can't pass it a non-NULL transaction handle, because otherwise
2059 	 * it would not use commit roots and would lock extent buffers, causing
2060 	 * a deadlock if it ends up trying to read lock the same extent buffer
2061 	 * that was previously write locked at btrfs_truncate_inode_items().
2062 	 *
2063 	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
2064 	 * explicitly tell it to not acquire the commit_root_sem - if we are
2065 	 * holding a transaction handle we don't need its protection.
2066 	 */
2067 	ASSERT(trans != NULL);
2068 
2069 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2070 		return 0;
2071 
2072 	ret = btrfs_find_all_roots(&ctx, true);
2073 	if (ret < 0) {
2074 		qgroup_mark_inconsistent(fs_info,
2075 				"error accounting new delayed refs extent: %d", ret);
2076 		return 0;
2077 	}
2078 
2079 	/*
2080 	 * Here we don't need to get the lock of
2081 	 * trans->transaction->delayed_refs, since the inserted qrecord won't
2082 	 * be deleted, only qrecord->node may be modified (new qrecord insert).
2083 	 *
2084 	 * So modifying qrecord->old_roots is safe here
2085 	 */
2086 	qrecord->old_roots = ctx.roots;
2087 	return 0;
2088 }
2089 
2090 /*
2091  * Inform qgroup to trace one dirty extent, specified by @bytenr and
2092  * @num_bytes.
2093  * So qgroup can account it at transaction commit time.
2094  *
2095  * Better encapsulated version, with memory allocation and backref walk for
2096  * commit roots.
2097  * So this can sleep.
2098  *
2099  * Return 0 if the operation is done.
2100  * Return <0 for error, like memory allocation failure or invalid parameter
2101  * (NULL trans)
2102  */
2103 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2104 			      u64 num_bytes)
2105 {
2106 	struct btrfs_fs_info *fs_info = trans->fs_info;
2107 	struct btrfs_qgroup_extent_record *record;
2108 	struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
2109 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2110 	int ret;
2111 
2112 	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
2113 		return 0;
2114 	record = kzalloc(sizeof(*record), GFP_NOFS);
2115 	if (!record)
2116 		return -ENOMEM;
2117 
2118 	if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
2119 		kfree(record);
2120 		return -ENOMEM;
2121 	}
2122 
2123 	record->num_bytes = num_bytes;
2124 
2125 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);
2126 	if (ret) {
2127 		/* Clean up if insertion fails or item exists. */
2128 		xa_release(&delayed_refs->dirty_extents, index);
2129 		kfree(record);
2130 		return 0;
2131 	}
2132 	return btrfs_qgroup_trace_extent_post(trans, record, bytenr);
2133 }
2134 
2135 /*
2136  * Inform qgroup to trace all the file extent items in a leaf
2137  *
2138  * Return 0 for success
2139  * Return <0 for error (ENOMEM)
2140  */
2141 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
2142 				  struct extent_buffer *eb)
2143 {
2144 	struct btrfs_fs_info *fs_info = trans->fs_info;
2145 	int nr = btrfs_header_nritems(eb);
2146 	int i, extent_type, ret;
2147 	struct btrfs_key key;
2148 	struct btrfs_file_extent_item *fi;
2149 	u64 bytenr, num_bytes;
2150 
2151 	/* We can be called directly from walk_up_proc() */
2152 	if (!btrfs_qgroup_full_accounting(fs_info))
2153 		return 0;
2154 
2155 	for (i = 0; i < nr; i++) {
2156 		btrfs_item_key_to_cpu(eb, &key, i);
2157 
2158 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2159 			continue;
2160 
2161 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2162 		/* Filter out non-qgroup-accountable extents. */
2163 		extent_type = btrfs_file_extent_type(eb, fi);
2164 
2165 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
2166 			continue;
2167 
2168 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
2169 		if (!bytenr)
2170 			continue;
2171 
2172 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
2173 
2174 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
2175 		if (ret)
2176 			return ret;
2177 	}
2178 	cond_resched();
2179 	return 0;
2180 }
2181 
2182 /*
2183  * Walk up the tree from the bottom, freeing leaves and any interior
2184  * nodes which have had all slots visited. If a node (leaf or
2185  * interior) is freed, the node above it will have it's slot
2186  * incremented. The root node will never be freed.
2187  *
2188  * At the end of this function, we should have a path which has all
2189  * slots incremented to the next position for a search. If we need to
2190  * read a new node it will be NULL and the node above it will have the
2191  * correct slot selected for a later read.
2192  *
2193  * If we increment the root nodes slot counter past the number of
2194  * elements, 1 is returned to signal completion of the search.
2195  */
2196 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
2197 {
2198 	int level = 0;
2199 	int nr, slot;
2200 	struct extent_buffer *eb;
2201 
2202 	if (root_level == 0)
2203 		return 1;
2204 
2205 	while (level <= root_level) {
2206 		eb = path->nodes[level];
2207 		nr = btrfs_header_nritems(eb);
2208 		path->slots[level]++;
2209 		slot = path->slots[level];
2210 		if (slot >= nr || level == 0) {
2211 			/*
2212 			 * Don't free the root - we will detect this
2213 			 * condition after our loop and return a
2214 			 * positive value for the caller to stop walking the tree.
2215 			 */
2216 			if (level != root_level) {
2217 				btrfs_tree_unlock_rw(eb, path->locks[level]);
2218 				path->locks[level] = 0;
2219 
2220 				free_extent_buffer(eb);
2221 				path->nodes[level] = NULL;
2222 				path->slots[level] = 0;
2223 			}
2224 		} else {
2225 			/*
2226 			 * We have a valid slot to walk back down
2227 			 * from. Stop here so caller can process these
2228 			 * new nodes.
2229 			 */
2230 			break;
2231 		}
2232 
2233 		level++;
2234 	}
2235 
2236 	eb = path->nodes[root_level];
2237 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
2238 		return 1;
2239 
2240 	return 0;
2241 }
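
/*
 * A short worked trace of the walk-up above (illustrative numbers only).
 * Assume root_level == 1, the root node holds 3 items, the current leaf
 * holds 3 items and path->slots[] == { 2, 0 }:
 *
 *	level 0: slots[0]++ gives 3, and level == 0 always drops the leaf,
 *		 so it is unlocked, freed and cleared from the path;
 *	level 1: slots[1]++ gives 1 < 3, a valid slot to walk back down
 *		 from, so we break out and return 0.
 *
 * Only once slots[root_level] reaches the root's item count does the
 * function return 1 to signal that the search completed.
 */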
2242 
2243 /*
2244  * Helper function to trace a subtree tree block swap.
2245  *
2246  * The swap will happen in highest tree block, but there may be a lot of
2247  * tree blocks involved.
2248  *
2249  * For example:
2250  *  OO = Old tree blocks
2251  *  NN = New tree blocks allocated during balance
2252  *
2253  *           File tree (257)                  Reloc tree for 257
2254  * L2              OO                                NN
2255  *               /    \                            /    \
2256  * L1          OO      OO (a)                    OO      NN (a)
2257  *            / \     / \                       / \     / \
2258  * L0       OO   OO OO   OO                   OO   OO NN   NN
2259  *                  (b)  (c)                          (b)  (c)
2260  *
2261  * When calling qgroup_trace_extent_swap(), we will pass:
2262  * @src_eb = OO(a)
2263  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2264  * @dst_level = 0
2265  * @root_level = 1
2266  *
2267  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2268  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2269  *
2270  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2271  *
2272  * 1) Tree search from @src_eb
2273  *    It acts as a simplified btrfs_search_slot().
2274  *    The key for search can be extracted from @dst_path->nodes[dst_level]
2275  *    (first key).
2276  *
2277  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2278  *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
2279  *    They should be marked during previous (@dst_level = 1) iteration.
2280  *
2281  * 3) Mark file extents in leaves dirty
2282  *    We don't have a good way to pick out new file extents only.
2283  *    So we still follow the old method of scanning all file extents in
2284  *    the leaf.
2285  *
2286  * This function can free us from keeping two paths, thus later we only need
2287  * to care about how to iterate all new tree blocks in reloc tree.
2288  */
2289 static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
2290 				    struct extent_buffer *src_eb,
2291 				    struct btrfs_path *dst_path,
2292 				    int dst_level, int root_level,
2293 				    bool trace_leaf)
2294 {
2295 	struct btrfs_key key;
2296 	struct btrfs_path *src_path;
2297 	struct btrfs_fs_info *fs_info = trans->fs_info;
2298 	u32 nodesize = fs_info->nodesize;
2299 	int cur_level = root_level;
2300 	int ret;
2301 
2302 	BUG_ON(dst_level > root_level);
2303 	/* Level mismatch */
2304 	if (btrfs_header_level(src_eb) != root_level)
2305 		return -EINVAL;
2306 
2307 	src_path = btrfs_alloc_path();
2308 	if (!src_path) {
2309 		ret = -ENOMEM;
2310 		goto out;
2311 	}
2312 
2313 	if (dst_level)
2314 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2315 	else
2316 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2317 
2318 	/* For src_path */
2319 	refcount_inc(&src_eb->refs);
2320 	src_path->nodes[root_level] = src_eb;
2321 	src_path->slots[root_level] = dst_path->slots[root_level];
2322 	src_path->locks[root_level] = 0;
2323 
2324 	/* A simplified version of btrfs_search_slot() */
2325 	while (cur_level >= dst_level) {
2326 		struct btrfs_key src_key;
2327 		struct btrfs_key dst_key;
2328 
2329 		if (src_path->nodes[cur_level] == NULL) {
2330 			struct extent_buffer *eb;
2331 			int parent_slot;
2332 
2333 			eb = src_path->nodes[cur_level + 1];
2334 			parent_slot = src_path->slots[cur_level + 1];
2335 
2336 			eb = btrfs_read_node_slot(eb, parent_slot);
2337 			if (IS_ERR(eb)) {
2338 				ret = PTR_ERR(eb);
2339 				goto out;
2340 			}
2341 
2342 			src_path->nodes[cur_level] = eb;
2343 
2344 			btrfs_tree_read_lock(eb);
2345 			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2346 		}
2347 
2348 		src_path->slots[cur_level] = dst_path->slots[cur_level];
2349 		if (cur_level) {
2350 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2351 					&dst_key, dst_path->slots[cur_level]);
2352 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2353 					&src_key, src_path->slots[cur_level]);
2354 		} else {
2355 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2356 					&dst_key, dst_path->slots[cur_level]);
2357 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2358 					&src_key, src_path->slots[cur_level]);
2359 		}
2360 		/* Content mismatch, something went wrong */
2361 		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2362 			ret = -ENOENT;
2363 			goto out;
2364 		}
2365 		cur_level--;
2366 	}
2367 
2368 	/*
2369 	 * Now both @dst_path and @src_path have been populated, record the tree
2370 	 * blocks for qgroup accounting.
2371 	 */
2372 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2373 					nodesize);
2374 	if (ret < 0)
2375 		goto out;
2376 	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2377 					nodesize);
2378 	if (ret < 0)
2379 		goto out;
2380 
2381 	/* Record leaf file extents */
2382 	if (dst_level == 0 && trace_leaf) {
2383 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2384 		if (ret < 0)
2385 			goto out;
2386 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2387 	}
2388 out:
2389 	btrfs_free_path(src_path);
2390 	return ret;
2391 }
2392 
2393 /*
2394  * Helper function to do recursive generation-aware depth-first search, to
2395  * locate all new tree blocks in a subtree of reloc tree.
2396  *
2397  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2398  *         reloc tree
2399  * L2         NN (a)
2400  *          /    \
2401  * L1    OO        NN (b)
2402  *      /  \      /  \
2403  * L0  OO  OO    OO  NN
2404  *               (c) (d)
2405  * If we pass:
2406  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2407  * @cur_level = 1
2408  * @root_level = 1
2409  *
2410  * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to
2411  * trace the above tree blocks along with their counterparts in the file tree.
2412  * During the search, old tree blocks like OO(c) will be skipped as tree block swap
2413  * won't affect OO(c).
2414  */
2415 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
2416 					   struct extent_buffer *src_eb,
2417 					   struct btrfs_path *dst_path,
2418 					   int cur_level, int root_level,
2419 					   u64 last_snapshot, bool trace_leaf)
2420 {
2421 	struct btrfs_fs_info *fs_info = trans->fs_info;
2422 	struct extent_buffer *eb;
2423 	bool need_cleanup = false;
2424 	int ret = 0;
2425 	int i;
2426 
2427 	/* Level sanity check */
2428 	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2429 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2430 	    root_level < cur_level) {
2431 		btrfs_err_rl(fs_info,
2432 			"%s: bad levels, cur_level=%d root_level=%d",
2433 			__func__, cur_level, root_level);
2434 		return -EUCLEAN;
2435 	}
2436 
2437 	/* Read the tree block if needed */
2438 	if (dst_path->nodes[cur_level] == NULL) {
2439 		int parent_slot;
2440 		u64 child_gen;
2441 
2442 		/*
2443 		 * dst_path->nodes[root_level] must be initialized before
2444 		 * calling this function.
2445 		 */
2446 		if (cur_level == root_level) {
2447 			btrfs_err_rl(fs_info,
2448 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2449 				__func__, root_level, root_level, cur_level);
2450 			return -EUCLEAN;
2451 		}
2452 
2453 		/*
2454 		 * We need to get child blockptr/gen from parent before we can
2455 		 * read it.
2456 		 */
2457 		eb = dst_path->nodes[cur_level + 1];
2458 		parent_slot = dst_path->slots[cur_level + 1];
2459 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2460 
2461 		/* This node is old, no need to trace */
2462 		if (child_gen < last_snapshot)
2463 			goto out;
2464 
2465 		eb = btrfs_read_node_slot(eb, parent_slot);
2466 		if (IS_ERR(eb)) {
2467 			ret = PTR_ERR(eb);
2468 			goto out;
2469 		}
2470 
2471 		dst_path->nodes[cur_level] = eb;
2472 		dst_path->slots[cur_level] = 0;
2473 
2474 		btrfs_tree_read_lock(eb);
2475 		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2476 		need_cleanup = true;
2477 	}
2478 
2479 	/* Now record this tree block and its counterpart for qgroups */
2480 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2481 				       root_level, trace_leaf);
2482 	if (ret < 0)
2483 		goto cleanup;
2484 
2485 	eb = dst_path->nodes[cur_level];
2486 
2487 	if (cur_level > 0) {
2488 		/* Iterate all child tree blocks */
2489 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2490 			/* Skip old tree blocks as they won't be swapped */
2491 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2492 				continue;
2493 			dst_path->slots[cur_level] = i;
2494 
2495 			/* Recursive call (at most 7 times) */
2496 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2497 					dst_path, cur_level - 1, root_level,
2498 					last_snapshot, trace_leaf);
2499 			if (ret < 0)
2500 				goto cleanup;
2501 		}
2502 	}
2503 
2504 cleanup:
2505 	if (need_cleanup) {
2506 		/* Clean up */
2507 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2508 				     dst_path->locks[cur_level]);
2509 		free_extent_buffer(dst_path->nodes[cur_level]);
2510 		dst_path->nodes[cur_level] = NULL;
2511 		dst_path->slots[cur_level] = 0;
2512 		dst_path->locks[cur_level] = 0;
2513 	}
2514 out:
2515 	return ret;
2516 }
2517 
2518 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2519 				struct extent_buffer *src_eb,
2520 				struct extent_buffer *dst_eb,
2521 				u64 last_snapshot, bool trace_leaf)
2522 {
2523 	struct btrfs_fs_info *fs_info = trans->fs_info;
2524 	struct btrfs_path *dst_path = NULL;
2525 	int level;
2526 	int ret;
2527 
2528 	if (!btrfs_qgroup_full_accounting(fs_info))
2529 		return 0;
2530 
2531 	/* Wrong parameter order */
2532 	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2533 		btrfs_err_rl(fs_info,
2534 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2535 			     btrfs_header_generation(src_eb),
2536 			     btrfs_header_generation(dst_eb));
2537 		return -EUCLEAN;
2538 	}
2539 
2540 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2541 		ret = -EIO;
2542 		goto out;
2543 	}
2544 
2545 	level = btrfs_header_level(dst_eb);
2546 	dst_path = btrfs_alloc_path();
2547 	if (!dst_path) {
2548 		ret = -ENOMEM;
2549 		goto out;
2550 	}
2551 	/* For dst_path */
2552 	refcount_inc(&dst_eb->refs);
2553 	dst_path->nodes[level] = dst_eb;
2554 	dst_path->slots[level] = 0;
2555 	dst_path->locks[level] = 0;
2556 
2557 	/* Do the generation-aware depth-first search */
2558 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2559 					      level, last_snapshot, trace_leaf);
2560 	if (ret < 0)
2561 		goto out;
2562 	ret = 0;
2563 
2564 out:
2565 	btrfs_free_path(dst_path);
2566 	if (ret < 0)
2567 		qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
2568 	return ret;
2569 }
2570 
2571 /*
2572  * Inform qgroup to trace a whole subtree, including all its child tree
2573  * blocks and data.
2574  * The root tree block is specified by @root_eb.
2575  *
2576  * Normally used by relocation(tree block swap) and subvolume deletion.
2577  *
2578  * Return 0 for success
2579  * Return <0 for error(ENOMEM or tree search error)
2580  */
2581 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2582 			       struct extent_buffer *root_eb,
2583 			       u64 root_gen, int root_level)
2584 {
2585 	struct btrfs_fs_info *fs_info = trans->fs_info;
2586 	int ret = 0;
2587 	int level;
2588 	u8 drop_subptree_thres;
2589 	struct extent_buffer *eb = root_eb;
2590 	struct btrfs_path *path = NULL;
2591 
2592 	ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
2593 	ASSERT(root_eb != NULL);
2594 
2595 	if (!btrfs_qgroup_full_accounting(fs_info))
2596 		return 0;
2597 
2598 	spin_lock(&fs_info->qgroup_lock);
2599 	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2600 	spin_unlock(&fs_info->qgroup_lock);
2601 
2602 	/*
2603 	 * This function only gets called for snapshot drop. If we hit a high
2604 	 * node here, it means we are going to change ownership for quite a lot
2605 	 * of extents, which will greatly slow down btrfs_commit_transaction().
2606 	 *
2607 	 * So if we find a high tree here, we just skip the accounting and
2608 	 * mark qgroup inconsistent.
2609 	 */
2610 	if (root_level >= drop_subptree_thres) {
2611 		qgroup_mark_inconsistent(fs_info, "subtree level reached threshold");
2612 		return 0;
2613 	}
2614 
2615 	if (!extent_buffer_uptodate(root_eb)) {
2616 		struct btrfs_tree_parent_check check = {
2617 			.transid = root_gen,
2618 			.level = root_level
2619 		};
2620 
2621 		ret = btrfs_read_extent_buffer(root_eb, &check);
2622 		if (ret)
2623 			goto out;
2624 	}
2625 
2626 	if (root_level == 0) {
2627 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2628 		goto out;
2629 	}
2630 
2631 	path = btrfs_alloc_path();
2632 	if (!path)
2633 		return -ENOMEM;
2634 
2635 	/*
2636 	 * Walk down the tree.  Missing extent blocks are filled in as
2637 	 * we go. Metadata is accounted every time we read a new
2638 	 * extent block.
2639 	 *
2640 	 * When we reach a leaf, we account for file extent items in it,
2641 	 * walk back up the tree (adjusting slot pointers as we go)
2642 	 * and restart the search process.
2643 	 */
2644 	refcount_inc(&root_eb->refs);	/* For path */
2645 	path->nodes[root_level] = root_eb;
2646 	path->slots[root_level] = 0;
2647 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2648 walk_down:
2649 	level = root_level;
2650 	while (level >= 0) {
2651 		if (path->nodes[level] == NULL) {
2652 			int parent_slot;
2653 			u64 child_bytenr;
2654 
2655 			/*
2656 			 * We need to get child blockptr from parent before we
2657 			 * can read it.
2658 			 */
2659 			eb = path->nodes[level + 1];
2660 			parent_slot = path->slots[level + 1];
2661 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2662 
2663 			eb = btrfs_read_node_slot(eb, parent_slot);
2664 			if (IS_ERR(eb)) {
2665 				ret = PTR_ERR(eb);
2666 				goto out;
2667 			}
2668 
2669 			path->nodes[level] = eb;
2670 			path->slots[level] = 0;
2671 
2672 			btrfs_tree_read_lock(eb);
2673 			path->locks[level] = BTRFS_READ_LOCK;
2674 
2675 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2676 							fs_info->nodesize);
2677 			if (ret)
2678 				goto out;
2679 		}
2680 
2681 		if (level == 0) {
2682 			ret = btrfs_qgroup_trace_leaf_items(trans,
2683 							    path->nodes[level]);
2684 			if (ret)
2685 				goto out;
2686 
2687 			/* Nonzero return here means we completed our search */
2688 			ret = adjust_slots_upwards(path, root_level);
2689 			if (ret)
2690 				break;
2691 
2692 			/* Restart search with new slots */
2693 			goto walk_down;
2694 		}
2695 
2696 		level--;
2697 	}
2698 
2699 	ret = 0;
2700 out:
2701 	btrfs_free_path(path);
2702 
2703 	return ret;
2704 }
2705 
2706 static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
2707 {
2708 	if (!list_empty(&qgroup->nested_iterator))
2709 		return;
2710 
2711 	list_add_tail(&qgroup->nested_iterator, head);
2712 }
2713 
2714 static void qgroup_iterator_nested_clean(struct list_head *head)
2715 {
2716 	while (!list_empty(head)) {
2717 		struct btrfs_qgroup *qgroup;
2718 
2719 		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
2720 		list_del_init(&qgroup->nested_iterator);
2721 	}
2722 }
2723 
2724 #define UPDATE_NEW	0
2725 #define UPDATE_OLD	1
2726 /*
2727  * Walk all of the roots that point to the bytenr and adjust their refcnts.
2728  */
2729 static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2730 				 struct ulist *roots, struct list_head *qgroups,
2731 				 u64 seq, int update_old)
2732 {
2733 	struct ulist_node *unode;
2734 	struct ulist_iterator uiter;
2735 	struct btrfs_qgroup *qg;
2736 
2737 	if (!roots)
2738 		return;
2739 	ULIST_ITER_INIT(&uiter);
2740 	while ((unode = ulist_next(roots, &uiter))) {
2741 		LIST_HEAD(tmp);
2742 
2743 		qg = find_qgroup_rb(fs_info, unode->val);
2744 		if (!qg)
2745 			continue;
2746 
2747 		qgroup_iterator_nested_add(qgroups, qg);
2748 		qgroup_iterator_add(&tmp, qg);
2749 		list_for_each_entry(qg, &tmp, iterator) {
2750 			struct btrfs_qgroup_list *glist;
2751 
2752 			if (update_old)
2753 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2754 			else
2755 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2756 
2757 			list_for_each_entry(glist, &qg->groups, next_group) {
2758 				qgroup_iterator_nested_add(qgroups, glist->group);
2759 				qgroup_iterator_add(&tmp, glist->group);
2760 			}
2761 		}
2762 		qgroup_iterator_clean(&tmp);
2763 	}
2764 }
2765 
2766 /*
2767  * Update qgroup rfer/excl counters.
2768  * Rfer update is easy, the code can explain itself.
2769  *
2770  * Excl update is tricky, the update is split into 2 parts.
2771  * Part 1: Possible exclusive <-> sharing detect:
2772  *	|	A	|	!A	|
2773  *  -------------------------------------
2774  *  B	|	*	|	-	|
2775  *  -------------------------------------
2776  *  !B	|	+	|	**	|
2777  *  -------------------------------------
2778  *
2779  * Conditions:
2780  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2781  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2782  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2783  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2784  *
2785  * Results:
2786  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2787  * *: Definitely not changed.		**: Possible unchanged.
2788  *
2789  * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
2790  *
2791  * To make the logic clear, we first use condition A and B to split
2792  * combination into 4 results.
2793  *
2794  * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
2795  * those cases only one variant may be 0.
2796  *
2797  * Lastly, check result **, since there are 2 variants that may be 0, split
2798  * them again (2x2).
2799  * But this time we don't need to consider other things; the code and logic
2800  * are easy to understand now.
2801  */
2802 static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
2803 				   struct list_head *qgroups, u64 nr_old_roots,
2804 				   u64 nr_new_roots, u64 num_bytes, u64 seq)
2805 {
2806 	struct btrfs_qgroup *qg;
2807 
2808 	list_for_each_entry(qg, qgroups, nested_iterator) {
2809 		u64 cur_new_count, cur_old_count;
2810 		bool dirty = false;
2811 
2812 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2813 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2814 
2815 		trace_btrfs_qgroup_update_counters(fs_info, qg, cur_old_count,
2816 						   cur_new_count);
2817 
2818 		/* Rfer update part */
2819 		if (cur_old_count == 0 && cur_new_count > 0) {
2820 			qg->rfer += num_bytes;
2821 			qg->rfer_cmpr += num_bytes;
2822 			dirty = true;
2823 		}
2824 		if (cur_old_count > 0 && cur_new_count == 0) {
2825 			qg->rfer -= num_bytes;
2826 			qg->rfer_cmpr -= num_bytes;
2827 			dirty = true;
2828 		}
2829 
2830 		/* Excl update part */
2831 		/* Exclusive/none -> shared case */
2832 		if (cur_old_count == nr_old_roots &&
2833 		    cur_new_count < nr_new_roots) {
2834 			/* Exclusive -> shared */
2835 			if (cur_old_count != 0) {
2836 				qg->excl -= num_bytes;
2837 				qg->excl_cmpr -= num_bytes;
2838 				dirty = true;
2839 			}
2840 		}
2841 
2842 		/* Shared -> exclusive/none case */
2843 		if (cur_old_count < nr_old_roots &&
2844 		    cur_new_count == nr_new_roots) {
2845 			/* Shared->exclusive */
2846 			if (cur_new_count != 0) {
2847 				qg->excl += num_bytes;
2848 				qg->excl_cmpr += num_bytes;
2849 				dirty = true;
2850 			}
2851 		}
2852 
2853 		/* Exclusive/none -> exclusive/none case */
2854 		if (cur_old_count == nr_old_roots &&
2855 		    cur_new_count == nr_new_roots) {
2856 			if (cur_old_count == 0) {
2857 				/* None -> exclusive/none */
2858 
2859 				if (cur_new_count != 0) {
2860 					/* None -> exclusive */
2861 					qg->excl += num_bytes;
2862 					qg->excl_cmpr += num_bytes;
2863 					dirty = true;
2864 				}
2865 				/* None -> none, nothing changed */
2866 			} else {
2867 				/* Exclusive -> exclusive/none */
2868 
2869 				if (cur_new_count == 0) {
2870 					/* Exclusive -> none */
2871 					qg->excl -= num_bytes;
2872 					qg->excl_cmpr -= num_bytes;
2873 					dirty = true;
2874 				}
2875 				/* Exclusive -> exclusive, nothing changed */
2876 			}
2877 		}
2878 
2879 		if (dirty)
2880 			qgroup_dirty(fs_info, qg);
2881 	}
2882 }
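
/*
 * A worked example of the table above (illustrative numbers only).  Say a
 * 16KiB extent was referenced by subvolumes A and B before the change and
 * only by A afterwards, so nr_old_roots == 2 and nr_new_roots == 1:
 *
 * - For A: cur_old_count == 1 < nr_old_roots and cur_new_count == 1 ==
 *   nr_new_roots, the "+" cell: shared -> exclusive, so A gets
 *   excl += 16KiB while its rfer is unchanged.
 * - For B: cur_old_count == 1 and cur_new_count == 0, so B drops the
 *   extent from rfer (rfer -= 16KiB) with no excl change, as B never
 *   counted the extent as exclusive.
 */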
2883 
2884 /*
2885  * Check if the @roots potentially is a list of fs tree roots
2886  *
2887  * Return 0 for definitely not a fs/subvol tree roots ulist
2888  * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2889  *          one as well)
2890  */
2891 static int maybe_fs_roots(struct ulist *roots)
2892 {
2893 	struct ulist_node *unode;
2894 	struct ulist_iterator uiter;
2895 
2896 	/* Empty one, still possible for fs roots */
2897 	if (!roots || roots->nnodes == 0)
2898 		return 1;
2899 
2900 	ULIST_ITER_INIT(&uiter);
2901 	unode = ulist_next(roots, &uiter);
2902 	if (!unode)
2903 		return 1;
2904 
2905 	/*
2906 	 * If it contains fs tree roots, then it must belong to fs/subvol
2907 	 * trees.
2908 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2909 	 */
2910 	return btrfs_is_fstree(unode->val);
2911 }
2912 
2913 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2914 				u64 num_bytes, struct ulist *old_roots,
2915 				struct ulist *new_roots)
2916 {
2917 	struct btrfs_fs_info *fs_info = trans->fs_info;
2918 	LIST_HEAD(qgroups);
2919 	u64 seq;
2920 	u64 nr_new_roots = 0;
2921 	u64 nr_old_roots = 0;
2922 	int ret = 0;
2923 
2924 	/*
2925 	 * If quotas get disabled meanwhile, the resources need to be freed and
2926 	 * we can't just exit here.
2927 	 */
2928 	if (!btrfs_qgroup_full_accounting(fs_info) ||
2929 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2930 		goto out_free;
2931 
2932 	if (new_roots) {
2933 		if (!maybe_fs_roots(new_roots))
2934 			goto out_free;
2935 		nr_new_roots = new_roots->nnodes;
2936 	}
2937 	if (old_roots) {
2938 		if (!maybe_fs_roots(old_roots))
2939 			goto out_free;
2940 		nr_old_roots = old_roots->nnodes;
2941 	}
2942 
2943 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2944 	if (nr_old_roots == 0 && nr_new_roots == 0)
2945 		goto out_free;
2946 
2947 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2948 					num_bytes, nr_old_roots, nr_new_roots);
2949 
2950 	mutex_lock(&fs_info->qgroup_rescan_lock);
2951 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2952 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2953 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2954 			ret = 0;
2955 			goto out_free;
2956 		}
2957 	}
2958 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2959 
2960 	spin_lock(&fs_info->qgroup_lock);
2961 	seq = fs_info->qgroup_seq;
2962 
2963 	/* Update old refcnts using old_roots */
2964 	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
2965 
2966 	/* Update new refcnts using new_roots */
2967 	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
2968 
2969 	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
2970 			       num_bytes, seq);
2971 
2972 	/*
2973 	 * We're done using the iterator, release all its qgroups while holding
2974 	 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
2975 	 * and trigger use-after-free accesses to qgroups.
2976 	 */
2977 	qgroup_iterator_nested_clean(&qgroups);
2978 
2979 	/*
2980 	 * Bump qgroup_seq to avoid seq overlap
2981 	 */
2982 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2983 	spin_unlock(&fs_info->qgroup_lock);
2984 out_free:
2985 	ulist_free(old_roots);
2986 	ulist_free(new_roots);
2987 	return ret;
2988 }
2989 
2990 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
2991 {
2992 	struct btrfs_fs_info *fs_info = trans->fs_info;
2993 	struct btrfs_qgroup_extent_record *record;
2994 	struct btrfs_delayed_ref_root *delayed_refs;
2995 	struct ulist *new_roots = NULL;
2996 	unsigned long index;
2997 	u64 num_dirty_extents = 0;
2998 	u64 qgroup_to_skip;
2999 	int ret = 0;
3000 
3001 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3002 		return 0;
3003 
3004 	delayed_refs = &trans->transaction->delayed_refs;
3005 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
3006 	xa_for_each(&delayed_refs->dirty_extents, index, record) {
3007 		const u64 bytenr = (((u64)index) << fs_info->sectorsize_bits);
3008 
3009 		num_dirty_extents++;
3010 		trace_btrfs_qgroup_account_extents(fs_info, record, bytenr);
3011 
3012 		if (!ret && !(fs_info->qgroup_flags &
3013 			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
3014 			struct btrfs_backref_walk_ctx ctx = { 0 };
3015 
3016 			ctx.bytenr = bytenr;
3017 			ctx.fs_info = fs_info;
3018 
3019 			/*
3020 			 * Old roots should be searched when inserting qgroup
3021 			 * extent record.
3022 			 *
3023 			 * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
3024 			 * we may have some record inserted during
3025 			 * NO_ACCOUNTING (thus no old_roots populated), but
3026 			 * later we start rescan, which clears NO_ACCOUNTING,
3027 			 * leaving some inserted records without old_roots
3028 			 * populated.
3029 			 *
3030 			 * Those cases are rare and should not cause too much
3031 			 * time spent during commit_transaction().
3032 			 */
3033 			if (!record->old_roots) {
3034 				/* Search commit root to find old_roots */
3035 				ret = btrfs_find_all_roots(&ctx, false);
3036 				if (ret < 0)
3037 					goto cleanup;
3038 				record->old_roots = ctx.roots;
3039 				ctx.roots = NULL;
3040 			}
3041 
3042 			/*
3043 			 * Use BTRFS_SEQ_LAST as time_seq to do special search,
3044 			 * which doesn't lock tree or delayed_refs and search
3045 			 * current root. It's safe inside commit_transaction().
3046 			 */
3047 			ctx.trans = trans;
3048 			ctx.time_seq = BTRFS_SEQ_LAST;
3049 			ret = btrfs_find_all_roots(&ctx, false);
3050 			if (ret < 0)
3051 				goto cleanup;
3052 			new_roots = ctx.roots;
3053 			if (qgroup_to_skip) {
3054 				ulist_del(new_roots, qgroup_to_skip, 0);
3055 				ulist_del(record->old_roots, qgroup_to_skip,
3056 					  0);
3057 			}
3058 			ret = btrfs_qgroup_account_extent(trans, bytenr,
3059 							  record->num_bytes,
3060 							  record->old_roots,
3061 							  new_roots);
3062 			record->old_roots = NULL;
3063 			new_roots = NULL;
3064 		}
3065 		/* Free the reserved data space */
3066 		btrfs_qgroup_free_refroot(fs_info,
3067 				record->data_rsv_refroot,
3068 				record->data_rsv,
3069 				BTRFS_QGROUP_RSV_DATA);
3070 cleanup:
3071 		ulist_free(record->old_roots);
3072 		ulist_free(new_roots);
3073 		new_roots = NULL;
3074 		xa_erase(&delayed_refs->dirty_extents, index);
3075 		kfree(record);
3076 
3077 	}
3078 	trace_btrfs_qgroup_num_dirty_extents(fs_info, trans->transid, num_dirty_extents);
3079 	return ret;
3080 }
3081 
3082 /*
3083  * Writes all changed qgroups to disk.
3084  * Called by the transaction commit path and the qgroup assign ioctl.
3085  */
3086 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
3087 {
3088 	struct btrfs_fs_info *fs_info = trans->fs_info;
3089 	int ret = 0;
3090 
3091 	/*
3092 	 * In case we are called from the qgroup assign ioctl, assert that we
3093 	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
3094 	 * disable operation (ioctl) and access a freed quota root.
3095 	 */
3096 	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
3097 		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
3098 
3099 	if (!fs_info->quota_root)
3100 		return ret;
3101 
3102 	spin_lock(&fs_info->qgroup_lock);
3103 	while (!list_empty(&fs_info->dirty_qgroups)) {
3104 		struct btrfs_qgroup *qgroup;
3105 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
3106 					  struct btrfs_qgroup, dirty);
3107 		list_del_init(&qgroup->dirty);
3108 		spin_unlock(&fs_info->qgroup_lock);
3109 		ret = update_qgroup_info_item(trans, qgroup);
3110 		if (ret)
3111 			qgroup_mark_inconsistent(fs_info,
3112 						 "qgroup info item update error %d", ret);
3113 		ret = update_qgroup_limit_item(trans, qgroup);
3114 		if (ret)
3115 			qgroup_mark_inconsistent(fs_info,
3116 						 "qgroup limit item update error %d", ret);
3117 		spin_lock(&fs_info->qgroup_lock);
3118 	}
3119 	if (btrfs_qgroup_enabled(fs_info))
3120 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
3121 	else
3122 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
3123 	spin_unlock(&fs_info->qgroup_lock);
3124 
3125 	ret = update_qgroup_status_item(trans);
3126 	if (ret)
3127 		qgroup_mark_inconsistent(fs_info,
3128 					 "qgroup status item update error %d", ret);
3129 
3130 	return ret;
3131 }
3132 
3133 int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
3134 			       struct btrfs_qgroup_inherit *inherit,
3135 			       size_t size)
3136 {
3137 	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
3138 		return -EOPNOTSUPP;
3139 	if (size < sizeof(*inherit) || size > PAGE_SIZE)
3140 		return -EINVAL;
3141 
3142 	/*
3143 	 * In the past we allowed btrfs_qgroup_inherit to specify to copy
3144 	 * rfer/excl numbers directly from other qgroups.  This behavior has
3145 	 * been disabled in userspace for a very long time, but here we should
3146 	 * also disable it in kernel, as this behavior is known to mark qgroup
3147 	 * also disable it in the kernel, as this behavior is known to mark qgroup
3148 	 *
3149 	 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies.
3150 	 */
3151 	if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
3152 		return -EINVAL;
3153 
3154 	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
3155 		return -EINVAL;
3156 
3157 	/*
3158 	 * Skip the inherit source qgroups check if qgroup is not enabled.
3159 	 * Qgroup can still be enabled later, causing problems, but in that case
3160 	 * btrfs_qgroup_inherit() would just ignore those invalid ones.
3161 	 */
3162 	if (!btrfs_qgroup_enabled(fs_info))
3163 		return 0;
3164 
3165 	/*
3166 	 * Now check all the remaining qgroups, they should all:
3167 	 *
3168 	 * - Exist
3169 	 * - Be higher level qgroups.
3170 	 */
3171 	for (int i = 0; i < inherit->num_qgroups; i++) {
3172 		struct btrfs_qgroup *qgroup;
3173 		u64 qgroupid = inherit->qgroups[i];
3174 
3175 		if (btrfs_qgroup_level(qgroupid) == 0)
3176 			return -EINVAL;
3177 
3178 		spin_lock(&fs_info->qgroup_lock);
3179 		qgroup = find_qgroup_rb(fs_info, qgroupid);
3180 		if (!qgroup) {
3181 			spin_unlock(&fs_info->qgroup_lock);
3182 			return -ENOENT;
3183 		}
3184 		spin_unlock(&fs_info->qgroup_lock);
3185 	}
3186 	return 0;
3187 }
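
/*
 * For reference, a minimal userspace sketch of a btrfs_qgroup_inherit
 * layout that passes the checks above; the level 1 qgroup id (1/100) is a
 * made-up example:
 *
 *	#include <stdlib.h>
 *	#include <linux/btrfs.h>
 *
 *	size_t size = sizeof(struct btrfs_qgroup_inherit) + sizeof(__u64);
 *	struct btrfs_qgroup_inherit *inherit = calloc(1, size);
 *
 *	inherit->num_qgroups = 1;
 *	inherit->qgroups[0] = (1ULL << 48) | 100;
 *
 * num_ref_copies and num_excl_copies stay zero as required, and @size
 * matches struct_size(inherit, qgroups, 1) exactly.
 */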
3188 
3189 static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
3190 			       u64 inode_rootid,
3191 			       struct btrfs_qgroup_inherit **inherit)
3192 {
3193 	int i = 0;
3194 	u64 num_qgroups = 0;
3195 	struct btrfs_qgroup *inode_qg;
3196 	struct btrfs_qgroup_list *qg_list;
3197 	struct btrfs_qgroup_inherit *res;
3198 	size_t struct_sz;
3199 	u64 *qgids;
3200 
3201 	if (*inherit)
3202 		return -EEXIST;
3203 
3204 	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
3205 	if (!inode_qg)
3206 		return -ENOENT;
3207 
3208 	num_qgroups = list_count_nodes(&inode_qg->groups);
3209 
3210 	if (!num_qgroups)
3211 		return 0;
3212 
3213 	struct_sz = struct_size(res, qgroups, num_qgroups);
3214 	if (struct_sz == SIZE_MAX)
3215 		return -ERANGE;
3216 
3217 	res = kzalloc(struct_sz, GFP_NOFS);
3218 	if (!res)
3219 		return -ENOMEM;
3220 	res->num_qgroups = num_qgroups;
3221 	qgids = res->qgroups;
3222 
3223 	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
3224 		qgids[i++] = qg_list->group->qgroupid;
3225 
3226 	*inherit = res;
3227 	return 0;
3228 }
3229 
3230 /*
3231  * Check if we can skip rescan when inheriting qgroups.  If @src has a single
3232  * @parent, and that @parent is owning all its bytes exclusively, we can skip
3233  * the full rescan, by just adding nodesize to the @parent's excl/rfer.
3234  *
3235  * Return <0 for fatal errors (like srcid/parentid has no qgroup).
3236  * Return 0 if a quick inherit is done.
3237  * Return >0 if a quick inherit is not possible, and a full rescan is needed.
3238  */
3239 static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
3240 					 u64 srcid, u64 parentid)
3241 {
3242 	struct btrfs_qgroup *src;
3243 	struct btrfs_qgroup *parent;
3244 	struct btrfs_qgroup_list *list;
3245 	int nr_parents = 0;
3246 
3247 	src = find_qgroup_rb(fs_info, srcid);
3248 	if (!src)
3249 		return -ENOENT;
3250 	parent = find_qgroup_rb(fs_info, parentid);
3251 	if (!parent)
3252 		return -ENOENT;
3253 
3254 	/*
3255 	 * Source has no parent qgroup, but our new qgroup would have one.
3256 	 * Qgroup numbers would become inconsistent.
3257 	 */
3258 	if (list_empty(&src->groups))
3259 		return 1;
3260 
3261 	list_for_each_entry(list, &src->groups, next_group) {
3262 		/* The parent is not the same, quick update is not possible. */
3263 		if (list->group->qgroupid != parentid)
3264 			return 1;
3265 		nr_parents++;
3266 		/*
3267 		 * More than one parent qgroup, we can't be sure about accounting
3268 		 * consistency.
3269 		 */
3270 		if (nr_parents > 1)
3271 			return 1;
3272 	}
3273 
3274 	/*
3275 	 * The parent is not exclusively owning all its bytes.  We're not sure
3276 	 * if the source has any bytes not fully owned by the parent.
3277 	 */
3278 	if (parent->excl != parent->rfer)
3279 		return 1;
3280 
3281 	parent->excl += fs_info->nodesize;
3282 	parent->rfer += fs_info->nodesize;
3283 	return 0;
3284 }
3285 
3286 /*
3287  * Copy the accounting information between qgroups. This is necessary
3288  * when a snapshot or a subvolume is created. Throwing an error will
3289  * cause a transaction abort so we take extra care here to only error
3290  * when a readonly fs is a reasonable outcome.
3291  */
3292 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
3293 			 u64 objectid, u64 inode_rootid,
3294 			 struct btrfs_qgroup_inherit *inherit)
3295 {
3296 	int ret = 0;
3297 	u64 *i_qgroups;
3298 	bool committing = false;
3299 	struct btrfs_fs_info *fs_info = trans->fs_info;
3300 	struct btrfs_root *quota_root;
3301 	struct btrfs_qgroup *srcgroup;
3302 	struct btrfs_qgroup *dstgroup;
3303 	struct btrfs_qgroup *prealloc;
3304 	struct btrfs_qgroup_list **qlist_prealloc = NULL;
3305 	bool free_inherit = false;
3306 	bool need_rescan = false;
3307 	u32 level_size = 0;
3308 	u64 nums;
3309 
3310 	if (!btrfs_qgroup_enabled(fs_info))
3311 		return 0;
3312 
3313 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
3314 	if (!prealloc)
3315 		return -ENOMEM;
3316 
3317 	/*
3318 	 * There are only two callers of this function.
3319 	 *
3320 	 * One in create_subvol() in the ioctl context, which needs to hold
3321 	 * the qgroup_ioctl_lock.
3322 	 *
3323 	 * The other one in create_pending_snapshot() where no other qgroup
3324 	 * code can modify the fs as they all need to either start a new trans
3325 	 * or hold a trans handle, thus we don't need to hold
3326 	 * qgroup_ioctl_lock.
3327 	 * This avoids a long and complex lock chain and makes lockdep happy.
3328 	 */
3329 	spin_lock(&fs_info->trans_lock);
3330 	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
3331 		committing = true;
3332 	spin_unlock(&fs_info->trans_lock);
3333 
3334 	if (!committing)
3335 		mutex_lock(&fs_info->qgroup_ioctl_lock);
3336 
3337 	quota_root = fs_info->quota_root;
3338 	if (!quota_root) {
3339 		ret = -EINVAL;
3340 		goto out;
3341 	}
3342 
3343 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
3344 		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
3345 		if (ret)
3346 			goto out;
3347 		free_inherit = true;
3348 	}
3349 
3350 	if (inherit) {
3351 		i_qgroups = (u64 *)(inherit + 1);
3352 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
3353 		       2 * inherit->num_excl_copies;
3354 		for (int i = 0; i < nums; i++) {
3355 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
3356 
3357 			/*
3358 			 * Zero out invalid groups so we can ignore
3359 			 * them later.
3360 			 */
3361 			if (!srcgroup ||
3362 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
3363 				*i_qgroups = 0ULL;
3364 
3365 			++i_qgroups;
3366 		}
3367 	}
3368 
3369 	/*
3370 	 * create a tracking group for the subvol itself
3371 	 */
3372 	ret = add_qgroup_item(trans, quota_root, objectid);
3373 	if (ret)
3374 		goto out;
3375 
3376 	/*
3377 	 * add qgroup to all inherited groups
3378 	 */
3379 	if (inherit) {
3380 		i_qgroups = (u64 *)(inherit + 1);
3381 		for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
3382 			if (*i_qgroups == 0)
3383 				continue;
3384 			ret = add_qgroup_relation_item(trans, objectid,
3385 						       *i_qgroups);
3386 			if (ret && ret != -EEXIST)
3387 				goto out;
3388 			ret = add_qgroup_relation_item(trans, *i_qgroups,
3389 						       objectid);
3390 			if (ret && ret != -EEXIST)
3391 				goto out;
3392 		}
3393 		ret = 0;
3394 
3395 		qlist_prealloc = kcalloc(inherit->num_qgroups,
3396 					 sizeof(struct btrfs_qgroup_list *),
3397 					 GFP_NOFS);
3398 		if (!qlist_prealloc) {
3399 			ret = -ENOMEM;
3400 			goto out;
3401 		}
3402 		for (int i = 0; i < inherit->num_qgroups; i++) {
3403 			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
3404 						    GFP_NOFS);
3405 			if (!qlist_prealloc[i]) {
3406 				ret = -ENOMEM;
3407 				goto out;
3408 			}
3409 		}
3410 	}
3411 
3412 	spin_lock(&fs_info->qgroup_lock);
3413 
3414 	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
3415 	prealloc = NULL;
3416 
3417 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
3418 		dstgroup->lim_flags = inherit->lim.flags;
3419 		dstgroup->max_rfer = inherit->lim.max_rfer;
3420 		dstgroup->max_excl = inherit->lim.max_excl;
3421 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
3422 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
3423 
3424 		qgroup_dirty(fs_info, dstgroup);
3425 	}
3426 
3427 	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
3428 		srcgroup = find_qgroup_rb(fs_info, srcid);
3429 		if (!srcgroup)
3430 			goto unlock;
3431 
3432 		/*
3433 		 * We call inherit after we clone the root in order to make sure
3434 		 * our counts don't go crazy, so at this point the only
3435 		 * difference between the two roots should be the root node.
3436 		 */
3437 		level_size = fs_info->nodesize;
3438 		dstgroup->rfer = srcgroup->rfer;
3439 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
3440 		dstgroup->excl = level_size;
3441 		dstgroup->excl_cmpr = level_size;
3442 		srcgroup->excl = level_size;
3443 		srcgroup->excl_cmpr = level_size;
3444 
3445 		/* inherit the limit info */
3446 		dstgroup->lim_flags = srcgroup->lim_flags;
3447 		dstgroup->max_rfer = srcgroup->max_rfer;
3448 		dstgroup->max_excl = srcgroup->max_excl;
3449 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3450 		dstgroup->rsv_excl = srcgroup->rsv_excl;
3451 
3452 		qgroup_dirty(fs_info, dstgroup);
3453 		qgroup_dirty(fs_info, srcgroup);
3454 
3455 		/*
3456 	 * If the source qgroup has a parent but the new one doesn't,
3457 		 * we need a full rescan.
3458 		 */
3459 		if (!inherit && !list_empty(&srcgroup->groups))
3460 			need_rescan = true;
3461 	}
3462 
3463 	if (!inherit)
3464 		goto unlock;
3465 
3466 	i_qgroups = (u64 *)(inherit + 1);
3467 	for (int i = 0; i < inherit->num_qgroups; i++) {
3468 		if (*i_qgroups) {
3469 			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
3470 					      *i_qgroups);
3471 			qlist_prealloc[i] = NULL;
3472 			if (ret)
3473 				goto unlock;
3474 		}
3475 		if (srcid) {
3476 			/* Check if we can do a quick inherit. */
3477 			ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups);
3478 			if (ret < 0)
3479 				goto unlock;
3480 			if (ret > 0)
3481 				need_rescan = true;
3482 			ret = 0;
3483 		}
3484 		++i_qgroups;
3485 	}
3486 
3487 	for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
3488 		struct btrfs_qgroup *src;
3489 		struct btrfs_qgroup *dst;
3490 
3491 		if (!i_qgroups[0] || !i_qgroups[1])
3492 			continue;
3493 
3494 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3495 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3496 
3497 		if (!src || !dst) {
3498 			ret = -EINVAL;
3499 			goto unlock;
3500 		}
3501 
3502 		dst->rfer = src->rfer - level_size;
3503 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3504 
3505 		/* Manually tweaking numbers certainly needs a rescan */
3506 		need_rescan = true;
3507 	}
3508 	for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
3509 		struct btrfs_qgroup *src;
3510 		struct btrfs_qgroup *dst;
3511 
3512 		if (!i_qgroups[0] || !i_qgroups[1])
3513 			continue;
3514 
3515 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3516 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3517 
3518 		if (!src || !dst) {
3519 			ret = -EINVAL;
3520 			goto unlock;
3521 		}
3522 
3523 		dst->excl = src->excl + level_size;
3524 		dst->excl_cmpr = src->excl_cmpr + level_size;
3525 		need_rescan = true;
3526 	}
3527 
3528 unlock:
3529 	spin_unlock(&fs_info->qgroup_lock);
3530 	if (!ret)
3531 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3532 out:
3533 	if (!committing)
3534 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3535 	if (need_rescan)
3536 		qgroup_mark_inconsistent(fs_info, "qgroup inherit needs a rescan");
3537 	if (qlist_prealloc) {
3538 		for (int i = 0; i < inherit->num_qgroups; i++)
3539 			kfree(qlist_prealloc[i]);
3540 		kfree(qlist_prealloc);
3541 	}
3542 	if (free_inherit)
3543 		kfree(inherit);
3544 	kfree(prealloc);
3545 	return ret;
3546 }
3547 
3548 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3549 {
3550 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3551 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3552 		return false;
3553 
3554 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3555 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3556 		return false;
3557 
3558 	return true;
3559 }
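/*
 * Worked example (illustrative numbers): with BTRFS_QGROUP_LIMIT_MAX_RFER
 * set, max_rfer == 1G, rfer == 768M and a reserved total of 256M, any
 * nonzero @num_bytes pushes the sum past max_rfer, so qgroup_check_limits()
 * returns false and qgroup_reserve() below turns that into -EDQUOT.
 */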
3560 
3561 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3562 			  enum btrfs_qgroup_rsv_type type)
3563 {
3564 	struct btrfs_qgroup *qgroup;
3565 	struct btrfs_fs_info *fs_info = root->fs_info;
3566 	u64 ref_root = btrfs_root_id(root);
3567 	int ret = 0;
3568 	LIST_HEAD(qgroup_list);
3569 
3570 	if (!btrfs_is_fstree(ref_root))
3571 		return 0;
3572 
3573 	if (num_bytes == 0)
3574 		return 0;
3575 
3576 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3577 	    capable(CAP_SYS_RESOURCE))
3578 		enforce = false;
3579 
3580 	spin_lock(&fs_info->qgroup_lock);
3581 	if (!fs_info->quota_root)
3582 		goto out;
3583 
3584 	qgroup = find_qgroup_rb(fs_info, ref_root);
3585 	if (!qgroup)
3586 		goto out;
3587 
3588 	qgroup_iterator_add(&qgroup_list, qgroup);
3589 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3590 		struct btrfs_qgroup_list *glist;
3591 
3592 		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3593 			ret = -EDQUOT;
3594 			goto out;
3595 		}
3596 
3597 		list_for_each_entry(glist, &qgroup->groups, next_group)
3598 			qgroup_iterator_add(&qgroup_list, glist->group);
3599 	}
3600 
3601 	ret = 0;
3602 	/*
3603 	 * no limits exceeded, now record the reservation into all qgroups
3604 	 */
3605 	list_for_each_entry(qgroup, &qgroup_list, iterator)
3606 		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
3607 
3608 out:
3609 	qgroup_iterator_clean(&qgroup_list);
3610 	spin_unlock(&fs_info->qgroup_lock);
3611 	return ret;
3612 }
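/*
 * Note on the walk above: qgroup_iterator_add() pulls the level 0 qgroup
 * and every ancestor reachable through ->groups onto @qgroup_list, and the
 * limits of all of them are checked before any reservation is recorded, so
 * an -EDQUOT bails out without leaving a partial reservation behind.
 */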
3613 
3614 /*
3615  * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
3616  * qgroup).
3617  *
3618  * Will handle all higher level qgroups too.
3619  *
3620  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3621  * This special case is only used for META_PERTRANS type.
3622  */
3623 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3624 			       u64 ref_root, u64 num_bytes,
3625 			       enum btrfs_qgroup_rsv_type type)
3626 {
3627 	struct btrfs_qgroup *qgroup;
3628 	LIST_HEAD(qgroup_list);
3629 
3630 	if (!btrfs_is_fstree(ref_root))
3631 		return;
3632 
3633 	if (num_bytes == 0)
3634 		return;
3635 
3636 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3637 		WARN(1, "%s: Invalid type to free", __func__);
3638 		return;
3639 	}
3640 	spin_lock(&fs_info->qgroup_lock);
3641 
3642 	if (!fs_info->quota_root)
3643 		goto out;
3644 
3645 	qgroup = find_qgroup_rb(fs_info, ref_root);
3646 	if (!qgroup)
3647 		goto out;
3648 
3649 	if (num_bytes == (u64)-1)
3650 		/*
3651 		 * We're freeing all pertrans rsv, get reserved value from
3652 		 * level 0 qgroup as real num_bytes to free.
3653 		 */
3654 		num_bytes = qgroup->rsv.values[type];
3655 
3656 	qgroup_iterator_add(&qgroup_list, qgroup);
3657 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3658 		struct btrfs_qgroup_list *glist;
3659 
3660 		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
3661 		list_for_each_entry(glist, &qgroup->groups, next_group) {
3662 			qgroup_iterator_add(&qgroup_list, glist->group);
3663 		}
3664 	}
3665 out:
3666 	qgroup_iterator_clean(&qgroup_list);
3667 	spin_unlock(&fs_info->qgroup_lock);
3668 }
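/*
 * The (u64)-1 special case above is how the whole per-transaction
 * reservation is dropped at commit time, see
 * btrfs_qgroup_free_meta_all_pertrans() further below.
 */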
3669 
3670 /*
3671  * Check if the leaf is the last leaf, which means all node pointers
3672  * are at their last position.
3673  */
3674 static bool is_last_leaf(struct btrfs_path *path)
3675 {
3676 	int i;
3677 
3678 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3679 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3680 			return false;
3681 	}
3682 	return true;
3683 }
3684 
3685 /*
3686  * Returns < 0 on error, 0 when more leaves are to be scanned and
3687  * 1 when done.
3688  */
3689 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3690 			      struct btrfs_path *path)
3691 {
3692 	struct btrfs_fs_info *fs_info = trans->fs_info;
3693 	struct btrfs_root *extent_root;
3694 	struct btrfs_key found;
3695 	struct extent_buffer *scratch_leaf = NULL;
3696 	u64 num_bytes;
3697 	bool done;
3698 	int slot;
3699 	int ret;
3700 
3701 	if (!btrfs_qgroup_full_accounting(fs_info))
3702 		return 1;
3703 
3704 	mutex_lock(&fs_info->qgroup_rescan_lock);
3705 	extent_root = btrfs_extent_root(fs_info,
3706 				fs_info->qgroup_rescan_progress.objectid);
3707 	ret = btrfs_search_slot_for_read(extent_root,
3708 					 &fs_info->qgroup_rescan_progress,
3709 					 path, 1, 0);
3710 
3711 	btrfs_debug(fs_info,
3712 		"current progress key (%llu %u %llu), search_slot ret %d",
3713 		fs_info->qgroup_rescan_progress.objectid,
3714 		fs_info->qgroup_rescan_progress.type,
3715 		fs_info->qgroup_rescan_progress.offset, ret);
3716 
3717 	if (ret) {
3718 		/*
3719 		 * The rescan is about to end, we will not be scanning any
3720 		 * further blocks. We cannot unset the RESCAN flag here, because
3721 		 * we want to commit the transaction if everything went well.
3722 		 * To make the live accounting work in this phase, we set our
3723 		 * scan progress pointer such that every real extent objectid
3724 		 * will be smaller.
3725 		 */
3726 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3727 		btrfs_release_path(path);
3728 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3729 		return ret;
3730 	}
3731 	done = is_last_leaf(path);
3732 
3733 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3734 			      btrfs_header_nritems(path->nodes[0]) - 1);
3735 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3736 
3737 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3738 	if (!scratch_leaf) {
3739 		ret = -ENOMEM;
3740 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3741 		goto out;
3742 	}
3743 	slot = path->slots[0];
3744 	btrfs_release_path(path);
3745 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3746 
3747 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3748 		struct btrfs_backref_walk_ctx ctx = { 0 };
3749 
3750 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3751 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3752 		    found.type != BTRFS_METADATA_ITEM_KEY)
3753 			continue;
3754 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3755 			num_bytes = fs_info->nodesize;
3756 		else
3757 			num_bytes = found.offset;
3758 
3759 		ctx.bytenr = found.objectid;
3760 		ctx.fs_info = fs_info;
3761 
3762 		ret = btrfs_find_all_roots(&ctx, false);
3763 		if (ret < 0)
3764 			goto out;
3765 		/* For rescan, just pass old_roots as NULL */
3766 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3767 						  num_bytes, NULL, ctx.roots);
3768 		if (ret < 0)
3769 			goto out;
3770 	}
3771 out:
3772 	if (scratch_leaf)
3773 		free_extent_buffer(scratch_leaf);
3774 
3775 	if (done && !ret) {
3776 		ret = 1;
3777 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3778 	}
3779 	return ret;
3780 }
3781 
3782 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3783 {
3784 	if (btrfs_fs_closing(fs_info))
3785 		return true;
3786 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
3787 		return true;
3788 	if (!btrfs_qgroup_enabled(fs_info))
3789 		return true;
3790 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3791 		return true;
3792 	return false;
3793 }
3794 
3795 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3796 {
3797 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3798 						     qgroup_rescan_work);
3799 	struct btrfs_path *path;
3800 	struct btrfs_trans_handle *trans = NULL;
3801 	int ret = 0;
3802 	bool stopped = false;
3803 	bool did_leaf_rescans = false;
3804 
3805 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3806 		return;
3807 
3808 	path = btrfs_alloc_path();
3809 	if (!path) {
3810 		ret = -ENOMEM;
3811 		goto out;
3812 	}
3813 	/*
3814 	 * Rescan should only search the commit root, and any later difference
3815 	 * should be recorded by the qgroup accounting code.
3816 	 */
3817 	path->search_commit_root = 1;
3818 	path->skip_locking = 1;
3819 
3820 	while (!ret && !(stopped = rescan_should_stop(fs_info))) {
3821 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3822 		if (IS_ERR(trans)) {
3823 			ret = PTR_ERR(trans);
3824 			break;
3825 		}
3826 
3827 		ret = qgroup_rescan_leaf(trans, path);
3828 		did_leaf_rescans = true;
3829 
3830 		if (ret > 0)
3831 			btrfs_commit_transaction(trans);
3832 		else
3833 			btrfs_end_transaction(trans);
3834 	}
3835 
3836 out:
3837 	btrfs_free_path(path);
3838 
3839 	mutex_lock(&fs_info->qgroup_rescan_lock);
3840 	if (ret > 0 &&
3841 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3842 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3843 	} else if (ret < 0 || stopped) {
3844 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3845 	}
3846 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3847 
3848 	/*
3849 	 * Only update status, since the previous part has already updated the
3850 	 * qgroup info, and only if we did any actual work. This also prevents
3851 	 * race with a concurrent quota disable, which has already set
3852 	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3853 	 * btrfs_quota_disable().
3854 	 */
3855 	if (did_leaf_rescans) {
3856 		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3857 		if (IS_ERR(trans)) {
3858 			ret = PTR_ERR(trans);
3859 			trans = NULL;
3860 			btrfs_err(fs_info,
3861 				  "fail to start transaction for status update: %d",
3862 				  ret);
3863 		}
3864 	} else {
3865 		trans = NULL;
3866 	}
3867 
3868 	mutex_lock(&fs_info->qgroup_rescan_lock);
3869 	if (!stopped ||
3870 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3871 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3872 	if (trans) {
3873 		int ret2 = update_qgroup_status_item(trans);
3874 
3875 		if (ret2 < 0) {
3876 			ret = ret2;
3877 			btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
3878 		}
3879 	}
3880 	fs_info->qgroup_rescan_running = false;
3881 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3882 	complete_all(&fs_info->qgroup_rescan_completion);
3883 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3884 
3885 	if (!trans)
3886 		return;
3887 
3888 	btrfs_end_transaction(trans);
3889 
3890 	if (stopped) {
3891 		btrfs_info(fs_info, "qgroup scan paused");
3892 	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3893 		btrfs_info(fs_info, "qgroup scan cancelled");
3894 	} else if (ret >= 0) {
3895 		btrfs_info(fs_info, "qgroup scan completed%s",
3896 			ret > 0 ? " (inconsistency flag cleared)" : "");
3897 	} else {
3898 		btrfs_err(fs_info, "qgroup scan failed with %d", ret);
3899 	}
3900 }
3901 
3902 /*
3903  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3904  * memory required for the rescan context.
3905  */
3906 static int
3907 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3908 		   int init_flags)
3909 {
3910 	int ret = 0;
3911 
3912 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3913 		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
3914 		return -EINVAL;
3915 	}
3916 
3917 	if (!init_flags) {
3918 		/* we're resuming qgroup rescan at mount time */
3919 		if (!(fs_info->qgroup_flags &
3920 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3921 			btrfs_debug(fs_info,
3922 			"qgroup rescan init failed, qgroup rescan is not queued");
3923 			ret = -EINVAL;
3924 		} else if (!(fs_info->qgroup_flags &
3925 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3926 			btrfs_debug(fs_info,
3927 			"qgroup rescan init failed, qgroup is not enabled");
3928 			ret = -ENOTCONN;
3929 		}
3930 
3931 		if (ret)
3932 			return ret;
3933 	}
3934 
3935 	mutex_lock(&fs_info->qgroup_rescan_lock);
3936 
3937 	if (init_flags) {
3938 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3939 			ret = -EINPROGRESS;
3940 		} else if (!(fs_info->qgroup_flags &
3941 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3942 			btrfs_debug(fs_info,
3943 			"qgroup rescan init failed, qgroup is not enabled");
3944 			ret = -ENOTCONN;
3945 		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
3946 			/* Quota disable is in progress */
3947 			ret = -EBUSY;
3948 		}
3949 
3950 		if (ret) {
3951 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3952 			return ret;
3953 		}
3954 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3955 	}
3956 
3957 	memset(&fs_info->qgroup_rescan_progress, 0,
3958 		sizeof(fs_info->qgroup_rescan_progress));
3959 	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
3960 				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
3961 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3962 	init_completion(&fs_info->qgroup_rescan_completion);
3963 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3964 
3965 	btrfs_init_work(&fs_info->qgroup_rescan_work,
3966 			btrfs_qgroup_rescan_worker, NULL);
3967 	return 0;
3968 }
3969 
3970 static void
3971 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3972 {
3973 	struct rb_node *n;
3974 	struct btrfs_qgroup *qgroup;
3975 
3976 	spin_lock(&fs_info->qgroup_lock);
3977 	/* clear all current qgroup tracking information */
3978 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3979 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
3980 		qgroup->rfer = 0;
3981 		qgroup->rfer_cmpr = 0;
3982 		qgroup->excl = 0;
3983 		qgroup->excl_cmpr = 0;
3984 		qgroup_dirty(fs_info, qgroup);
3985 	}
3986 	spin_unlock(&fs_info->qgroup_lock);
3987 }
3988 
3989 int
3990 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3991 {
3992 	int ret = 0;
3993 
3994 	ret = qgroup_rescan_init(fs_info, 0, 1);
3995 	if (ret)
3996 		return ret;
3997 
3998 	/*
3999 	 * We have set the rescan_progress to 0, which means no more
4000 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
4001 	 * However, a btrfs_qgroup_account_ref call may already be past its call
4002 	 * to btrfs_find_all_roots, in which case it would still do the
4003 	 * accounting.
4004 	 * To solve this, we're committing the transaction, which will
4005 	 * ensure we run all delayed refs and only after that, we are
4006 	 * going to clear all tracking information for a clean start.
4007 	 */
4008 
4009 	ret = btrfs_commit_current_transaction(fs_info->fs_root);
4010 	if (ret) {
4011 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
4012 		return ret;
4013 	}
4014 
4015 	qgroup_rescan_zero_tracking(fs_info);
4016 
4017 	mutex_lock(&fs_info->qgroup_rescan_lock);
4018 	/*
4019 	 * The rescan worker is only for full accounting qgroups, check if it's
4020 	 * enabled as it is pointless to queue it otherwise. A concurrent quota
4021 	 * disable may also have just cleared BTRFS_FS_QUOTA_ENABLED.
4022 	 */
4023 	if (btrfs_qgroup_full_accounting(fs_info)) {
4024 		fs_info->qgroup_rescan_running = true;
4025 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4026 				 &fs_info->qgroup_rescan_work);
4027 	} else {
4028 		ret = -ENOTCONN;
4029 	}
4030 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4031 
4032 	return ret;
4033 }
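/*
 * Minimal usage sketch (the ioctl caller is an assumption, not shown in
 * this file): kick off a rescan and wait for it to finish:
 *
 *	ret = btrfs_qgroup_rescan(fs_info);
 *	if (!ret)
 *		ret = btrfs_qgroup_wait_for_completion(fs_info, true);
 */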
4034 
4035 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
4036 				     bool interruptible)
4037 {
4038 	int running;
4039 	int ret = 0;
4040 
4041 	mutex_lock(&fs_info->qgroup_rescan_lock);
4042 	running = fs_info->qgroup_rescan_running;
4043 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4044 
4045 	if (!running)
4046 		return 0;
4047 
4048 	if (interruptible)
4049 		ret = wait_for_completion_interruptible(
4050 					&fs_info->qgroup_rescan_completion);
4051 	else
4052 		wait_for_completion(&fs_info->qgroup_rescan_completion);
4053 
4054 	return ret;
4055 }
4056 
4057 /*
4058  * this is only called from open_ctree where we're still single threaded, thus
4059  * locking is omitted here.
4060  */
4061 void
4062 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
4063 {
4064 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4065 		mutex_lock(&fs_info->qgroup_rescan_lock);
4066 		fs_info->qgroup_rescan_running = true;
4067 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4068 				 &fs_info->qgroup_rescan_work);
4069 		mutex_unlock(&fs_info->qgroup_rescan_lock);
4070 	}
4071 }
4072 
4073 #define rbtree_iterate_from_safe(node, next, start)				\
4074        for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
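/*
 * The successor is captured in @next before the loop body runs, so the
 * body may safely erase @node from the tree, as qgroup_unreserve_range()
 * below does via ulist_del().
 */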
4075 
4076 static int qgroup_unreserve_range(struct btrfs_inode *inode,
4077 				  struct extent_changeset *reserved, u64 start,
4078 				  u64 len)
4079 {
4080 	struct rb_node *node;
4081 	struct rb_node *next;
4082 	struct ulist_node *entry;
4083 	int ret = 0;
4084 
4085 	node = reserved->range_changed.root.rb_node;
4086 	if (!node)
4087 		return 0;
4088 	while (node) {
4089 		entry = rb_entry(node, struct ulist_node, rb_node);
4090 		if (entry->val < start)
4091 			node = node->rb_right;
4092 		else
4093 			node = node->rb_left;
4094 	}
4095 
4096 	if (entry->val > start && rb_prev(&entry->rb_node))
4097 		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
4098 				 rb_node);
4099 
4100 	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
4101 		u64 entry_start;
4102 		u64 entry_end;
4103 		u64 entry_len;
4104 		int clear_ret;
4105 
4106 		entry = rb_entry(node, struct ulist_node, rb_node);
4107 		entry_start = entry->val;
4108 		entry_end = entry->aux;
4109 		entry_len = entry_end - entry_start + 1;
4110 
4111 		if (entry_start >= start + len)
4112 			break;
4113 		if (entry_start + entry_len <= start)
4114 			continue;
4115 		/*
4116 		 * Now the entry overlaps [start, start + len), revert the
4117 		 * EXTENT_QGROUP_RESERVED bit.
4118 		 */
4119 		clear_ret = btrfs_clear_extent_bit(&inode->io_tree, entry_start, entry_end,
4120 						   EXTENT_QGROUP_RESERVED, NULL);
4121 		if (!ret && clear_ret < 0)
4122 			ret = clear_ret;
4123 
4124 		ulist_del(&reserved->range_changed, entry->val, entry->aux);
4125 		if (likely(reserved->bytes_changed >= entry_len)) {
4126 			reserved->bytes_changed -= entry_len;
4127 		} else {
4128 			WARN_ON(1);
4129 			reserved->bytes_changed = 0;
4130 		}
4131 	}
4132 
4133 	return ret;
4134 }
4135 
4136 /*
4137  * Try to free some space for qgroup.
4138  *
4139  * For qgroup, there are only 3 ways to free qgroup space:
4140  * - Flush nodatacow write
4141  *   Any nodatacow write will free its reserved data space at run_delalloc_range().
4142  *   In theory, we should only flush nodatacow inodes, but it's not yet
4143  *   possible, so we need to flush the whole root.
4144  *
4145  * - Wait for ordered extents
4146  *   When ordered extents are finished, their reserved metadata is finally
4147  *   converted to per_trans status, which can be freed by a later
4148  *   transaction commit.
4149  *
4150  * - Commit transaction
4151  *   This would free the meta_per_trans space.
4152  *   In theory this shouldn't provide much space, but every bit of extra
4153  *   qgroup space helps.
4154  */
4155 static int try_flush_qgroup(struct btrfs_root *root)
4156 {
4157 	int ret;
4158 
4159 	/* Can't hold an open transaction or we run the risk of deadlocking. */
4160 	ASSERT(current->journal_info == NULL);
4161 	if (WARN_ON(current->journal_info))
4162 		return 0;
4163 
4164 	/*
4165 	 * We don't want to run flush again and again, so if there is a running
4166 	 * one, we wait for it to finish and return instead of starting a new one.
4167 	 */
4168 	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
4169 		wait_event(root->qgroup_flush_wait,
4170 			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
4171 		return 0;
4172 	}
4173 
4174 	ret = btrfs_start_delalloc_snapshot(root, true);
4175 	if (ret < 0)
4176 		goto out;
4177 	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
4178 
4179 	/*
4180 	 * After waiting for ordered extents run delayed iputs in order to free
4181 	 * space from unlinked files before committing the current transaction,
4182 	 * as ordered extents may have been holding the last reference of an
4183 	 * inode and they add a delayed iput when they complete.
4184 	 */
4185 	btrfs_run_delayed_iputs(root->fs_info);
4186 	btrfs_wait_on_delayed_iputs(root->fs_info);
4187 
4188 	ret = btrfs_commit_current_transaction(root);
4189 out:
4190 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
4191 	wake_up(&root->qgroup_flush_wait);
4192 	return ret;
4193 }
4194 
4195 static int qgroup_reserve_data(struct btrfs_inode *inode,
4196 			struct extent_changeset **reserved_ret, u64 start,
4197 			u64 len)
4198 {
4199 	struct btrfs_root *root = inode->root;
4200 	struct extent_changeset *reserved;
4201 	bool new_reserved = false;
4202 	u64 orig_reserved;
4203 	u64 to_reserve;
4204 	int ret;
4205 
4206 	if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4207 	    !btrfs_is_fstree(btrfs_root_id(root)) || len == 0)
4208 		return 0;
4209 
4210 	/* @reserved parameter is mandatory for qgroup */
4211 	if (WARN_ON(!reserved_ret))
4212 		return -EINVAL;
4213 	if (!*reserved_ret) {
4214 		new_reserved = true;
4215 		*reserved_ret = extent_changeset_alloc();
4216 		if (!*reserved_ret)
4217 			return -ENOMEM;
4218 	}
4219 	reserved = *reserved_ret;
4220 	/* Record already reserved space */
4221 	orig_reserved = reserved->bytes_changed;
4222 	ret = btrfs_set_record_extent_bits(&inode->io_tree, start,
4223 					   start + len - 1, EXTENT_QGROUP_RESERVED,
4224 					   reserved);
4225 
4226 	/* Newly reserved space */
4227 	to_reserve = reserved->bytes_changed - orig_reserved;
4228 	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
4229 					to_reserve, QGROUP_RESERVE);
4230 	if (ret < 0)
4231 		goto out;
4232 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
4233 	if (ret < 0)
4234 		goto cleanup;
4235 
4236 	return ret;
4237 
4238 cleanup:
4239 	qgroup_unreserve_range(inode, reserved, start, len);
4240 out:
4241 	if (new_reserved) {
4242 		extent_changeset_free(reserved);
4243 		*reserved_ret = NULL;
4244 	}
4245 	return ret;
4246 }
4247 
4248 /*
4249  * Reserve qgroup space for range [start, start + len).
4250  *
4251  * This function will either reserve space from related qgroups or do nothing
4252  * if the range is already reserved.
4253  *
4254  * Return 0 for successful reservation
4255  * Return <0 for error (including -EDQUOT)
4256  *
4257  * NOTE: This function may sleep for memory allocation, dirty page flushing and
4258  *	 commit transaction. So caller should not hold any dirty page locked.
4259  */
4260 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
4261 			struct extent_changeset **reserved_ret, u64 start,
4262 			u64 len)
4263 {
4264 	int ret;
4265 
4266 	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
4267 	if (ret <= 0 && ret != -EDQUOT)
4268 		return ret;
4269 
4270 	ret = try_flush_qgroup(inode->root);
4271 	if (ret < 0)
4272 		return ret;
4273 	return qgroup_reserve_data(inode, reserved_ret, start, len);
4274 }
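/*
 * Illustrative caller sketch (an assumption about usage, not a caller from
 * this file): a write path pairs the reservation with a later free on its
 * error path:
 *
 *	struct extent_changeset *reserved = NULL;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	if (ret < 0)
 *		return ret;
 *	(on error before writeback, return the bytes to the qgroups)
 *	btrfs_qgroup_free_data(inode, reserved, start, len, NULL);
 *	extent_changeset_free(reserved);
 */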
4275 
4276 /* Free ranges specified by @reserved, normally in error path */
4277 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
4278 				     struct extent_changeset *reserved,
4279 				     u64 start, u64 len, u64 *freed_ret)
4280 {
4281 	struct btrfs_root *root = inode->root;
4282 	struct ulist_node *unode;
4283 	struct ulist_iterator uiter;
4284 	struct extent_changeset changeset;
4285 	u64 freed = 0;
4286 	int ret;
4287 
4288 	extent_changeset_init(&changeset);
4289 	len = round_up(start + len, root->fs_info->sectorsize);
4290 	start = round_down(start, root->fs_info->sectorsize);
4291 
4292 	ULIST_ITER_INIT(&uiter);
4293 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
4294 		u64 range_start = unode->val;
4295 		/* unode->aux is the inclusive end */
4296 		u64 range_len = unode->aux - range_start + 1;
4297 		u64 free_start;
4298 		u64 free_len;
4299 
4300 		extent_changeset_release(&changeset);
4301 
4302 		/* Only free range in range [start, start + len) */
4303 		if (range_start >= start + len ||
4304 		    range_start + range_len <= start)
4305 			continue;
4306 		free_start = max(range_start, start);
4307 		free_len = min(start + len, range_start + range_len) -
4308 			   free_start;
4309 		/*
4310 		 * TODO: Also modify reserved->ranges_reserved to reflect
4311 		 * the modification.
4312 		 *
4313 		 * However, as long as we free qgroup reserved space according to
4314 		 * EXTENT_QGROUP_RESERVED, we won't double free.
4315 		 * So there is no need to rush.
4316 		 */
4317 		ret = btrfs_clear_record_extent_bits(&inode->io_tree, free_start,
4318 						     free_start + free_len - 1,
4319 						     EXTENT_QGROUP_RESERVED,
4320 						     &changeset);
4321 		if (ret < 0)
4322 			goto out;
4323 		freed += changeset.bytes_changed;
4324 	}
4325 	btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
4326 				  BTRFS_QGROUP_RSV_DATA);
4327 	if (freed_ret)
4328 		*freed_ret = freed;
4329 	ret = 0;
4330 out:
4331 	extent_changeset_release(&changeset);
4332 	return ret;
4333 }
4334 
4335 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
4336 			struct extent_changeset *reserved, u64 start, u64 len,
4337 			u64 *released, int free)
4338 {
4339 	struct extent_changeset changeset;
4340 	int trace_op = QGROUP_RELEASE;
4341 	int ret;
4342 
4343 	if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
4344 		return btrfs_clear_record_extent_bits(&inode->io_tree, start,
4345 						      start + len - 1,
4346 						      EXTENT_QGROUP_RESERVED, NULL);
4347 	}
4348 
4349 	/* In release case, we shouldn't have @reserved */
4350 	WARN_ON(!free && reserved);
4351 	if (free && reserved)
4352 		return qgroup_free_reserved_data(inode, reserved, start, len, released);
4353 	extent_changeset_init(&changeset);
4354 	ret = btrfs_clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
4355 					     EXTENT_QGROUP_RESERVED, &changeset);
4356 	if (ret < 0)
4357 		goto out;
4358 
4359 	if (free)
4360 		trace_op = QGROUP_FREE;
4361 	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
4362 					changeset.bytes_changed, trace_op);
4363 	if (free)
4364 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4365 				btrfs_root_id(inode->root),
4366 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4367 	if (released)
4368 		*released = changeset.bytes_changed;
4369 out:
4370 	extent_changeset_release(&changeset);
4371 	return ret;
4372 }
4373 
4374 /*
4375  * Free a reserved space range from io_tree and related qgroups
4376  *
4377  * Should be called when a range of pages get invalidated before reaching disk.
4378  * Or for error cleanup case.
4379  * if @reserved is given, only reserved range in [@start, @start + @len) will
4380  * be freed.
4381  *
4382  * For data written to disk, use btrfs_qgroup_release_data().
4383  *
4384  * NOTE: This function may sleep for memory allocation.
4385  */
4386 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
4387 			   struct extent_changeset *reserved,
4388 			   u64 start, u64 len, u64 *freed)
4389 {
4390 	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
4391 }
4392 
4393 /*
4394  * Release a reserved space range from io_tree only.
4395  *
4396  * Should be called when a range of pages get written to disk and corresponding
4397  * FILE_EXTENT is inserted into corresponding root.
4398  *
4399  * Since the new qgroup accounting framework only updates qgroup numbers at
4400  * commit_transaction() time, its reserved space shouldn't be freed from the
4401  * related qgroups.
4402  *
4403  * But we should release the range from the io_tree, to allow further writes
4404  * to be COWed.
4405  *
4406  * NOTE: This function may sleep for memory allocation.
4407  */
4408 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
4409 {
4410 	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
4411 }
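/*
 * In short: btrfs_qgroup_free_data() is for error/invalidation paths and
 * returns the bytes to the qgroups immediately, while
 * btrfs_qgroup_release_data() is for successful writeback and only clears
 * the io_tree bit, letting the qgroup numbers catch up at commit time.
 */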
4412 
4413 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4414 			      enum btrfs_qgroup_rsv_type type)
4415 {
4416 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4417 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4418 		return;
4419 	if (num_bytes == 0)
4420 		return;
4421 
4422 	spin_lock(&root->qgroup_meta_rsv_lock);
4423 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
4424 		root->qgroup_meta_rsv_prealloc += num_bytes;
4425 	else
4426 		root->qgroup_meta_rsv_pertrans += num_bytes;
4427 	spin_unlock(&root->qgroup_meta_rsv_lock);
4428 }
4429 
4430 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4431 			     enum btrfs_qgroup_rsv_type type)
4432 {
4433 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4434 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4435 		return 0;
4436 	if (num_bytes == 0)
4437 		return 0;
4438 
4439 	spin_lock(&root->qgroup_meta_rsv_lock);
4440 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
4441 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
4442 				  num_bytes);
4443 		root->qgroup_meta_rsv_prealloc -= num_bytes;
4444 	} else {
4445 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
4446 				  num_bytes);
4447 		root->qgroup_meta_rsv_pertrans -= num_bytes;
4448 	}
4449 	spin_unlock(&root->qgroup_meta_rsv_lock);
4450 	return num_bytes;
4451 }
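/*
 * Worked example (illustrative numbers): if qgroup_meta_rsv_prealloc
 * records 32K but a caller asks to free 48K (e.g. after a quota
 * disable -> enable cycle), the value is clamped and only 32K is returned
 * to be freed, preventing an underflow of the reservation counters.
 */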
4452 
4453 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4454 			      enum btrfs_qgroup_rsv_type type, bool enforce)
4455 {
4456 	struct btrfs_fs_info *fs_info = root->fs_info;
4457 	int ret;
4458 
4459 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4460 	    !btrfs_is_fstree(btrfs_root_id(root)) || num_bytes == 0)
4461 		return 0;
4462 
4463 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4464 	trace_btrfs_qgroup_meta_reserve(root, (s64)num_bytes, type);
4465 	ret = qgroup_reserve(root, num_bytes, enforce, type);
4466 	if (ret < 0)
4467 		return ret;
4468 	/*
4469 	 * Record what we have reserved into the root.
4470 	 *
4471 	 * This avoids underflow across a quota disabled -> enabled cycle,
4472 	 * where we may try to free space we haven't reserved (since quota
4473 	 * was disabled).  Recording the reservation in the root ensures a
4474 	 * later release won't underflow this number.
4475 	 */
4476 	add_root_meta_rsv(root, num_bytes, type);
4477 	return ret;
4478 }
4479 
4480 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4481 				enum btrfs_qgroup_rsv_type type, bool enforce,
4482 				bool noflush)
4483 {
4484 	int ret;
4485 
4486 	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4487 	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4488 		return ret;
4489 
4490 	ret = try_flush_qgroup(root);
4491 	if (ret < 0)
4492 		return ret;
4493 	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4494 }
4495 
4496 /*
4497  * Per-transaction meta reservation should be all freed at transaction commit
4498  * time
4499  */
4500 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4501 {
4502 	struct btrfs_fs_info *fs_info = root->fs_info;
4503 
4504 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4505 	    !btrfs_is_fstree(btrfs_root_id(root)))
4506 		return;
4507 
4508 	/* TODO: Update trace point to handle such free */
4509 	trace_btrfs_qgroup_meta_free_all_pertrans(root);
4510 	/* Special value -1 means to free all reserved space */
4511 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
4512 				  BTRFS_QGROUP_RSV_META_PERTRANS);
4513 }
4514 
4515 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4516 			      enum btrfs_qgroup_rsv_type type)
4517 {
4518 	struct btrfs_fs_info *fs_info = root->fs_info;
4519 
4520 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4521 	    !btrfs_is_fstree(btrfs_root_id(root)))
4522 		return;
4523 
4524 	/*
4525 	 * Reservation for META_PREALLOC can happen before quota is enabled,
4526 	 * which can lead to underflow.
4527 	 * Here we ensure we only free what we have really reserved.
4528 	 */
4529 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4530 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4531 	trace_btrfs_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4532 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
4533 }
4534 
4535 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4536 				int num_bytes)
4537 {
4538 	struct btrfs_qgroup *qgroup;
4539 	LIST_HEAD(qgroup_list);
4540 
4541 	if (num_bytes == 0)
4542 		return;
4543 	if (!fs_info->quota_root)
4544 		return;
4545 
4546 	spin_lock(&fs_info->qgroup_lock);
4547 	qgroup = find_qgroup_rb(fs_info, ref_root);
4548 	if (!qgroup)
4549 		goto out;
4550 
4551 	qgroup_iterator_add(&qgroup_list, qgroup);
4552 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
4553 		struct btrfs_qgroup_list *glist;
4554 
4555 		qgroup_rsv_release(fs_info, qgroup, num_bytes,
4556 				BTRFS_QGROUP_RSV_META_PREALLOC);
4557 		if (!sb_rdonly(fs_info->sb))
4558 			qgroup_rsv_add(fs_info, qgroup, num_bytes,
4559 				       BTRFS_QGROUP_RSV_META_PERTRANS);
4560 
4561 		list_for_each_entry(glist, &qgroup->groups, next_group)
4562 			qgroup_iterator_add(&qgroup_list, glist->group);
4563 	}
4564 out:
4565 	qgroup_iterator_clean(&qgroup_list);
4566 	spin_unlock(&fs_info->qgroup_lock);
4567 }
4568 
4569 /*
4570  * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
4571  *
4572  * This is called when a preallocated meta reservation needs to be used.
4573  * Normally after a btrfs_join_transaction() call.
4574  */
4575 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4576 {
4577 	struct btrfs_fs_info *fs_info = root->fs_info;
4578 
4579 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4580 	    !btrfs_is_fstree(btrfs_root_id(root)))
4581 		return;
4582 	/* Same as btrfs_qgroup_free_meta_prealloc() */
4583 	num_bytes = sub_root_meta_rsv(root, num_bytes,
4584 				      BTRFS_QGROUP_RSV_META_PREALLOC);
4585 	trace_btrfs_qgroup_meta_convert(root, num_bytes);
4586 	qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
4587 	if (!sb_rdonly(fs_info->sb))
4588 		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
4589 }
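/*
 * Typical metadata reservation lifecycle (a sketch based on the helpers in
 * this file; the btrfs_join_transaction() step is an assumption about the
 * caller):
 *
 *	btrfs_qgroup_reserve_meta(root, bytes, BTRFS_QGROUP_RSV_META_PREALLOC, true);
 *	trans = btrfs_join_transaction(root);
 *	btrfs_qgroup_convert_reserved_meta(root, bytes);
 *	(the converted PERTRANS bytes are dropped at commit time via
 *	 btrfs_qgroup_free_meta_all_pertrans())
 */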
4590 
4591 /*
4592  * Check for leaked qgroup reserved space, normally at inode destroy
4593  * time.
4594  */
4595 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4596 {
4597 	struct extent_changeset changeset;
4598 	struct ulist_node *unode;
4599 	struct ulist_iterator iter;
4600 	int ret;
4601 
4602 	extent_changeset_init(&changeset);
4603 	ret = btrfs_clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4604 					     EXTENT_QGROUP_RESERVED, &changeset);
4605 
4606 	WARN_ON(ret < 0);
4607 	if (WARN_ON(changeset.bytes_changed)) {
4608 		ULIST_ITER_INIT(&iter);
4609 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4610 			btrfs_warn(inode->root->fs_info,
4611 		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4612 				btrfs_ino(inode), unode->val, unode->aux);
4613 		}
4614 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4615 				btrfs_root_id(inode->root),
4616 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4617 
4618 	}
4619 	extent_changeset_release(&changeset);
4620 }
4621 
4622 void btrfs_qgroup_init_swapped_blocks(
4623 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4624 {
4625 	int i;
4626 
4627 	spin_lock_init(&swapped_blocks->lock);
4628 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4629 		swapped_blocks->blocks[i] = RB_ROOT;
4630 	swapped_blocks->swapped = false;
4631 }
4632 
4633 /*
4634  * Delete all swapped block records of @root.
4635  * Every record here means we skipped a full subtree scan for qgroup.
4636  *
4637  * Gets called when committing a transaction.
4638  */
4639 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4640 {
4641 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4642 	int i;
4643 
4644 	swapped_blocks = &root->swapped_blocks;
4645 
4646 	spin_lock(&swapped_blocks->lock);
4647 	if (!swapped_blocks->swapped)
4648 		goto out;
4649 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4650 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4651 		struct btrfs_qgroup_swapped_block *entry;
4652 		struct btrfs_qgroup_swapped_block *next;
4653 
4654 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4655 						     node)
4656 			kfree(entry);
4657 		swapped_blocks->blocks[i] = RB_ROOT;
4658 	}
4659 	swapped_blocks->swapped = false;
4660 out:
4661 	spin_unlock(&swapped_blocks->lock);
4662 }
4663 
4664 static int qgroup_swapped_block_bytenr_key_cmp(const void *key, const struct rb_node *node)
4665 {
4666 	const u64 *bytenr = key;
4667 	const struct btrfs_qgroup_swapped_block *block = rb_entry(node,
4668 					  struct btrfs_qgroup_swapped_block, node);
4669 
4670 	if (block->subvol_bytenr < *bytenr)
4671 		return -1;
4672 	else if (block->subvol_bytenr > *bytenr)
4673 		return 1;
4674 
4675 	return 0;
4676 }
4677 
4678 static int qgroup_swapped_block_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
4679 {
4680 	const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new,
4681 					      struct btrfs_qgroup_swapped_block, node);
4682 
4683 	return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing);
4684 }
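/*
 * rb_find_add() with the comparator above inserts the new node only when
 * no existing node compares equal; on a match it returns the existing
 * node, which btrfs_qgroup_add_swapped_blocks() below uses to detect
 * duplicated swapped-block records.
 */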
4685 
4686 /*
4687  * Add a subtree root record into @subvol_root.
4688  *
4689  * @subvol_root:	tree root of the subvolume tree that got swapped
4690  * @bg:			block group under balance
4691  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4692  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4693  *			BOTH POINTERS ARE BEFORE TREE SWAP
4694  * @last_snapshot:	last snapshot generation of the subvolume tree
4695  */
4696 int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
4697 		struct btrfs_block_group *bg,
4698 		struct extent_buffer *subvol_parent, int subvol_slot,
4699 		struct extent_buffer *reloc_parent, int reloc_slot,
4700 		u64 last_snapshot)
4701 {
4702 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4703 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4704 	struct btrfs_qgroup_swapped_block *block;
4705 	struct rb_node *node;
4706 	int level = btrfs_header_level(subvol_parent) - 1;
4707 	int ret = 0;
4708 
4709 	if (!btrfs_qgroup_full_accounting(fs_info))
4710 		return 0;
4711 
4712 	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4713 	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4714 		btrfs_err_rl(fs_info,
4715 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4716 			__func__,
4717 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4718 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4719 		return -EUCLEAN;
4720 	}
4721 
4722 	block = kmalloc(sizeof(*block), GFP_NOFS);
4723 	if (!block) {
4724 		ret = -ENOMEM;
4725 		goto out;
4726 	}
4727 
4728 	/*
4729 	 * @reloc_parent/slot is still before swap, while @block is going to
4730 	 * record the bytenr after swap, so we do the swap here.
4731 	 */
4732 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4733 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4734 							     reloc_slot);
4735 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4736 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4737 							    subvol_slot);
4738 	block->last_snapshot = last_snapshot;
4739 	block->level = level;
4740 
4741 	/*
4742 	 * If we have bg == NULL, we're called from btrfs_recover_relocation()
4743 	 * and no one else can modify tree blocks, thus the qgroup numbers will
4744 	 * not change no matter the value of trace_leaf.
4745 	 */
4746 	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4747 		block->trace_leaf = true;
4748 	else
4749 		block->trace_leaf = false;
4750 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4751 
4752 	/* Insert @block into @blocks */
4753 	spin_lock(&blocks->lock);
4754 	node = rb_find_add(&block->node, &blocks->blocks[level], qgroup_swapped_block_bytenr_cmp);
4755 	if (node) {
4756 		struct btrfs_qgroup_swapped_block *entry;
4757 
4758 		entry = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4759 
4760 		if (entry->subvol_generation != block->subvol_generation ||
4761 		    entry->reloc_bytenr != block->reloc_bytenr ||
4762 		    entry->reloc_generation != block->reloc_generation) {
4763 			/*
4764 			 * Duplicated but mismatched entry found.  Shouldn't happen.
4765 			 * Marking qgroup inconsistent should be enough for end
4766 			 * users.
4767 			 */
4768 			DEBUG_WARN("duplicated but mismatched entry found");
4769 			ret = -EEXIST;
4770 		}
4771 		kfree(block);
4772 		goto out_unlock;
4773 	}
4774 	blocks->swapped = true;
4775 out_unlock:
4776 	spin_unlock(&blocks->lock);
4777 out:
4778 	if (ret < 0)
4779 		qgroup_mark_inconsistent(fs_info, "%s error: %d", __func__, ret);
4780 	return ret;
4781 }
4782 
4783 /*
4784  * Check if the tree block is a subtree root, and if so do the needed
4785  * delayed subtree trace for qgroup.
4786  *
4787  * This is called during btrfs_cow_block().
4788  */
4789 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4790 					 struct btrfs_root *root,
4791 					 struct extent_buffer *subvol_eb)
4792 {
4793 	struct btrfs_fs_info *fs_info = root->fs_info;
4794 	struct btrfs_tree_parent_check check = { 0 };
4795 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4796 	struct btrfs_qgroup_swapped_block *block;
4797 	struct extent_buffer *reloc_eb = NULL;
4798 	struct rb_node *node;
4799 	bool swapped = false;
4800 	int level = btrfs_header_level(subvol_eb);
4801 	int ret = 0;
4802 	int i;
4803 
4804 	if (!btrfs_qgroup_full_accounting(fs_info))
4805 		return 0;
4806 	if (!btrfs_is_fstree(btrfs_root_id(root)) || !root->reloc_root)
4807 		return 0;
4808 
4809 	spin_lock(&blocks->lock);
4810 	if (!blocks->swapped) {
4811 		spin_unlock(&blocks->lock);
4812 		return 0;
4813 	}
4814 	node = rb_find(&subvol_eb->start, &blocks->blocks[level],
4815 			qgroup_swapped_block_bytenr_key_cmp);
4816 	if (!node) {
4817 		spin_unlock(&blocks->lock);
4818 		goto out;
4819 	}
4820 	block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4821 
4822 	/* Found one, remove it from @blocks first and update blocks->swapped */
4823 	rb_erase(&block->node, &blocks->blocks[level]);
4824 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4825 		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4826 			swapped = true;
4827 			break;
4828 		}
4829 	}
4830 	blocks->swapped = swapped;
4831 	spin_unlock(&blocks->lock);
4832 
4833 	check.level = block->level;
4834 	check.transid = block->reloc_generation;
4835 	check.has_first_key = true;
4836 	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4837 
4838 	/* Read out reloc subtree root */
4839 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4840 	if (IS_ERR(reloc_eb)) {
4841 		ret = PTR_ERR(reloc_eb);
4842 		reloc_eb = NULL;
4843 		goto free_out;
4844 	}
4845 	if (!extent_buffer_uptodate(reloc_eb)) {
4846 		ret = -EIO;
4847 		goto free_out;
4848 	}
4849 
4850 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4851 			block->last_snapshot, block->trace_leaf);
4852 free_out:
4853 	kfree(block);
4854 	free_extent_buffer(reloc_eb);
4855 out:
4856 	if (ret < 0) {
4857 		qgroup_mark_inconsistent(fs_info,
4858 				"failed to account subtree at bytenr %llu: %d",
4859 				subvol_eb->start, ret);
4860 	}
4861 	return ret;
4862 }
4863 
4864 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4865 {
4866 	struct btrfs_qgroup_extent_record *entry;
4867 	unsigned long index;
4868 
4869 	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
4870 		ulist_free(entry->old_roots);
4871 		kfree(entry);
4872 	}
4873 	xa_destroy(&trans->delayed_refs.dirty_extents);
4874 }
4875 
4876 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
4877 			      const struct btrfs_squota_delta *delta)
4878 {
4879 	int ret;
4880 	struct btrfs_qgroup *qgroup;
4881 	struct btrfs_qgroup *qg;
4882 	LIST_HEAD(qgroup_list);
4883 	u64 root = delta->root;
4884 	u64 num_bytes = delta->num_bytes;
4885 	const int sign = (delta->is_inc ? 1 : -1);
4886 
4887 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4888 		return 0;
4889 
4890 	if (!btrfs_is_fstree(root))
4891 		return 0;
4892 
4893 	/* If the extent predates enabling quotas, don't count it. */
4894 	if (delta->generation < fs_info->qgroup_enable_gen)
4895 		return 0;
4896 
4897 	spin_lock(&fs_info->qgroup_lock);
4898 	qgroup = find_qgroup_rb(fs_info, root);
4899 	if (!qgroup) {
4900 		ret = -ENOENT;
4901 		goto out;
4902 	}
4903 
4904 	ret = 0;
4905 	qgroup_iterator_add(&qgroup_list, qgroup);
4906 	list_for_each_entry(qg, &qgroup_list, iterator) {
4907 		struct btrfs_qgroup_list *glist;
4908 
4909 		qg->excl += num_bytes * sign;
4910 		qg->rfer += num_bytes * sign;
4911 		qgroup_dirty(fs_info, qg);
4912 
4913 		list_for_each_entry(glist, &qg->groups, next_group)
4914 			qgroup_iterator_add(&qgroup_list, glist->group);
4915 	}
4916 	qgroup_iterator_clean(&qgroup_list);
4917 
4918 out:
4919 	spin_unlock(&fs_info->qgroup_lock);
4920 	return ret;
4921 }
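/*
 * Worked example for the delta handling above (illustrative numbers): if
 * simple quotas were enabled at generation 100, a delta for an extent of
 * generation 90 is skipped by the qgroup_enable_gen check, while an
 * is_inc == false delta for a generation 150 extent subtracts num_bytes
 * from both excl and rfer of the owning qgroup and all its ancestors.
 */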
4922