xref: /linux/fs/btrfs/qgroup.c (revision 4e46774408d942efe4eb35dc62e5af3af71b9a30)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 STRATO.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/workqueue.h>
13 #include <linux/btrfs.h>
14 #include <linux/sched/mm.h>
15 
16 #include "ctree.h"
17 #include "transaction.h"
18 #include "disk-io.h"
19 #include "locking.h"
20 #include "ulist.h"
21 #include "backref.h"
22 #include "extent_io.h"
23 #include "qgroup.h"
24 #include "block-group.h"
25 #include "sysfs.h"
26 #include "tree-mod-log.h"
27 #include "fs.h"
28 #include "accessors.h"
29 #include "extent-tree.h"
30 #include "root-tree.h"
31 #include "tree-checker.h"
32 
btrfs_qgroup_mode(const struct btrfs_fs_info * fs_info)33 enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)
34 {
35 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
36 		return BTRFS_QGROUP_MODE_DISABLED;
37 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
38 		return BTRFS_QGROUP_MODE_SIMPLE;
39 	return BTRFS_QGROUP_MODE_FULL;
40 }
41 
btrfs_qgroup_enabled(const struct btrfs_fs_info * fs_info)42 bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)
43 {
44 	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
45 }
46 
btrfs_qgroup_full_accounting(const struct btrfs_fs_info * fs_info)47 bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)
48 {
49 	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
50 }
51 
52 /*
53  * Helpers to access qgroup reservation
54  *
55  * Callers should ensure the lock context and type are valid
56  */
57 
qgroup_rsv_total(const struct btrfs_qgroup * qgroup)58 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
59 {
60 	u64 ret = 0;
61 	int i;
62 
63 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
64 		ret += qgroup->rsv.values[i];
65 
66 	return ret;
67 }
68 
69 #ifdef CONFIG_BTRFS_DEBUG
qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)70 static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
71 {
72 	if (type == BTRFS_QGROUP_RSV_DATA)
73 		return "data";
74 	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
75 		return "meta_pertrans";
76 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
77 		return "meta_prealloc";
78 	return NULL;
79 }
80 #endif
81 
qgroup_rsv_add(struct btrfs_fs_info * fs_info,struct btrfs_qgroup * qgroup,u64 num_bytes,enum btrfs_qgroup_rsv_type type)82 static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
83 			   struct btrfs_qgroup *qgroup, u64 num_bytes,
84 			   enum btrfs_qgroup_rsv_type type)
85 {
86 	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
87 	qgroup->rsv.values[type] += num_bytes;
88 }
89 
qgroup_rsv_release(struct btrfs_fs_info * fs_info,struct btrfs_qgroup * qgroup,u64 num_bytes,enum btrfs_qgroup_rsv_type type)90 static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
91 			       struct btrfs_qgroup *qgroup, u64 num_bytes,
92 			       enum btrfs_qgroup_rsv_type type)
93 {
94 	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
95 	if (qgroup->rsv.values[type] >= num_bytes) {
96 		qgroup->rsv.values[type] -= num_bytes;
97 		return;
98 	}
99 #ifdef CONFIG_BTRFS_DEBUG
100 	WARN_RATELIMIT(1,
101 		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
102 		qgroup->qgroupid, qgroup_rsv_type_str(type),
103 		qgroup->rsv.values[type], num_bytes);
104 #endif
105 	qgroup->rsv.values[type] = 0;
106 }
107 
qgroup_rsv_add_by_qgroup(struct btrfs_fs_info * fs_info,struct btrfs_qgroup * dest,const struct btrfs_qgroup * src)108 static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
109 				     struct btrfs_qgroup *dest,
110 				     const struct btrfs_qgroup *src)
111 {
112 	int i;
113 
114 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
115 		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
116 }
117 
qgroup_rsv_release_by_qgroup(struct btrfs_fs_info * fs_info,struct btrfs_qgroup * dest,const struct btrfs_qgroup * src)118 static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
119 					 struct btrfs_qgroup *dest,
120 					 const struct btrfs_qgroup *src)
121 {
122 	int i;
123 
124 	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
125 		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
126 }
127 
btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup * qg,u64 seq,int mod)128 static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
129 					   int mod)
130 {
131 	if (qg->old_refcnt < seq)
132 		qg->old_refcnt = seq;
133 	qg->old_refcnt += mod;
134 }
135 
btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup * qg,u64 seq,int mod)136 static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
137 					   int mod)
138 {
139 	if (qg->new_refcnt < seq)
140 		qg->new_refcnt = seq;
141 	qg->new_refcnt += mod;
142 }
143 
btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup * qg,u64 seq)144 static inline u64 btrfs_qgroup_get_old_refcnt(const struct btrfs_qgroup *qg, u64 seq)
145 {
146 	if (qg->old_refcnt < seq)
147 		return 0;
148 	return qg->old_refcnt - seq;
149 }
150 
btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup * qg,u64 seq)151 static inline u64 btrfs_qgroup_get_new_refcnt(const struct btrfs_qgroup *qg, u64 seq)
152 {
153 	if (qg->new_refcnt < seq)
154 		return 0;
155 	return qg->new_refcnt - seq;
156 }
157 
158 static int
159 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
160 		   int init_flags);
161 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
162 
163 /* must be called with qgroup_ioctl_lock held */
find_qgroup_rb(const struct btrfs_fs_info * fs_info,u64 qgroupid)164 static struct btrfs_qgroup *find_qgroup_rb(const struct btrfs_fs_info *fs_info,
165 					   u64 qgroupid)
166 {
167 	struct rb_node *n = fs_info->qgroup_tree.rb_node;
168 	struct btrfs_qgroup *qgroup;
169 
170 	while (n) {
171 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
172 		if (qgroup->qgroupid < qgroupid)
173 			n = n->rb_left;
174 		else if (qgroup->qgroupid > qgroupid)
175 			n = n->rb_right;
176 		else
177 			return qgroup;
178 	}
179 	return NULL;
180 }
181 
182 /*
183  * Add qgroup to the filesystem's qgroup tree.
184  *
185  * Must be called with qgroup_lock held and @prealloc preallocated.
186  *
187  * The control on the lifespan of @prealloc would be transferred to this
188  * function, thus caller should no longer touch @prealloc.
189  */
add_qgroup_rb(struct btrfs_fs_info * fs_info,struct btrfs_qgroup * prealloc,u64 qgroupid)190 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
191 					  struct btrfs_qgroup *prealloc,
192 					  u64 qgroupid)
193 {
194 	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
195 	struct rb_node *parent = NULL;
196 	struct btrfs_qgroup *qgroup;
197 
198 	/* Caller must have pre-allocated @prealloc. */
199 	ASSERT(prealloc);
200 
201 	while (*p) {
202 		parent = *p;
203 		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
204 
205 		if (qgroup->qgroupid < qgroupid) {
206 			p = &(*p)->rb_left;
207 		} else if (qgroup->qgroupid > qgroupid) {
208 			p = &(*p)->rb_right;
209 		} else {
210 			kfree(prealloc);
211 			return qgroup;
212 		}
213 	}
214 
215 	qgroup = prealloc;
216 	qgroup->qgroupid = qgroupid;
217 	INIT_LIST_HEAD(&qgroup->groups);
218 	INIT_LIST_HEAD(&qgroup->members);
219 	INIT_LIST_HEAD(&qgroup->dirty);
220 	INIT_LIST_HEAD(&qgroup->iterator);
221 	INIT_LIST_HEAD(&qgroup->nested_iterator);
222 
223 	rb_link_node(&qgroup->node, parent, p);
224 	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
225 
226 	return qgroup;
227 }
228 
__del_qgroup_rb(struct btrfs_fs_info * fs_info,struct btrfs_qgroup * qgroup)229 static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
230 			    struct btrfs_qgroup *qgroup)
231 {
232 	struct btrfs_qgroup_list *list;
233 
234 	list_del(&qgroup->dirty);
235 	while (!list_empty(&qgroup->groups)) {
236 		list = list_first_entry(&qgroup->groups,
237 					struct btrfs_qgroup_list, next_group);
238 		list_del(&list->next_group);
239 		list_del(&list->next_member);
240 		kfree(list);
241 	}
242 
243 	while (!list_empty(&qgroup->members)) {
244 		list = list_first_entry(&qgroup->members,
245 					struct btrfs_qgroup_list, next_member);
246 		list_del(&list->next_group);
247 		list_del(&list->next_member);
248 		kfree(list);
249 	}
250 }
251 
252 /* must be called with qgroup_lock held */
del_qgroup_rb(struct btrfs_fs_info * fs_info,u64 qgroupid)253 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
254 {
255 	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
256 
257 	if (!qgroup)
258 		return -ENOENT;
259 
260 	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
261 	__del_qgroup_rb(fs_info, qgroup);
262 	return 0;
263 }
264 
265 /*
266  * Add relation specified by two qgroups.
267  *
268  * Must be called with qgroup_lock held, the ownership of @prealloc is
269  * transferred to this function and caller should not touch it anymore.
270  *
271  * Return: 0        on success
272  *         -ENOENT  if one of the qgroups is NULL
273  *         <0       other errors
274  */
__add_relation_rb(struct btrfs_qgroup_list * prealloc,struct btrfs_qgroup * member,struct btrfs_qgroup * parent)275 static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
276 			     struct btrfs_qgroup *member,
277 			     struct btrfs_qgroup *parent)
278 {
279 	if (!member || !parent) {
280 		kfree(prealloc);
281 		return -ENOENT;
282 	}
283 
284 	prealloc->group = parent;
285 	prealloc->member = member;
286 	list_add_tail(&prealloc->next_group, &member->groups);
287 	list_add_tail(&prealloc->next_member, &parent->members);
288 
289 	return 0;
290 }
291 
292 /*
293  * Add relation specified by two qgroup ids.
294  *
295  * Must be called with qgroup_lock held.
296  *
297  * Return: 0        on success
298  *         -ENOENT  if one of the ids does not exist
299  *         <0       other errors
300  */
add_relation_rb(struct btrfs_fs_info * fs_info,struct btrfs_qgroup_list * prealloc,u64 memberid,u64 parentid)301 static int add_relation_rb(struct btrfs_fs_info *fs_info,
302 			   struct btrfs_qgroup_list *prealloc,
303 			   u64 memberid, u64 parentid)
304 {
305 	struct btrfs_qgroup *member;
306 	struct btrfs_qgroup *parent;
307 
308 	member = find_qgroup_rb(fs_info, memberid);
309 	parent = find_qgroup_rb(fs_info, parentid);
310 
311 	return __add_relation_rb(prealloc, member, parent);
312 }
313 
314 /* Must be called with qgroup_lock held */
del_relation_rb(struct btrfs_fs_info * fs_info,u64 memberid,u64 parentid)315 static int del_relation_rb(struct btrfs_fs_info *fs_info,
316 			   u64 memberid, u64 parentid)
317 {
318 	struct btrfs_qgroup *member;
319 	struct btrfs_qgroup *parent;
320 	struct btrfs_qgroup_list *list;
321 
322 	member = find_qgroup_rb(fs_info, memberid);
323 	parent = find_qgroup_rb(fs_info, parentid);
324 	if (!member || !parent)
325 		return -ENOENT;
326 
327 	list_for_each_entry(list, &member->groups, next_group) {
328 		if (list->group == parent) {
329 			list_del(&list->next_group);
330 			list_del(&list->next_member);
331 			kfree(list);
332 			return 0;
333 		}
334 	}
335 	return -ENOENT;
336 }
337 
338 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
btrfs_verify_qgroup_counts(const struct btrfs_fs_info * fs_info,u64 qgroupid,u64 rfer,u64 excl)339 int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
340 			       u64 rfer, u64 excl)
341 {
342 	struct btrfs_qgroup *qgroup;
343 
344 	qgroup = find_qgroup_rb(fs_info, qgroupid);
345 	if (!qgroup)
346 		return -EINVAL;
347 	if (qgroup->rfer != rfer || qgroup->excl != excl)
348 		return -EINVAL;
349 	return 0;
350 }
351 #endif
352 
qgroup_mark_inconsistent(struct btrfs_fs_info * fs_info)353 static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
354 {
355 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
356 		return;
357 	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
358 				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
359 				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
360 }
361 
qgroup_read_enable_gen(struct btrfs_fs_info * fs_info,struct extent_buffer * leaf,int slot,struct btrfs_qgroup_status_item * ptr)362 static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
363 				   struct extent_buffer *leaf, int slot,
364 				   struct btrfs_qgroup_status_item *ptr)
365 {
366 	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
367 	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
368 	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
369 }
370 
371 /*
372  * The full config is read in one go, only called from open_ctree()
373  * It doesn't use any locking, as at this point we're still single-threaded
374  */
btrfs_read_qgroup_config(struct btrfs_fs_info * fs_info)375 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
376 {
377 	struct btrfs_key key;
378 	struct btrfs_key found_key;
379 	struct btrfs_root *quota_root = fs_info->quota_root;
380 	struct btrfs_path *path = NULL;
381 	struct extent_buffer *l;
382 	int slot;
383 	int ret = 0;
384 	u64 flags = 0;
385 	u64 rescan_progress = 0;
386 
387 	if (!fs_info->quota_root)
388 		return 0;
389 
390 	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
391 	if (!fs_info->qgroup_ulist) {
392 		ret = -ENOMEM;
393 		goto out;
394 	}
395 
396 	path = btrfs_alloc_path();
397 	if (!path) {
398 		ret = -ENOMEM;
399 		goto out;
400 	}
401 
402 	ret = btrfs_sysfs_add_qgroups(fs_info);
403 	if (ret < 0)
404 		goto out;
405 	/* default this to quota off, in case no status key is found */
406 	fs_info->qgroup_flags = 0;
407 
408 	/*
409 	 * pass 1: read status, all qgroup infos and limits
410 	 */
411 	key.objectid = 0;
412 	key.type = 0;
413 	key.offset = 0;
414 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
415 	if (ret)
416 		goto out;
417 
418 	while (1) {
419 		struct btrfs_qgroup *qgroup;
420 
421 		slot = path->slots[0];
422 		l = path->nodes[0];
423 		btrfs_item_key_to_cpu(l, &found_key, slot);
424 
425 		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
426 			struct btrfs_qgroup_status_item *ptr;
427 
428 			ptr = btrfs_item_ptr(l, slot,
429 					     struct btrfs_qgroup_status_item);
430 
431 			if (btrfs_qgroup_status_version(l, ptr) !=
432 			    BTRFS_QGROUP_STATUS_VERSION) {
433 				btrfs_err(fs_info,
434 				 "old qgroup version, quota disabled");
435 				goto out;
436 			}
437 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
438 			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
439 				qgroup_read_enable_gen(fs_info, l, slot, ptr);
440 			} else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
441 				qgroup_mark_inconsistent(fs_info);
442 				btrfs_err(fs_info,
443 					"qgroup generation mismatch, marked as inconsistent");
444 			}
445 			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
446 			goto next1;
447 		}
448 
449 		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
450 		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
451 			goto next1;
452 
453 		qgroup = find_qgroup_rb(fs_info, found_key.offset);
454 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
455 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
456 			btrfs_err(fs_info, "inconsistent qgroup config");
457 			qgroup_mark_inconsistent(fs_info);
458 		}
459 		if (!qgroup) {
460 			struct btrfs_qgroup *prealloc;
461 			struct btrfs_root *tree_root = fs_info->tree_root;
462 
463 			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
464 			if (!prealloc) {
465 				ret = -ENOMEM;
466 				goto out;
467 			}
468 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
469 			/*
470 			 * If a qgroup exists for a subvolume ID, it is possible
471 			 * that subvolume has been deleted, in which case
472 			 * re-using that ID would lead to incorrect accounting.
473 			 *
474 			 * Ensure that we skip any such subvol ids.
475 			 *
476 			 * We don't need to lock because this is only called
477 			 * during mount before we start doing things like creating
478 			 * subvolumes.
479 			 */
480 			if (is_fstree(qgroup->qgroupid) &&
481 			    qgroup->qgroupid > tree_root->free_objectid)
482 				/*
483 				 * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
484 				 * as it will get checked on the next call to
485 				 * btrfs_get_free_objectid.
486 				 */
487 				tree_root->free_objectid = qgroup->qgroupid + 1;
488 		}
489 		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
490 		if (ret < 0)
491 			goto out;
492 
493 		switch (found_key.type) {
494 		case BTRFS_QGROUP_INFO_KEY: {
495 			struct btrfs_qgroup_info_item *ptr;
496 
497 			ptr = btrfs_item_ptr(l, slot,
498 					     struct btrfs_qgroup_info_item);
499 			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
500 			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
501 			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
502 			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
503 			/* generation currently unused */
504 			break;
505 		}
506 		case BTRFS_QGROUP_LIMIT_KEY: {
507 			struct btrfs_qgroup_limit_item *ptr;
508 
509 			ptr = btrfs_item_ptr(l, slot,
510 					     struct btrfs_qgroup_limit_item);
511 			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
512 			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
513 			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
514 			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
515 			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
516 			break;
517 		}
518 		}
519 next1:
520 		ret = btrfs_next_item(quota_root, path);
521 		if (ret < 0)
522 			goto out;
523 		if (ret)
524 			break;
525 	}
526 	btrfs_release_path(path);
527 
528 	/*
529 	 * pass 2: read all qgroup relations
530 	 */
531 	key.objectid = 0;
532 	key.type = BTRFS_QGROUP_RELATION_KEY;
533 	key.offset = 0;
534 	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
535 	if (ret)
536 		goto out;
537 	while (1) {
538 		struct btrfs_qgroup_list *list = NULL;
539 
540 		slot = path->slots[0];
541 		l = path->nodes[0];
542 		btrfs_item_key_to_cpu(l, &found_key, slot);
543 
544 		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
545 			goto next2;
546 
547 		if (found_key.objectid > found_key.offset) {
548 			/* parent <- member, not needed to build config */
549 			/* FIXME should we omit the key completely? */
550 			goto next2;
551 		}
552 
553 		list = kzalloc(sizeof(*list), GFP_KERNEL);
554 		if (!list) {
555 			ret = -ENOMEM;
556 			goto out;
557 		}
558 		ret = add_relation_rb(fs_info, list, found_key.objectid,
559 				      found_key.offset);
560 		list = NULL;
561 		if (ret == -ENOENT) {
562 			btrfs_warn(fs_info,
563 				"orphan qgroup relation 0x%llx->0x%llx",
564 				found_key.objectid, found_key.offset);
565 			ret = 0;	/* ignore the error */
566 		}
567 		if (ret)
568 			goto out;
569 next2:
570 		ret = btrfs_next_item(quota_root, path);
571 		if (ret < 0)
572 			goto out;
573 		if (ret)
574 			break;
575 	}
576 out:
577 	btrfs_free_path(path);
578 	fs_info->qgroup_flags |= flags;
579 	if (ret >= 0) {
580 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
581 			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
582 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
583 			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
584 	} else {
585 		ulist_free(fs_info->qgroup_ulist);
586 		fs_info->qgroup_ulist = NULL;
587 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
588 		btrfs_sysfs_del_qgroups(fs_info);
589 	}
590 
591 	return ret < 0 ? ret : 0;
592 }
593 
594 /*
595  * Called in close_ctree() when quota is still enabled.  This verifies we don't
596  * leak some reserved space.
597  *
598  * Return false if no reserved space is left.
599  * Return true if some reserved space is leaked.
600  */
btrfs_check_quota_leak(const struct btrfs_fs_info * fs_info)601 bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info)
602 {
603 	struct rb_node *node;
604 	bool ret = false;
605 
606 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
607 		return ret;
608 	/*
609 	 * Since we're unmounting, there is no race and no need to grab qgroup
610 	 * lock.  And here we don't go post-order to provide a more user
611 	 * friendly sorted result.
612 	 */
613 	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
614 		struct btrfs_qgroup *qgroup;
615 		int i;
616 
617 		qgroup = rb_entry(node, struct btrfs_qgroup, node);
618 		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
619 			if (qgroup->rsv.values[i]) {
620 				ret = true;
621 				btrfs_warn(fs_info,
622 		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
623 				   btrfs_qgroup_level(qgroup->qgroupid),
624 				   btrfs_qgroup_subvolid(qgroup->qgroupid),
625 				   i, qgroup->rsv.values[i]);
626 			}
627 		}
628 	}
629 	return ret;
630 }
631 
632 /*
633  * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
634  * first two are in single-threaded paths.And for the third one, we have set
635  * quota_root to be null with qgroup_lock held before, so it is safe to clean
636  * up the in-memory structures without qgroup_lock held.
637  */
btrfs_free_qgroup_config(struct btrfs_fs_info * fs_info)638 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
639 {
640 	struct rb_node *n;
641 	struct btrfs_qgroup *qgroup;
642 
643 	while ((n = rb_first(&fs_info->qgroup_tree))) {
644 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
645 		rb_erase(n, &fs_info->qgroup_tree);
646 		__del_qgroup_rb(fs_info, qgroup);
647 		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
648 		kfree(qgroup);
649 	}
650 	/*
651 	 * We call btrfs_free_qgroup_config() when unmounting
652 	 * filesystem and disabling quota, so we set qgroup_ulist
653 	 * to be null here to avoid double free.
654 	 */
655 	ulist_free(fs_info->qgroup_ulist);
656 	fs_info->qgroup_ulist = NULL;
657 	btrfs_sysfs_del_qgroups(fs_info);
658 }
659 
add_qgroup_relation_item(struct btrfs_trans_handle * trans,u64 src,u64 dst)660 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
661 				    u64 dst)
662 {
663 	int ret;
664 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
665 	struct btrfs_path *path;
666 	struct btrfs_key key;
667 
668 	path = btrfs_alloc_path();
669 	if (!path)
670 		return -ENOMEM;
671 
672 	key.objectid = src;
673 	key.type = BTRFS_QGROUP_RELATION_KEY;
674 	key.offset = dst;
675 
676 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
677 
678 	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
679 
680 	btrfs_free_path(path);
681 	return ret;
682 }
683 
del_qgroup_relation_item(struct btrfs_trans_handle * trans,u64 src,u64 dst)684 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
685 				    u64 dst)
686 {
687 	int ret;
688 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
689 	struct btrfs_path *path;
690 	struct btrfs_key key;
691 
692 	path = btrfs_alloc_path();
693 	if (!path)
694 		return -ENOMEM;
695 
696 	key.objectid = src;
697 	key.type = BTRFS_QGROUP_RELATION_KEY;
698 	key.offset = dst;
699 
700 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
701 	if (ret < 0)
702 		goto out;
703 
704 	if (ret > 0) {
705 		ret = -ENOENT;
706 		goto out;
707 	}
708 
709 	ret = btrfs_del_item(trans, quota_root, path);
710 out:
711 	btrfs_free_path(path);
712 	return ret;
713 }
714 
add_qgroup_item(struct btrfs_trans_handle * trans,struct btrfs_root * quota_root,u64 qgroupid)715 static int add_qgroup_item(struct btrfs_trans_handle *trans,
716 			   struct btrfs_root *quota_root, u64 qgroupid)
717 {
718 	int ret;
719 	struct btrfs_path *path;
720 	struct btrfs_qgroup_info_item *qgroup_info;
721 	struct btrfs_qgroup_limit_item *qgroup_limit;
722 	struct extent_buffer *leaf;
723 	struct btrfs_key key;
724 
725 	if (btrfs_is_testing(quota_root->fs_info))
726 		return 0;
727 
728 	path = btrfs_alloc_path();
729 	if (!path)
730 		return -ENOMEM;
731 
732 	key.objectid = 0;
733 	key.type = BTRFS_QGROUP_INFO_KEY;
734 	key.offset = qgroupid;
735 
736 	/*
737 	 * Avoid a transaction abort by catching -EEXIST here. In that
738 	 * case, we proceed by re-initializing the existing structure
739 	 * on disk.
740 	 */
741 
742 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
743 				      sizeof(*qgroup_info));
744 	if (ret && ret != -EEXIST)
745 		goto out;
746 
747 	leaf = path->nodes[0];
748 	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
749 				 struct btrfs_qgroup_info_item);
750 	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
751 	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
752 	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
753 	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
754 	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
755 
756 	btrfs_mark_buffer_dirty(trans, leaf);
757 
758 	btrfs_release_path(path);
759 
760 	key.type = BTRFS_QGROUP_LIMIT_KEY;
761 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
762 				      sizeof(*qgroup_limit));
763 	if (ret && ret != -EEXIST)
764 		goto out;
765 
766 	leaf = path->nodes[0];
767 	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
768 				  struct btrfs_qgroup_limit_item);
769 	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
770 	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
771 	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
772 	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
773 	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
774 
775 	btrfs_mark_buffer_dirty(trans, leaf);
776 
777 	ret = 0;
778 out:
779 	btrfs_free_path(path);
780 	return ret;
781 }
782 
del_qgroup_item(struct btrfs_trans_handle * trans,u64 qgroupid)783 static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
784 {
785 	int ret;
786 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
787 	struct btrfs_path *path;
788 	struct btrfs_key key;
789 
790 	path = btrfs_alloc_path();
791 	if (!path)
792 		return -ENOMEM;
793 
794 	key.objectid = 0;
795 	key.type = BTRFS_QGROUP_INFO_KEY;
796 	key.offset = qgroupid;
797 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
798 	if (ret < 0)
799 		goto out;
800 
801 	if (ret > 0) {
802 		ret = -ENOENT;
803 		goto out;
804 	}
805 
806 	ret = btrfs_del_item(trans, quota_root, path);
807 	if (ret)
808 		goto out;
809 
810 	btrfs_release_path(path);
811 
812 	key.type = BTRFS_QGROUP_LIMIT_KEY;
813 	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
814 	if (ret < 0)
815 		goto out;
816 
817 	if (ret > 0) {
818 		ret = -ENOENT;
819 		goto out;
820 	}
821 
822 	ret = btrfs_del_item(trans, quota_root, path);
823 
824 out:
825 	btrfs_free_path(path);
826 	return ret;
827 }
828 
update_qgroup_limit_item(struct btrfs_trans_handle * trans,struct btrfs_qgroup * qgroup)829 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
830 				    struct btrfs_qgroup *qgroup)
831 {
832 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
833 	struct btrfs_path *path;
834 	struct btrfs_key key;
835 	struct extent_buffer *l;
836 	struct btrfs_qgroup_limit_item *qgroup_limit;
837 	int ret;
838 	int slot;
839 
840 	key.objectid = 0;
841 	key.type = BTRFS_QGROUP_LIMIT_KEY;
842 	key.offset = qgroup->qgroupid;
843 
844 	path = btrfs_alloc_path();
845 	if (!path)
846 		return -ENOMEM;
847 
848 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
849 	if (ret > 0)
850 		ret = -ENOENT;
851 
852 	if (ret)
853 		goto out;
854 
855 	l = path->nodes[0];
856 	slot = path->slots[0];
857 	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
858 	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
859 	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
860 	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
861 	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
862 	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
863 
864 	btrfs_mark_buffer_dirty(trans, l);
865 
866 out:
867 	btrfs_free_path(path);
868 	return ret;
869 }
870 
update_qgroup_info_item(struct btrfs_trans_handle * trans,struct btrfs_qgroup * qgroup)871 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
872 				   struct btrfs_qgroup *qgroup)
873 {
874 	struct btrfs_fs_info *fs_info = trans->fs_info;
875 	struct btrfs_root *quota_root = fs_info->quota_root;
876 	struct btrfs_path *path;
877 	struct btrfs_key key;
878 	struct extent_buffer *l;
879 	struct btrfs_qgroup_info_item *qgroup_info;
880 	int ret;
881 	int slot;
882 
883 	if (btrfs_is_testing(fs_info))
884 		return 0;
885 
886 	key.objectid = 0;
887 	key.type = BTRFS_QGROUP_INFO_KEY;
888 	key.offset = qgroup->qgroupid;
889 
890 	path = btrfs_alloc_path();
891 	if (!path)
892 		return -ENOMEM;
893 
894 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
895 	if (ret > 0)
896 		ret = -ENOENT;
897 
898 	if (ret)
899 		goto out;
900 
901 	l = path->nodes[0];
902 	slot = path->slots[0];
903 	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
904 	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
905 	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
906 	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
907 	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
908 	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
909 
910 	btrfs_mark_buffer_dirty(trans, l);
911 
912 out:
913 	btrfs_free_path(path);
914 	return ret;
915 }
916 
update_qgroup_status_item(struct btrfs_trans_handle * trans)917 static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
918 {
919 	struct btrfs_fs_info *fs_info = trans->fs_info;
920 	struct btrfs_root *quota_root = fs_info->quota_root;
921 	struct btrfs_path *path;
922 	struct btrfs_key key;
923 	struct extent_buffer *l;
924 	struct btrfs_qgroup_status_item *ptr;
925 	int ret;
926 	int slot;
927 
928 	key.objectid = 0;
929 	key.type = BTRFS_QGROUP_STATUS_KEY;
930 	key.offset = 0;
931 
932 	path = btrfs_alloc_path();
933 	if (!path)
934 		return -ENOMEM;
935 
936 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
937 	if (ret > 0)
938 		ret = -ENOENT;
939 
940 	if (ret)
941 		goto out;
942 
943 	l = path->nodes[0];
944 	slot = path->slots[0];
945 	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
946 	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
947 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
948 	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
949 	btrfs_set_qgroup_status_rescan(l, ptr,
950 				fs_info->qgroup_rescan_progress.objectid);
951 
952 	btrfs_mark_buffer_dirty(trans, l);
953 
954 out:
955 	btrfs_free_path(path);
956 	return ret;
957 }
958 
959 /*
960  * called with qgroup_lock held
961  */
btrfs_clean_quota_tree(struct btrfs_trans_handle * trans,struct btrfs_root * root)962 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
963 				  struct btrfs_root *root)
964 {
965 	struct btrfs_path *path;
966 	struct btrfs_key key;
967 	struct extent_buffer *leaf = NULL;
968 	int ret;
969 	int nr = 0;
970 
971 	path = btrfs_alloc_path();
972 	if (!path)
973 		return -ENOMEM;
974 
975 	key.objectid = 0;
976 	key.offset = 0;
977 	key.type = 0;
978 
979 	while (1) {
980 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
981 		if (ret < 0)
982 			goto out;
983 		leaf = path->nodes[0];
984 		nr = btrfs_header_nritems(leaf);
985 		if (!nr)
986 			break;
987 		/*
988 		 * delete the leaf one by one
989 		 * since the whole tree is going
990 		 * to be deleted.
991 		 */
992 		path->slots[0] = 0;
993 		ret = btrfs_del_items(trans, root, path, 0, nr);
994 		if (ret)
995 			goto out;
996 
997 		btrfs_release_path(path);
998 	}
999 	ret = 0;
1000 out:
1001 	btrfs_free_path(path);
1002 	return ret;
1003 }
1004 
btrfs_quota_enable(struct btrfs_fs_info * fs_info,struct btrfs_ioctl_quota_ctl_args * quota_ctl_args)1005 int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
1006 		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
1007 {
1008 	struct btrfs_root *quota_root;
1009 	struct btrfs_root *tree_root = fs_info->tree_root;
1010 	struct btrfs_path *path = NULL;
1011 	struct btrfs_qgroup_status_item *ptr;
1012 	struct extent_buffer *leaf;
1013 	struct btrfs_key key;
1014 	struct btrfs_key found_key;
1015 	struct btrfs_qgroup *qgroup = NULL;
1016 	struct btrfs_qgroup *prealloc = NULL;
1017 	struct btrfs_trans_handle *trans = NULL;
1018 	struct ulist *ulist = NULL;
1019 	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
1020 	int ret = 0;
1021 	int slot;
1022 
1023 	/*
1024 	 * We need to have subvol_sem write locked, to prevent races between
1025 	 * concurrent tasks trying to enable quotas, because we will unlock
1026 	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
1027 	 * and before setting BTRFS_FS_QUOTA_ENABLED.
1028 	 */
1029 	lockdep_assert_held_write(&fs_info->subvol_sem);
1030 
1031 	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
1032 		btrfs_err(fs_info,
1033 			  "qgroups are currently unsupported in extent tree v2");
1034 		return -EINVAL;
1035 	}
1036 
1037 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1038 	if (fs_info->quota_root)
1039 		goto out;
1040 
1041 	ulist = ulist_alloc(GFP_KERNEL);
1042 	if (!ulist) {
1043 		ret = -ENOMEM;
1044 		goto out;
1045 	}
1046 
1047 	ret = btrfs_sysfs_add_qgroups(fs_info);
1048 	if (ret < 0)
1049 		goto out;
1050 
1051 	/*
1052 	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
1053 	 * avoid lock acquisition inversion problems (reported by lockdep) between
1054 	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
1055 	 * start a transaction.
1056 	 * After we started the transaction lock qgroup_ioctl_lock again and
1057 	 * check if someone else created the quota root in the meanwhile. If so,
1058 	 * just return success and release the transaction handle.
1059 	 *
1060 	 * Also we don't need to worry about someone else calling
1061 	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
1062 	 * that function returns 0 (success) when the sysfs entries already exist.
1063 	 */
1064 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1065 
1066 	/*
1067 	 * 1 for quota root item
1068 	 * 1 for BTRFS_QGROUP_STATUS item
1069 	 *
1070 	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
1071 	 * per subvolume. However those are not currently reserved since it
1072 	 * would be a lot of overkill.
1073 	 */
1074 	trans = btrfs_start_transaction(tree_root, 2);
1075 
1076 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1077 	if (IS_ERR(trans)) {
1078 		ret = PTR_ERR(trans);
1079 		trans = NULL;
1080 		goto out;
1081 	}
1082 
1083 	if (fs_info->quota_root)
1084 		goto out;
1085 
1086 	fs_info->qgroup_ulist = ulist;
1087 	ulist = NULL;
1088 
1089 	/*
1090 	 * initially create the quota tree
1091 	 */
1092 	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
1093 	if (IS_ERR(quota_root)) {
1094 		ret =  PTR_ERR(quota_root);
1095 		btrfs_abort_transaction(trans, ret);
1096 		goto out;
1097 	}
1098 
1099 	path = btrfs_alloc_path();
1100 	if (!path) {
1101 		ret = -ENOMEM;
1102 		btrfs_abort_transaction(trans, ret);
1103 		goto out_free_root;
1104 	}
1105 
1106 	key.objectid = 0;
1107 	key.type = BTRFS_QGROUP_STATUS_KEY;
1108 	key.offset = 0;
1109 
1110 	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
1111 				      sizeof(*ptr));
1112 	if (ret) {
1113 		btrfs_abort_transaction(trans, ret);
1114 		goto out_free_path;
1115 	}
1116 
1117 	leaf = path->nodes[0];
1118 	ptr = btrfs_item_ptr(leaf, path->slots[0],
1119 				 struct btrfs_qgroup_status_item);
1120 	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
1121 	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
1122 	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
1123 	if (simple) {
1124 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1125 		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
1126 	} else {
1127 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1128 	}
1129 	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
1130 				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
1131 	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
1132 
1133 	btrfs_mark_buffer_dirty(trans, leaf);
1134 
1135 	key.objectid = 0;
1136 	key.type = BTRFS_ROOT_REF_KEY;
1137 	key.offset = 0;
1138 
1139 	btrfs_release_path(path);
1140 	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
1141 	if (ret > 0)
1142 		goto out_add_root;
1143 	if (ret < 0) {
1144 		btrfs_abort_transaction(trans, ret);
1145 		goto out_free_path;
1146 	}
1147 
1148 	while (1) {
1149 		slot = path->slots[0];
1150 		leaf = path->nodes[0];
1151 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1152 
1153 		if (found_key.type == BTRFS_ROOT_REF_KEY) {
1154 
1155 			/* Release locks on tree_root before we access quota_root */
1156 			btrfs_release_path(path);
1157 
1158 			/* We should not have a stray @prealloc pointer. */
1159 			ASSERT(prealloc == NULL);
1160 			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1161 			if (!prealloc) {
1162 				ret = -ENOMEM;
1163 				btrfs_abort_transaction(trans, ret);
1164 				goto out_free_path;
1165 			}
1166 
1167 			ret = add_qgroup_item(trans, quota_root,
1168 					      found_key.offset);
1169 			if (ret) {
1170 				btrfs_abort_transaction(trans, ret);
1171 				goto out_free_path;
1172 			}
1173 
1174 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
1175 			prealloc = NULL;
1176 			if (IS_ERR(qgroup)) {
1177 				ret = PTR_ERR(qgroup);
1178 				btrfs_abort_transaction(trans, ret);
1179 				goto out_free_path;
1180 			}
1181 			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1182 			if (ret < 0) {
1183 				btrfs_abort_transaction(trans, ret);
1184 				goto out_free_path;
1185 			}
1186 			ret = btrfs_search_slot_for_read(tree_root, &found_key,
1187 							 path, 1, 0);
1188 			if (ret < 0) {
1189 				btrfs_abort_transaction(trans, ret);
1190 				goto out_free_path;
1191 			}
1192 			if (ret > 0) {
1193 				/*
1194 				 * Shouldn't happen, but in case it does we
1195 				 * don't need to do the btrfs_next_item, just
1196 				 * continue.
1197 				 */
1198 				continue;
1199 			}
1200 		}
1201 		ret = btrfs_next_item(tree_root, path);
1202 		if (ret < 0) {
1203 			btrfs_abort_transaction(trans, ret);
1204 			goto out_free_path;
1205 		}
1206 		if (ret)
1207 			break;
1208 	}
1209 
1210 out_add_root:
1211 	btrfs_release_path(path);
1212 	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
1213 	if (ret) {
1214 		btrfs_abort_transaction(trans, ret);
1215 		goto out_free_path;
1216 	}
1217 
1218 	ASSERT(prealloc == NULL);
1219 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1220 	if (!prealloc) {
1221 		ret = -ENOMEM;
1222 		goto out_free_path;
1223 	}
1224 	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
1225 	prealloc = NULL;
1226 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1227 	if (ret < 0) {
1228 		btrfs_abort_transaction(trans, ret);
1229 		goto out_free_path;
1230 	}
1231 
1232 	fs_info->qgroup_enable_gen = trans->transid;
1233 
1234 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1235 	/*
1236 	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
1237 	 * a deadlock with tasks concurrently doing other qgroup operations, such
1238 	 * adding/removing qgroups or adding/deleting qgroup relations for example,
1239 	 * because all qgroup operations first start or join a transaction and then
1240 	 * lock the qgroup_ioctl_lock mutex.
1241 	 * We are safe from a concurrent task trying to enable quotas, by calling
1242 	 * this function, since we are serialized by fs_info->subvol_sem.
1243 	 */
1244 	ret = btrfs_commit_transaction(trans);
1245 	trans = NULL;
1246 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1247 	if (ret)
1248 		goto out_free_path;
1249 
1250 	/*
1251 	 * Set quota enabled flag after committing the transaction, to avoid
1252 	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
1253 	 * creation.
1254 	 */
1255 	spin_lock(&fs_info->qgroup_lock);
1256 	fs_info->quota_root = quota_root;
1257 	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1258 	if (simple)
1259 		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
1260 	spin_unlock(&fs_info->qgroup_lock);
1261 
1262 	/* Skip rescan for simple qgroups. */
1263 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
1264 		goto out_free_path;
1265 
1266 	ret = qgroup_rescan_init(fs_info, 0, 1);
1267 	if (!ret) {
1268 	        qgroup_rescan_zero_tracking(fs_info);
1269 		fs_info->qgroup_rescan_running = true;
1270 	        btrfs_queue_work(fs_info->qgroup_rescan_workers,
1271 	                         &fs_info->qgroup_rescan_work);
1272 	} else {
1273 		/*
1274 		 * We have set both BTRFS_FS_QUOTA_ENABLED and
1275 		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
1276 		 * -EINPROGRESS. That can happen because someone started the
1277 		 * rescan worker by calling quota rescan ioctl before we
1278 		 * attempted to initialize the rescan worker. Failure due to
1279 		 * quotas disabled in the meanwhile is not possible, because
1280 		 * we are holding a write lock on fs_info->subvol_sem, which
1281 		 * is also acquired when disabling quotas.
1282 		 * Ignore such error, and any other error would need to undo
1283 		 * everything we did in the transaction we just committed.
1284 		 */
1285 		ASSERT(ret == -EINPROGRESS);
1286 		ret = 0;
1287 	}
1288 
1289 out_free_path:
1290 	btrfs_free_path(path);
1291 out_free_root:
1292 	if (ret)
1293 		btrfs_put_root(quota_root);
1294 out:
1295 	if (ret) {
1296 		ulist_free(fs_info->qgroup_ulist);
1297 		fs_info->qgroup_ulist = NULL;
1298 		btrfs_sysfs_del_qgroups(fs_info);
1299 	}
1300 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1301 	if (ret && trans)
1302 		btrfs_end_transaction(trans);
1303 	else if (trans)
1304 		ret = btrfs_end_transaction(trans);
1305 	ulist_free(ulist);
1306 	kfree(prealloc);
1307 	return ret;
1308 }
1309 
1310 /*
1311  * It is possible to have outstanding ordered extents which reserved bytes
1312  * before we disabled. We need to fully flush delalloc, ordered extents, and a
1313  * commit to ensure that we don't leak such reservations, only to have them
1314  * come back if we re-enable.
1315  *
1316  * - enable simple quotas
1317  * - reserve space
1318  * - release it, store rsv_bytes in OE
1319  * - disable quotas
1320  * - enable simple quotas (qgroup rsv are all 0)
1321  * - OE finishes
1322  * - run delayed refs
1323  * - free rsv_bytes, resulting in miscounting or even underflow
1324  */
flush_reservations(struct btrfs_fs_info * fs_info)1325 static int flush_reservations(struct btrfs_fs_info *fs_info)
1326 {
1327 	int ret;
1328 
1329 	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
1330 	if (ret)
1331 		return ret;
1332 	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
1333 
1334 	return btrfs_commit_current_transaction(fs_info->tree_root);
1335 }
1336 
btrfs_quota_disable(struct btrfs_fs_info * fs_info)1337 int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
1338 {
1339 	struct btrfs_root *quota_root = NULL;
1340 	struct btrfs_trans_handle *trans = NULL;
1341 	int ret = 0;
1342 
1343 	/*
1344 	 * We need to have subvol_sem write locked to prevent races with
1345 	 * snapshot creation.
1346 	 */
1347 	lockdep_assert_held_write(&fs_info->subvol_sem);
1348 
1349 	/*
1350 	 * Relocation will mess with backrefs, so make sure we have the
1351 	 * cleaner_mutex held to protect us from relocate.
1352 	 */
1353 	lockdep_assert_held(&fs_info->cleaner_mutex);
1354 
1355 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1356 	if (!fs_info->quota_root)
1357 		goto out;
1358 
1359 	/*
1360 	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
1361 	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
1362 	 * to lock that mutex while holding a transaction handle and the rescan
1363 	 * worker needs to commit a transaction.
1364 	 */
1365 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1366 
1367 	/*
1368 	 * Request qgroup rescan worker to complete and wait for it. This wait
1369 	 * must be done before transaction start for quota disable since it may
1370 	 * deadlock with transaction by the qgroup rescan worker.
1371 	 */
1372 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1373 	btrfs_qgroup_wait_for_completion(fs_info, false);
1374 
1375 	/*
1376 	 * We have nothing held here and no trans handle, just return the error
1377 	 * if there is one.
1378 	 */
1379 	ret = flush_reservations(fs_info);
1380 	if (ret)
1381 		return ret;
1382 
1383 	/*
1384 	 * 1 For the root item
1385 	 *
1386 	 * We should also reserve enough items for the quota tree deletion in
1387 	 * btrfs_clean_quota_tree but this is not done.
1388 	 *
1389 	 * Also, we must always start a transaction without holding the mutex
1390 	 * qgroup_ioctl_lock, see btrfs_quota_enable().
1391 	 */
1392 	trans = btrfs_start_transaction(fs_info->tree_root, 1);
1393 
1394 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1395 	if (IS_ERR(trans)) {
1396 		ret = PTR_ERR(trans);
1397 		trans = NULL;
1398 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
1399 		goto out;
1400 	}
1401 
1402 	if (!fs_info->quota_root)
1403 		goto out;
1404 
1405 	spin_lock(&fs_info->qgroup_lock);
1406 	quota_root = fs_info->quota_root;
1407 	fs_info->quota_root = NULL;
1408 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1409 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
1410 	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1411 	spin_unlock(&fs_info->qgroup_lock);
1412 
1413 	btrfs_free_qgroup_config(fs_info);
1414 
1415 	ret = btrfs_clean_quota_tree(trans, quota_root);
1416 	if (ret) {
1417 		btrfs_abort_transaction(trans, ret);
1418 		goto out;
1419 	}
1420 
1421 	ret = btrfs_del_root(trans, &quota_root->root_key);
1422 	if (ret) {
1423 		btrfs_abort_transaction(trans, ret);
1424 		goto out;
1425 	}
1426 
1427 	spin_lock(&fs_info->trans_lock);
1428 	list_del(&quota_root->dirty_list);
1429 	spin_unlock(&fs_info->trans_lock);
1430 
1431 	btrfs_tree_lock(quota_root->node);
1432 	btrfs_clear_buffer_dirty(trans, quota_root->node);
1433 	btrfs_tree_unlock(quota_root->node);
1434 	ret = btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
1435 				    quota_root->node, 0, 1);
1436 
1437 	if (ret < 0)
1438 		btrfs_abort_transaction(trans, ret);
1439 
1440 out:
1441 	btrfs_put_root(quota_root);
1442 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1443 	if (ret && trans)
1444 		btrfs_end_transaction(trans);
1445 	else if (trans)
1446 		ret = btrfs_commit_transaction(trans);
1447 	return ret;
1448 }
1449 
qgroup_dirty(struct btrfs_fs_info * fs_info,struct btrfs_qgroup * qgroup)1450 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1451 			 struct btrfs_qgroup *qgroup)
1452 {
1453 	if (list_empty(&qgroup->dirty))
1454 		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1455 }
1456 
qgroup_iterator_add(struct list_head * head,struct btrfs_qgroup * qgroup)1457 static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
1458 {
1459 	if (!list_empty(&qgroup->iterator))
1460 		return;
1461 
1462 	list_add_tail(&qgroup->iterator, head);
1463 }
1464 
qgroup_iterator_clean(struct list_head * head)1465 static void qgroup_iterator_clean(struct list_head *head)
1466 {
1467 	while (!list_empty(head)) {
1468 		struct btrfs_qgroup *qgroup;
1469 
1470 		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
1471 		list_del_init(&qgroup->iterator);
1472 	}
1473 }
1474 
1475 /*
1476  * The easy accounting, we're updating qgroup relationship whose child qgroup
1477  * only has exclusive extents.
1478  *
1479  * In this case, all exclusive extents will also be exclusive for parent, so
1480  * excl/rfer just get added/removed.
1481  *
1482  * So is qgroup reservation space, which should also be added/removed to
1483  * parent.
1484  * Or when child tries to release reservation space, parent will underflow its
1485  * reservation (for relationship adding case).
1486  *
1487  * Caller should hold fs_info->qgroup_lock.
1488  */
__qgroup_excl_accounting(struct btrfs_fs_info * fs_info,u64 ref_root,struct btrfs_qgroup * src,int sign)1489 static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
1490 				    struct btrfs_qgroup *src, int sign)
1491 {
1492 	struct btrfs_qgroup *qgroup;
1493 	struct btrfs_qgroup *cur;
1494 	LIST_HEAD(qgroup_list);
1495 	u64 num_bytes = src->excl;
1496 	int ret = 0;
1497 
1498 	qgroup = find_qgroup_rb(fs_info, ref_root);
1499 	if (!qgroup)
1500 		goto out;
1501 
1502 	qgroup_iterator_add(&qgroup_list, qgroup);
1503 	list_for_each_entry(cur, &qgroup_list, iterator) {
1504 		struct btrfs_qgroup_list *glist;
1505 
1506 		qgroup->rfer += sign * num_bytes;
1507 		qgroup->rfer_cmpr += sign * num_bytes;
1508 
1509 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1510 		qgroup->excl += sign * num_bytes;
1511 		qgroup->excl_cmpr += sign * num_bytes;
1512 
1513 		if (sign > 0)
1514 			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1515 		else
1516 			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1517 		qgroup_dirty(fs_info, qgroup);
1518 
1519 		/* Append parent qgroups to @qgroup_list. */
1520 		list_for_each_entry(glist, &qgroup->groups, next_group)
1521 			qgroup_iterator_add(&qgroup_list, glist->group);
1522 	}
1523 	ret = 0;
1524 out:
1525 	qgroup_iterator_clean(&qgroup_list);
1526 	return ret;
1527 }
1528 
1529 
1530 /*
1531  * Quick path for updating qgroup with only excl refs.
1532  *
1533  * In that case, just update all parent will be enough.
1534  * Or we needs to do a full rescan.
1535  * Caller should also hold fs_info->qgroup_lock.
1536  *
1537  * Return 0 for quick update, return >0 for need to full rescan
1538  * and mark INCONSISTENT flag.
1539  * Return < 0 for other error.
1540  */
quick_update_accounting(struct btrfs_fs_info * fs_info,u64 src,u64 dst,int sign)1541 static int quick_update_accounting(struct btrfs_fs_info *fs_info,
1542 				   u64 src, u64 dst, int sign)
1543 {
1544 	struct btrfs_qgroup *qgroup;
1545 	int ret = 1;
1546 
1547 	qgroup = find_qgroup_rb(fs_info, src);
1548 	if (!qgroup)
1549 		goto out;
1550 	if (qgroup->excl == qgroup->rfer) {
1551 		ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
1552 		if (ret < 0)
1553 			goto out;
1554 		ret = 0;
1555 	}
1556 out:
1557 	if (ret)
1558 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1559 	return ret;
1560 }
1561 
1562 /*
1563  * Add relation between @src and @dst qgroup. The @prealloc is allocated by the
1564  * callers and transferred here (either used or freed on error).
1565  */
btrfs_add_qgroup_relation(struct btrfs_trans_handle * trans,u64 src,u64 dst,struct btrfs_qgroup_list * prealloc)1566 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
1567 			      struct btrfs_qgroup_list *prealloc)
1568 {
1569 	struct btrfs_fs_info *fs_info = trans->fs_info;
1570 	struct btrfs_qgroup *parent;
1571 	struct btrfs_qgroup *member;
1572 	struct btrfs_qgroup_list *list;
1573 	int ret = 0;
1574 
1575 	ASSERT(prealloc);
1576 
1577 	/* Check the level of src and dst first */
1578 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
1579 		return -EINVAL;
1580 
1581 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1582 	if (!fs_info->quota_root) {
1583 		ret = -ENOTCONN;
1584 		goto out;
1585 	}
1586 	member = find_qgroup_rb(fs_info, src);
1587 	parent = find_qgroup_rb(fs_info, dst);
1588 	if (!member || !parent) {
1589 		ret = -EINVAL;
1590 		goto out;
1591 	}
1592 
1593 	/* check if such qgroup relation exist firstly */
1594 	list_for_each_entry(list, &member->groups, next_group) {
1595 		if (list->group == parent) {
1596 			ret = -EEXIST;
1597 			goto out;
1598 		}
1599 	}
1600 
1601 	ret = add_qgroup_relation_item(trans, src, dst);
1602 	if (ret)
1603 		goto out;
1604 
1605 	ret = add_qgroup_relation_item(trans, dst, src);
1606 	if (ret) {
1607 		del_qgroup_relation_item(trans, src, dst);
1608 		goto out;
1609 	}
1610 
1611 	spin_lock(&fs_info->qgroup_lock);
1612 	ret = __add_relation_rb(prealloc, member, parent);
1613 	prealloc = NULL;
1614 	if (ret < 0) {
1615 		spin_unlock(&fs_info->qgroup_lock);
1616 		goto out;
1617 	}
1618 	ret = quick_update_accounting(fs_info, src, dst, 1);
1619 	spin_unlock(&fs_info->qgroup_lock);
1620 out:
1621 	kfree(prealloc);
1622 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1623 	return ret;
1624 }
1625 
__del_qgroup_relation(struct btrfs_trans_handle * trans,u64 src,u64 dst)1626 static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1627 				 u64 dst)
1628 {
1629 	struct btrfs_fs_info *fs_info = trans->fs_info;
1630 	struct btrfs_qgroup *parent;
1631 	struct btrfs_qgroup *member;
1632 	struct btrfs_qgroup_list *list;
1633 	bool found = false;
1634 	int ret = 0;
1635 	int ret2;
1636 
1637 	if (!fs_info->quota_root) {
1638 		ret = -ENOTCONN;
1639 		goto out;
1640 	}
1641 
1642 	member = find_qgroup_rb(fs_info, src);
1643 	parent = find_qgroup_rb(fs_info, dst);
1644 	/*
1645 	 * The parent/member pair doesn't exist, then try to delete the dead
1646 	 * relation items only.
1647 	 */
1648 	if (!member || !parent)
1649 		goto delete_item;
1650 
1651 	/* check if such qgroup relation exist firstly */
1652 	list_for_each_entry(list, &member->groups, next_group) {
1653 		if (list->group == parent) {
1654 			found = true;
1655 			break;
1656 		}
1657 	}
1658 
1659 delete_item:
1660 	ret = del_qgroup_relation_item(trans, src, dst);
1661 	if (ret < 0 && ret != -ENOENT)
1662 		goto out;
1663 	ret2 = del_qgroup_relation_item(trans, dst, src);
1664 	if (ret2 < 0 && ret2 != -ENOENT)
1665 		goto out;
1666 
1667 	/* At least one deletion succeeded, return 0 */
1668 	if (!ret || !ret2)
1669 		ret = 0;
1670 
1671 	if (found) {
1672 		spin_lock(&fs_info->qgroup_lock);
1673 		del_relation_rb(fs_info, src, dst);
1674 		ret = quick_update_accounting(fs_info, src, dst, -1);
1675 		spin_unlock(&fs_info->qgroup_lock);
1676 	}
1677 out:
1678 	return ret;
1679 }
1680 
btrfs_del_qgroup_relation(struct btrfs_trans_handle * trans,u64 src,u64 dst)1681 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1682 			      u64 dst)
1683 {
1684 	struct btrfs_fs_info *fs_info = trans->fs_info;
1685 	int ret = 0;
1686 
1687 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1688 	ret = __del_qgroup_relation(trans, src, dst);
1689 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1690 
1691 	return ret;
1692 }
1693 
btrfs_create_qgroup(struct btrfs_trans_handle * trans,u64 qgroupid)1694 int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1695 {
1696 	struct btrfs_fs_info *fs_info = trans->fs_info;
1697 	struct btrfs_root *quota_root;
1698 	struct btrfs_qgroup *qgroup;
1699 	struct btrfs_qgroup *prealloc = NULL;
1700 	int ret = 0;
1701 
1702 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
1703 		return 0;
1704 
1705 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1706 	if (!fs_info->quota_root) {
1707 		ret = -ENOTCONN;
1708 		goto out;
1709 	}
1710 	quota_root = fs_info->quota_root;
1711 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1712 	if (qgroup) {
1713 		ret = -EEXIST;
1714 		goto out;
1715 	}
1716 
1717 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
1718 	if (!prealloc) {
1719 		ret = -ENOMEM;
1720 		goto out;
1721 	}
1722 
1723 	ret = add_qgroup_item(trans, quota_root, qgroupid);
1724 	if (ret)
1725 		goto out;
1726 
1727 	spin_lock(&fs_info->qgroup_lock);
1728 	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
1729 	spin_unlock(&fs_info->qgroup_lock);
1730 	prealloc = NULL;
1731 
1732 	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1733 out:
1734 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1735 	kfree(prealloc);
1736 	return ret;
1737 }
1738 
1739 /*
1740  * Return 0 if we can not delete the qgroup (not empty or has children etc).
1741  * Return >0 if we can delete the qgroup.
1742  * Return <0 for other errors during tree search.
1743  */
can_delete_qgroup(struct btrfs_fs_info * fs_info,struct btrfs_qgroup * qgroup)1744 static int can_delete_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup)
1745 {
1746 	struct btrfs_key key;
1747 	struct btrfs_path *path;
1748 	int ret;
1749 
1750 	/*
1751 	 * Squota would never be inconsistent, but there can still be case
1752 	 * where a dropped subvolume still has qgroup numbers, and squota
1753 	 * relies on such qgroup for future accounting.
1754 	 *
1755 	 * So for squota, do not allow dropping any non-zero qgroup.
1756 	 */
1757 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
1758 	    (qgroup->rfer || qgroup->excl || qgroup->excl_cmpr || qgroup->rfer_cmpr))
1759 		return 0;
1760 
1761 	/* For higher level qgroup, we can only delete it if it has no child. */
1762 	if (btrfs_qgroup_level(qgroup->qgroupid)) {
1763 		if (!list_empty(&qgroup->members))
1764 			return 0;
1765 		return 1;
1766 	}
1767 
1768 	/*
1769 	 * For level-0 qgroups, we can only delete it if it has no subvolume
1770 	 * for it.
1771 	 * This means even a subvolume is unlinked but not yet fully dropped,
1772 	 * we can not delete the qgroup.
1773 	 */
1774 	key.objectid = qgroup->qgroupid;
1775 	key.type = BTRFS_ROOT_ITEM_KEY;
1776 	key.offset = -1ULL;
1777 	path = btrfs_alloc_path();
1778 	if (!path)
1779 		return -ENOMEM;
1780 
1781 	ret = btrfs_find_root(fs_info->tree_root, &key, path, NULL, NULL);
1782 	btrfs_free_path(path);
1783 	/*
1784 	 * The @ret from btrfs_find_root() exactly matches our definition for
1785 	 * the return value, thus can be returned directly.
1786 	 */
1787 	return ret;
1788 }
1789 
btrfs_remove_qgroup(struct btrfs_trans_handle * trans,u64 qgroupid)1790 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
1791 {
1792 	struct btrfs_fs_info *fs_info = trans->fs_info;
1793 	struct btrfs_qgroup *qgroup;
1794 	struct btrfs_qgroup_list *list;
1795 	int ret = 0;
1796 
1797 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1798 	if (!fs_info->quota_root) {
1799 		ret = -ENOTCONN;
1800 		goto out;
1801 	}
1802 
1803 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1804 	if (!qgroup) {
1805 		ret = -ENOENT;
1806 		goto out;
1807 	}
1808 
1809 	ret = can_delete_qgroup(fs_info, qgroup);
1810 	if (ret < 0)
1811 		goto out;
1812 	if (ret == 0) {
1813 		ret = -EBUSY;
1814 		goto out;
1815 	}
1816 
1817 	/* Check if there are no children of this qgroup */
1818 	if (!list_empty(&qgroup->members)) {
1819 		ret = -EBUSY;
1820 		goto out;
1821 	}
1822 
1823 	ret = del_qgroup_item(trans, qgroupid);
1824 	if (ret && ret != -ENOENT)
1825 		goto out;
1826 
1827 	while (!list_empty(&qgroup->groups)) {
1828 		list = list_first_entry(&qgroup->groups,
1829 					struct btrfs_qgroup_list, next_group);
1830 		ret = __del_qgroup_relation(trans, qgroupid,
1831 					    list->group->qgroupid);
1832 		if (ret)
1833 			goto out;
1834 	}
1835 
1836 	spin_lock(&fs_info->qgroup_lock);
1837 	/*
1838 	 * Warn on reserved space. The subvolume should has no child nor
1839 	 * corresponding subvolume.
1840 	 * Thus its reserved space should all be zero, no matter if qgroup
1841 	 * is consistent or the mode.
1842 	 */
1843 	WARN_ON(qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
1844 		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
1845 		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
1846 	/*
1847 	 * The same for rfer/excl numbers, but that's only if our qgroup is
1848 	 * consistent and if it's in regular qgroup mode.
1849 	 * For simple mode it's not as accurate thus we can hit non-zero values
1850 	 * very frequently.
1851 	 */
1852 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
1853 	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
1854 		if (WARN_ON(qgroup->rfer || qgroup->excl ||
1855 			    qgroup->rfer_cmpr || qgroup->excl_cmpr)) {
1856 			btrfs_warn_rl(fs_info,
1857 "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
1858 				      btrfs_qgroup_level(qgroup->qgroupid),
1859 				      btrfs_qgroup_subvolid(qgroup->qgroupid),
1860 				      qgroup->rfer, qgroup->rfer_cmpr,
1861 				      qgroup->excl, qgroup->excl_cmpr);
1862 			qgroup_mark_inconsistent(fs_info);
1863 		}
1864 	}
1865 	del_qgroup_rb(fs_info, qgroupid);
1866 	spin_unlock(&fs_info->qgroup_lock);
1867 
1868 	/*
1869 	 * Remove the qgroup from sysfs now without holding the qgroup_lock
1870 	 * spinlock, since the sysfs_remove_group() function needs to take
1871 	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1872 	 */
1873 	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1874 	kfree(qgroup);
1875 out:
1876 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1877 	return ret;
1878 }
1879 
btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info * fs_info,u64 subvolid)1880 int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid)
1881 {
1882 	struct btrfs_trans_handle *trans;
1883 	int ret;
1884 
1885 	if (!is_fstree(subvolid) || !btrfs_qgroup_enabled(fs_info) || !fs_info->quota_root)
1886 		return 0;
1887 
1888 	/*
1889 	 * Commit current transaction to make sure all the rfer/excl numbers
1890 	 * get updated.
1891 	 */
1892 	trans = btrfs_start_transaction(fs_info->quota_root, 0);
1893 	if (IS_ERR(trans))
1894 		return PTR_ERR(trans);
1895 
1896 	ret = btrfs_commit_transaction(trans);
1897 	if (ret < 0)
1898 		return ret;
1899 
1900 	/* Start new trans to delete the qgroup info and limit items. */
1901 	trans = btrfs_start_transaction(fs_info->quota_root, 2);
1902 	if (IS_ERR(trans))
1903 		return PTR_ERR(trans);
1904 	ret = btrfs_remove_qgroup(trans, subvolid);
1905 	btrfs_end_transaction(trans);
1906 	/*
1907 	 * It's squota and the subvolume still has numbers needed for future
1908 	 * accounting, in this case we can not delete it.  Just skip it.
1909 	 */
1910 	if (ret == -EBUSY)
1911 		ret = 0;
1912 	return ret;
1913 }
1914 
btrfs_limit_qgroup(struct btrfs_trans_handle * trans,u64 qgroupid,struct btrfs_qgroup_limit * limit)1915 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
1916 		       struct btrfs_qgroup_limit *limit)
1917 {
1918 	struct btrfs_fs_info *fs_info = trans->fs_info;
1919 	struct btrfs_qgroup *qgroup;
1920 	int ret = 0;
1921 	/* Sometimes we would want to clear the limit on this qgroup.
1922 	 * To meet this requirement, we treat the -1 as a special value
1923 	 * which tell kernel to clear the limit on this qgroup.
1924 	 */
1925 	const u64 CLEAR_VALUE = -1;
1926 
1927 	mutex_lock(&fs_info->qgroup_ioctl_lock);
1928 	if (!fs_info->quota_root) {
1929 		ret = -ENOTCONN;
1930 		goto out;
1931 	}
1932 
1933 	qgroup = find_qgroup_rb(fs_info, qgroupid);
1934 	if (!qgroup) {
1935 		ret = -ENOENT;
1936 		goto out;
1937 	}
1938 
1939 	spin_lock(&fs_info->qgroup_lock);
1940 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1941 		if (limit->max_rfer == CLEAR_VALUE) {
1942 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1943 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1944 			qgroup->max_rfer = 0;
1945 		} else {
1946 			qgroup->max_rfer = limit->max_rfer;
1947 		}
1948 	}
1949 	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1950 		if (limit->max_excl == CLEAR_VALUE) {
1951 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1952 			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1953 			qgroup->max_excl = 0;
1954 		} else {
1955 			qgroup->max_excl = limit->max_excl;
1956 		}
1957 	}
1958 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1959 		if (limit->rsv_rfer == CLEAR_VALUE) {
1960 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1961 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1962 			qgroup->rsv_rfer = 0;
1963 		} else {
1964 			qgroup->rsv_rfer = limit->rsv_rfer;
1965 		}
1966 	}
1967 	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1968 		if (limit->rsv_excl == CLEAR_VALUE) {
1969 			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1970 			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1971 			qgroup->rsv_excl = 0;
1972 		} else {
1973 			qgroup->rsv_excl = limit->rsv_excl;
1974 		}
1975 	}
1976 	qgroup->lim_flags |= limit->flags;
1977 
1978 	spin_unlock(&fs_info->qgroup_lock);
1979 
1980 	ret = update_qgroup_limit_item(trans, qgroup);
1981 	if (ret) {
1982 		qgroup_mark_inconsistent(fs_info);
1983 		btrfs_info(fs_info, "unable to update quota limit for %llu",
1984 		       qgroupid);
1985 	}
1986 
1987 out:
1988 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
1989 	return ret;
1990 }
1991 
1992 /*
1993  * Inform qgroup to trace one dirty extent, its info is recorded in @record.
1994  * So qgroup can account it at transaction committing time.
1995  *
1996  * No lock version, caller must acquire delayed ref lock and allocated memory,
1997  * then call btrfs_qgroup_trace_extent_post() after exiting lock context.
1998  *
1999  * Return 0 for success insert
2000  * Return >0 for existing record, caller can free @record safely.
2001  * Return <0 for insertion failure, caller can free @record safely.
2002  */
btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info * fs_info,struct btrfs_delayed_ref_root * delayed_refs,struct btrfs_qgroup_extent_record * record)2003 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
2004 				struct btrfs_delayed_ref_root *delayed_refs,
2005 				struct btrfs_qgroup_extent_record *record)
2006 {
2007 	struct btrfs_qgroup_extent_record *existing, *ret;
2008 	const unsigned long index = (record->bytenr >> fs_info->sectorsize_bits);
2009 
2010 	if (!btrfs_qgroup_full_accounting(fs_info))
2011 		return 1;
2012 
2013 #if BITS_PER_LONG == 32
2014 	if (record->bytenr >= MAX_LFS_FILESIZE) {
2015 		btrfs_err_rl(fs_info,
2016 "qgroup record for extent at %llu is beyond 32bit page cache and xarray index limit",
2017 			     record->bytenr);
2018 		btrfs_err_32bit_limit(fs_info);
2019 		return -EOVERFLOW;
2020 	}
2021 #endif
2022 
2023 	lockdep_assert_held(&delayed_refs->lock);
2024 	trace_btrfs_qgroup_trace_extent(fs_info, record);
2025 
2026 	xa_lock(&delayed_refs->dirty_extents);
2027 	existing = xa_load(&delayed_refs->dirty_extents, index);
2028 	if (existing) {
2029 		if (record->data_rsv && !existing->data_rsv) {
2030 			existing->data_rsv = record->data_rsv;
2031 			existing->data_rsv_refroot = record->data_rsv_refroot;
2032 		}
2033 		xa_unlock(&delayed_refs->dirty_extents);
2034 		return 1;
2035 	}
2036 
2037 	ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);
2038 	xa_unlock(&delayed_refs->dirty_extents);
2039 	if (xa_is_err(ret)) {
2040 		qgroup_mark_inconsistent(fs_info);
2041 		return xa_err(ret);
2042 	}
2043 
2044 	return 0;
2045 }
2046 
2047 /*
2048  * Post handler after qgroup_trace_extent_nolock().
2049  *
2050  * NOTE: Current qgroup does the expensive backref walk at transaction
2051  * committing time with TRANS_STATE_COMMIT_DOING, this blocks incoming
2052  * new transaction.
2053  * This is designed to allow btrfs_find_all_roots() to get correct new_roots
2054  * result.
2055  *
2056  * However for old_roots there is no need to do backref walk at that time,
2057  * since we search commit roots to walk backref and result will always be
2058  * correct.
2059  *
2060  * Due to the nature of no lock version, we can't do backref there.
2061  * So we must call btrfs_qgroup_trace_extent_post() after exiting
2062  * spinlock context.
2063  *
2064  * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
2065  * using current root, then we can move all expensive backref walk out of
2066  * transaction committing, but not now as qgroup accounting will be wrong again.
2067  */
btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle * trans,struct btrfs_qgroup_extent_record * qrecord)2068 int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
2069 				   struct btrfs_qgroup_extent_record *qrecord)
2070 {
2071 	struct btrfs_backref_walk_ctx ctx = { 0 };
2072 	int ret;
2073 
2074 	if (!btrfs_qgroup_full_accounting(trans->fs_info))
2075 		return 0;
2076 	/*
2077 	 * We are always called in a context where we are already holding a
2078 	 * transaction handle. Often we are called when adding a data delayed
2079 	 * reference from btrfs_truncate_inode_items() (truncating or unlinking),
2080 	 * in which case we will be holding a write lock on extent buffer from a
2081 	 * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
2082 	 * acquire fs_info->commit_root_sem, because that is a higher level lock
2083 	 * that must be acquired before locking any extent buffers.
2084 	 *
2085 	 * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
2086 	 * but we can't pass it a non-NULL transaction handle, because otherwise
2087 	 * it would not use commit roots and would lock extent buffers, causing
2088 	 * a deadlock if it ends up trying to read lock the same extent buffer
2089 	 * that was previously write locked at btrfs_truncate_inode_items().
2090 	 *
2091 	 * So pass a NULL transaction handle to btrfs_find_all_roots() and
2092 	 * explicitly tell it to not acquire the commit_root_sem - if we are
2093 	 * holding a transaction handle we don't need its protection.
2094 	 */
2095 	ASSERT(trans != NULL);
2096 
2097 	if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2098 		return 0;
2099 
2100 	ctx.bytenr = qrecord->bytenr;
2101 	ctx.fs_info = trans->fs_info;
2102 
2103 	ret = btrfs_find_all_roots(&ctx, true);
2104 	if (ret < 0) {
2105 		qgroup_mark_inconsistent(trans->fs_info);
2106 		btrfs_warn(trans->fs_info,
2107 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
2108 			ret);
2109 		return 0;
2110 	}
2111 
2112 	/*
2113 	 * Here we don't need to get the lock of
2114 	 * trans->transaction->delayed_refs, since inserted qrecord won't
2115 	 * be deleted, only qrecord->node may be modified (new qrecord insert)
2116 	 *
2117 	 * So modifying qrecord->old_roots is safe here
2118 	 */
2119 	qrecord->old_roots = ctx.roots;
2120 	return 0;
2121 }
2122 
2123 /*
2124  * Inform qgroup to trace one dirty extent, specified by @bytenr and
2125  * @num_bytes.
2126  * So qgroup can account it at commit trans time.
2127  *
2128  * Better encapsulated version, with memory allocation and backref walk for
2129  * commit roots.
2130  * So this can sleep.
2131  *
2132  * Return 0 if the operation is done.
2133  * Return <0 for error, like memory allocation failure or invalid parameter
2134  * (NULL trans)
2135  */
btrfs_qgroup_trace_extent(struct btrfs_trans_handle * trans,u64 bytenr,u64 num_bytes)2136 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2137 			      u64 num_bytes)
2138 {
2139 	struct btrfs_fs_info *fs_info = trans->fs_info;
2140 	struct btrfs_qgroup_extent_record *record;
2141 	struct btrfs_delayed_ref_root *delayed_refs;
2142 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
2143 	int ret;
2144 
2145 	if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
2146 		return 0;
2147 	record = kzalloc(sizeof(*record), GFP_NOFS);
2148 	if (!record)
2149 		return -ENOMEM;
2150 
2151 	if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents, index, GFP_NOFS)) {
2152 		kfree(record);
2153 		return -ENOMEM;
2154 	}
2155 
2156 	delayed_refs = &trans->transaction->delayed_refs;
2157 	record->bytenr = bytenr;
2158 	record->num_bytes = num_bytes;
2159 	record->old_roots = NULL;
2160 
2161 	spin_lock(&delayed_refs->lock);
2162 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
2163 	spin_unlock(&delayed_refs->lock);
2164 	if (ret) {
2165 		/* Clean up if insertion fails or item exists. */
2166 		xa_release(&delayed_refs->dirty_extents, index);
2167 		kfree(record);
2168 		return 0;
2169 	}
2170 	return btrfs_qgroup_trace_extent_post(trans, record);
2171 }
2172 
2173 /*
2174  * Inform qgroup to trace all leaf items of data
2175  *
2176  * Return 0 for success
2177  * Return <0 for error(ENOMEM)
2178  */
btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle * trans,struct extent_buffer * eb)2179 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
2180 				  struct extent_buffer *eb)
2181 {
2182 	struct btrfs_fs_info *fs_info = trans->fs_info;
2183 	int nr = btrfs_header_nritems(eb);
2184 	int i, extent_type, ret;
2185 	struct btrfs_key key;
2186 	struct btrfs_file_extent_item *fi;
2187 	u64 bytenr, num_bytes;
2188 
2189 	/* We can be called directly from walk_up_proc() */
2190 	if (!btrfs_qgroup_full_accounting(fs_info))
2191 		return 0;
2192 
2193 	for (i = 0; i < nr; i++) {
2194 		btrfs_item_key_to_cpu(eb, &key, i);
2195 
2196 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2197 			continue;
2198 
2199 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2200 		/* filter out non qgroup-accountable extents  */
2201 		extent_type = btrfs_file_extent_type(eb, fi);
2202 
2203 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
2204 			continue;
2205 
2206 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
2207 		if (!bytenr)
2208 			continue;
2209 
2210 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
2211 
2212 		ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
2213 		if (ret)
2214 			return ret;
2215 	}
2216 	cond_resched();
2217 	return 0;
2218 }
2219 
2220 /*
2221  * Walk up the tree from the bottom, freeing leaves and any interior
2222  * nodes which have had all slots visited. If a node (leaf or
2223  * interior) is freed, the node above it will have it's slot
2224  * incremented. The root node will never be freed.
2225  *
2226  * At the end of this function, we should have a path which has all
2227  * slots incremented to the next position for a search. If we need to
2228  * read a new node it will be NULL and the node above it will have the
2229  * correct slot selected for a later read.
2230  *
2231  * If we increment the root nodes slot counter past the number of
2232  * elements, 1 is returned to signal completion of the search.
2233  */
adjust_slots_upwards(struct btrfs_path * path,int root_level)2234 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
2235 {
2236 	int level = 0;
2237 	int nr, slot;
2238 	struct extent_buffer *eb;
2239 
2240 	if (root_level == 0)
2241 		return 1;
2242 
2243 	while (level <= root_level) {
2244 		eb = path->nodes[level];
2245 		nr = btrfs_header_nritems(eb);
2246 		path->slots[level]++;
2247 		slot = path->slots[level];
2248 		if (slot >= nr || level == 0) {
2249 			/*
2250 			 * Don't free the root -  we will detect this
2251 			 * condition after our loop and return a
2252 			 * positive value for caller to stop walking the tree.
2253 			 */
2254 			if (level != root_level) {
2255 				btrfs_tree_unlock_rw(eb, path->locks[level]);
2256 				path->locks[level] = 0;
2257 
2258 				free_extent_buffer(eb);
2259 				path->nodes[level] = NULL;
2260 				path->slots[level] = 0;
2261 			}
2262 		} else {
2263 			/*
2264 			 * We have a valid slot to walk back down
2265 			 * from. Stop here so caller can process these
2266 			 * new nodes.
2267 			 */
2268 			break;
2269 		}
2270 
2271 		level++;
2272 	}
2273 
2274 	eb = path->nodes[root_level];
2275 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
2276 		return 1;
2277 
2278 	return 0;
2279 }
2280 
2281 /*
2282  * Helper function to trace a subtree tree block swap.
2283  *
2284  * The swap will happen in highest tree block, but there may be a lot of
2285  * tree blocks involved.
2286  *
2287  * For example:
2288  *  OO = Old tree blocks
2289  *  NN = New tree blocks allocated during balance
2290  *
2291  *           File tree (257)                  Reloc tree for 257
2292  * L2              OO                                NN
2293  *               /    \                            /    \
2294  * L1          OO      OO (a)                    OO      NN (a)
2295  *            / \     / \                       / \     / \
2296  * L0       OO   OO OO   OO                   OO   OO NN   NN
2297  *                  (b)  (c)                          (b)  (c)
2298  *
2299  * When calling qgroup_trace_extent_swap(), we will pass:
2300  * @src_eb = OO(a)
2301  * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
2302  * @dst_level = 0
2303  * @root_level = 1
2304  *
2305  * In that case, qgroup_trace_extent_swap() will search from OO(a) to
2306  * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2307  *
2308  * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
2309  *
2310  * 1) Tree search from @src_eb
2311  *    It should acts as a simplified btrfs_search_slot().
2312  *    The key for search can be extracted from @dst_path->nodes[dst_level]
2313  *    (first key).
2314  *
2315  * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2316  *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
2317  *    They should be marked during previous (@dst_level = 1) iteration.
2318  *
2319  * 3) Mark file extents in leaves dirty
2320  *    We don't have good way to pick out new file extents only.
2321  *    So we still follow the old method by scanning all file extents in
2322  *    the leave.
2323  *
2324  * This function can free us from keeping two paths, thus later we only need
2325  * to care about how to iterate all new tree blocks in reloc tree.
2326  */
qgroup_trace_extent_swap(struct btrfs_trans_handle * trans,struct extent_buffer * src_eb,struct btrfs_path * dst_path,int dst_level,int root_level,bool trace_leaf)2327 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
2328 				    struct extent_buffer *src_eb,
2329 				    struct btrfs_path *dst_path,
2330 				    int dst_level, int root_level,
2331 				    bool trace_leaf)
2332 {
2333 	struct btrfs_key key;
2334 	struct btrfs_path *src_path;
2335 	struct btrfs_fs_info *fs_info = trans->fs_info;
2336 	u32 nodesize = fs_info->nodesize;
2337 	int cur_level = root_level;
2338 	int ret;
2339 
2340 	BUG_ON(dst_level > root_level);
2341 	/* Level mismatch */
2342 	if (btrfs_header_level(src_eb) != root_level)
2343 		return -EINVAL;
2344 
2345 	src_path = btrfs_alloc_path();
2346 	if (!src_path) {
2347 		ret = -ENOMEM;
2348 		goto out;
2349 	}
2350 
2351 	if (dst_level)
2352 		btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2353 	else
2354 		btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
2355 
2356 	/* For src_path */
2357 	atomic_inc(&src_eb->refs);
2358 	src_path->nodes[root_level] = src_eb;
2359 	src_path->slots[root_level] = dst_path->slots[root_level];
2360 	src_path->locks[root_level] = 0;
2361 
2362 	/* A simplified version of btrfs_search_slot() */
2363 	while (cur_level >= dst_level) {
2364 		struct btrfs_key src_key;
2365 		struct btrfs_key dst_key;
2366 
2367 		if (src_path->nodes[cur_level] == NULL) {
2368 			struct extent_buffer *eb;
2369 			int parent_slot;
2370 
2371 			eb = src_path->nodes[cur_level + 1];
2372 			parent_slot = src_path->slots[cur_level + 1];
2373 
2374 			eb = btrfs_read_node_slot(eb, parent_slot);
2375 			if (IS_ERR(eb)) {
2376 				ret = PTR_ERR(eb);
2377 				goto out;
2378 			}
2379 
2380 			src_path->nodes[cur_level] = eb;
2381 
2382 			btrfs_tree_read_lock(eb);
2383 			src_path->locks[cur_level] = BTRFS_READ_LOCK;
2384 		}
2385 
2386 		src_path->slots[cur_level] = dst_path->slots[cur_level];
2387 		if (cur_level) {
2388 			btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
2389 					&dst_key, dst_path->slots[cur_level]);
2390 			btrfs_node_key_to_cpu(src_path->nodes[cur_level],
2391 					&src_key, src_path->slots[cur_level]);
2392 		} else {
2393 			btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
2394 					&dst_key, dst_path->slots[cur_level]);
2395 			btrfs_item_key_to_cpu(src_path->nodes[cur_level],
2396 					&src_key, src_path->slots[cur_level]);
2397 		}
2398 		/* Content mismatch, something went wrong */
2399 		if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
2400 			ret = -ENOENT;
2401 			goto out;
2402 		}
2403 		cur_level--;
2404 	}
2405 
2406 	/*
2407 	 * Now both @dst_path and @src_path have been populated, record the tree
2408 	 * blocks for qgroup accounting.
2409 	 */
2410 	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
2411 					nodesize);
2412 	if (ret < 0)
2413 		goto out;
2414 	ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start,
2415 					nodesize);
2416 	if (ret < 0)
2417 		goto out;
2418 
2419 	/* Record leaf file extents */
2420 	if (dst_level == 0 && trace_leaf) {
2421 		ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2422 		if (ret < 0)
2423 			goto out;
2424 		ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2425 	}
2426 out:
2427 	btrfs_free_path(src_path);
2428 	return ret;
2429 }
2430 
2431 /*
2432  * Helper function to do recursive generation-aware depth-first search, to
2433  * locate all new tree blocks in a subtree of reloc tree.
2434  *
2435  * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2436  *         reloc tree
2437  * L2         NN (a)
2438  *          /    \
2439  * L1    OO        NN (b)
2440  *      /  \      /  \
2441  * L0  OO  OO    OO  NN
2442  *               (c) (d)
2443  * If we pass:
2444  * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2445  * @cur_level = 1
2446  * @root_level = 1
2447  *
2448  * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace
2449  * above tree blocks along with their counter parts in file tree.
2450  * While during search, old tree blocks OO(c) will be skipped as tree block swap
2451  * won't affect OO(c).
2452  */
qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle * trans,struct extent_buffer * src_eb,struct btrfs_path * dst_path,int cur_level,int root_level,u64 last_snapshot,bool trace_leaf)2453 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
2454 					   struct extent_buffer *src_eb,
2455 					   struct btrfs_path *dst_path,
2456 					   int cur_level, int root_level,
2457 					   u64 last_snapshot, bool trace_leaf)
2458 {
2459 	struct btrfs_fs_info *fs_info = trans->fs_info;
2460 	struct extent_buffer *eb;
2461 	bool need_cleanup = false;
2462 	int ret = 0;
2463 	int i;
2464 
2465 	/* Level sanity check */
2466 	if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2467 	    root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2468 	    root_level < cur_level) {
2469 		btrfs_err_rl(fs_info,
2470 			"%s: bad levels, cur_level=%d root_level=%d",
2471 			__func__, cur_level, root_level);
2472 		return -EUCLEAN;
2473 	}
2474 
2475 	/* Read the tree block if needed */
2476 	if (dst_path->nodes[cur_level] == NULL) {
2477 		int parent_slot;
2478 		u64 child_gen;
2479 
2480 		/*
2481 		 * dst_path->nodes[root_level] must be initialized before
2482 		 * calling this function.
2483 		 */
2484 		if (cur_level == root_level) {
2485 			btrfs_err_rl(fs_info,
2486 	"%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2487 				__func__, root_level, root_level, cur_level);
2488 			return -EUCLEAN;
2489 		}
2490 
2491 		/*
2492 		 * We need to get child blockptr/gen from parent before we can
2493 		 * read it.
2494 		  */
2495 		eb = dst_path->nodes[cur_level + 1];
2496 		parent_slot = dst_path->slots[cur_level + 1];
2497 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
2498 
2499 		/* This node is old, no need to trace */
2500 		if (child_gen < last_snapshot)
2501 			goto out;
2502 
2503 		eb = btrfs_read_node_slot(eb, parent_slot);
2504 		if (IS_ERR(eb)) {
2505 			ret = PTR_ERR(eb);
2506 			goto out;
2507 		}
2508 
2509 		dst_path->nodes[cur_level] = eb;
2510 		dst_path->slots[cur_level] = 0;
2511 
2512 		btrfs_tree_read_lock(eb);
2513 		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2514 		need_cleanup = true;
2515 	}
2516 
2517 	/* Now record this tree block and its counter part for qgroups */
2518 	ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
2519 				       root_level, trace_leaf);
2520 	if (ret < 0)
2521 		goto cleanup;
2522 
2523 	eb = dst_path->nodes[cur_level];
2524 
2525 	if (cur_level > 0) {
2526 		/* Iterate all child tree blocks */
2527 		for (i = 0; i < btrfs_header_nritems(eb); i++) {
2528 			/* Skip old tree blocks as they won't be swapped */
2529 			if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2530 				continue;
2531 			dst_path->slots[cur_level] = i;
2532 
2533 			/* Recursive call (at most 7 times) */
2534 			ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2535 					dst_path, cur_level - 1, root_level,
2536 					last_snapshot, trace_leaf);
2537 			if (ret < 0)
2538 				goto cleanup;
2539 		}
2540 	}
2541 
2542 cleanup:
2543 	if (need_cleanup) {
2544 		/* Clean up */
2545 		btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2546 				     dst_path->locks[cur_level]);
2547 		free_extent_buffer(dst_path->nodes[cur_level]);
2548 		dst_path->nodes[cur_level] = NULL;
2549 		dst_path->slots[cur_level] = 0;
2550 		dst_path->locks[cur_level] = 0;
2551 	}
2552 out:
2553 	return ret;
2554 }
2555 
qgroup_trace_subtree_swap(struct btrfs_trans_handle * trans,struct extent_buffer * src_eb,struct extent_buffer * dst_eb,u64 last_snapshot,bool trace_leaf)2556 static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2557 				struct extent_buffer *src_eb,
2558 				struct extent_buffer *dst_eb,
2559 				u64 last_snapshot, bool trace_leaf)
2560 {
2561 	struct btrfs_fs_info *fs_info = trans->fs_info;
2562 	struct btrfs_path *dst_path = NULL;
2563 	int level;
2564 	int ret;
2565 
2566 	if (!btrfs_qgroup_full_accounting(fs_info))
2567 		return 0;
2568 
2569 	/* Wrong parameter order */
2570 	if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2571 		btrfs_err_rl(fs_info,
2572 		"%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2573 			     btrfs_header_generation(src_eb),
2574 			     btrfs_header_generation(dst_eb));
2575 		return -EUCLEAN;
2576 	}
2577 
2578 	if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2579 		ret = -EIO;
2580 		goto out;
2581 	}
2582 
2583 	level = btrfs_header_level(dst_eb);
2584 	dst_path = btrfs_alloc_path();
2585 	if (!dst_path) {
2586 		ret = -ENOMEM;
2587 		goto out;
2588 	}
2589 	/* For dst_path */
2590 	atomic_inc(&dst_eb->refs);
2591 	dst_path->nodes[level] = dst_eb;
2592 	dst_path->slots[level] = 0;
2593 	dst_path->locks[level] = 0;
2594 
2595 	/* Do the generation aware breadth-first search */
2596 	ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2597 					      level, last_snapshot, trace_leaf);
2598 	if (ret < 0)
2599 		goto out;
2600 	ret = 0;
2601 
2602 out:
2603 	btrfs_free_path(dst_path);
2604 	if (ret < 0)
2605 		qgroup_mark_inconsistent(fs_info);
2606 	return ret;
2607 }
2608 
2609 /*
2610  * Inform qgroup to trace a whole subtree, including all its child tree
2611  * blocks and data.
2612  * The root tree block is specified by @root_eb.
2613  *
2614  * Normally used by relocation(tree block swap) and subvolume deletion.
2615  *
2616  * Return 0 for success
2617  * Return <0 for error(ENOMEM or tree search error)
2618  */
btrfs_qgroup_trace_subtree(struct btrfs_trans_handle * trans,struct extent_buffer * root_eb,u64 root_gen,int root_level)2619 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
2620 			       struct extent_buffer *root_eb,
2621 			       u64 root_gen, int root_level)
2622 {
2623 	struct btrfs_fs_info *fs_info = trans->fs_info;
2624 	int ret = 0;
2625 	int level;
2626 	u8 drop_subptree_thres;
2627 	struct extent_buffer *eb = root_eb;
2628 	struct btrfs_path *path = NULL;
2629 
2630 	ASSERT(0 <= root_level && root_level < BTRFS_MAX_LEVEL);
2631 	ASSERT(root_eb != NULL);
2632 
2633 	if (!btrfs_qgroup_full_accounting(fs_info))
2634 		return 0;
2635 
2636 	spin_lock(&fs_info->qgroup_lock);
2637 	drop_subptree_thres = fs_info->qgroup_drop_subtree_thres;
2638 	spin_unlock(&fs_info->qgroup_lock);
2639 
2640 	/*
2641 	 * This function only gets called for snapshot drop, if we hit a high
2642 	 * node here, it means we are going to change ownership for quite a lot
2643 	 * of extents, which will greatly slow down btrfs_commit_transaction().
2644 	 *
2645 	 * So here if we find a high tree here, we just skip the accounting and
2646 	 * mark qgroup inconsistent.
2647 	 */
2648 	if (root_level >= drop_subptree_thres) {
2649 		qgroup_mark_inconsistent(fs_info);
2650 		return 0;
2651 	}
2652 
2653 	if (!extent_buffer_uptodate(root_eb)) {
2654 		struct btrfs_tree_parent_check check = {
2655 			.has_first_key = false,
2656 			.transid = root_gen,
2657 			.level = root_level
2658 		};
2659 
2660 		ret = btrfs_read_extent_buffer(root_eb, &check);
2661 		if (ret)
2662 			goto out;
2663 	}
2664 
2665 	if (root_level == 0) {
2666 		ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
2667 		goto out;
2668 	}
2669 
2670 	path = btrfs_alloc_path();
2671 	if (!path)
2672 		return -ENOMEM;
2673 
2674 	/*
2675 	 * Walk down the tree.  Missing extent blocks are filled in as
2676 	 * we go. Metadata is accounted every time we read a new
2677 	 * extent block.
2678 	 *
2679 	 * When we reach a leaf, we account for file extent items in it,
2680 	 * walk back up the tree (adjusting slot pointers as we go)
2681 	 * and restart the search process.
2682 	 */
2683 	atomic_inc(&root_eb->refs);	/* For path */
2684 	path->nodes[root_level] = root_eb;
2685 	path->slots[root_level] = 0;
2686 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2687 walk_down:
2688 	level = root_level;
2689 	while (level >= 0) {
2690 		if (path->nodes[level] == NULL) {
2691 			int parent_slot;
2692 			u64 child_bytenr;
2693 
2694 			/*
2695 			 * We need to get child blockptr from parent before we
2696 			 * can read it.
2697 			  */
2698 			eb = path->nodes[level + 1];
2699 			parent_slot = path->slots[level + 1];
2700 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
2701 
2702 			eb = btrfs_read_node_slot(eb, parent_slot);
2703 			if (IS_ERR(eb)) {
2704 				ret = PTR_ERR(eb);
2705 				goto out;
2706 			}
2707 
2708 			path->nodes[level] = eb;
2709 			path->slots[level] = 0;
2710 
2711 			btrfs_tree_read_lock(eb);
2712 			path->locks[level] = BTRFS_READ_LOCK;
2713 
2714 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
2715 							fs_info->nodesize);
2716 			if (ret)
2717 				goto out;
2718 		}
2719 
2720 		if (level == 0) {
2721 			ret = btrfs_qgroup_trace_leaf_items(trans,
2722 							    path->nodes[level]);
2723 			if (ret)
2724 				goto out;
2725 
2726 			/* Nonzero return here means we completed our search */
2727 			ret = adjust_slots_upwards(path, root_level);
2728 			if (ret)
2729 				break;
2730 
2731 			/* Restart search with new slots */
2732 			goto walk_down;
2733 		}
2734 
2735 		level--;
2736 	}
2737 
2738 	ret = 0;
2739 out:
2740 	btrfs_free_path(path);
2741 
2742 	return ret;
2743 }
2744 
qgroup_iterator_nested_add(struct list_head * head,struct btrfs_qgroup * qgroup)2745 static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
2746 {
2747 	if (!list_empty(&qgroup->nested_iterator))
2748 		return;
2749 
2750 	list_add_tail(&qgroup->nested_iterator, head);
2751 }
2752 
qgroup_iterator_nested_clean(struct list_head * head)2753 static void qgroup_iterator_nested_clean(struct list_head *head)
2754 {
2755 	while (!list_empty(head)) {
2756 		struct btrfs_qgroup *qgroup;
2757 
2758 		qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
2759 		list_del_init(&qgroup->nested_iterator);
2760 	}
2761 }
2762 
2763 #define UPDATE_NEW	0
2764 #define UPDATE_OLD	1
2765 /*
2766  * Walk all of the roots that points to the bytenr and adjust their refcnts.
2767  */
qgroup_update_refcnt(struct btrfs_fs_info * fs_info,struct ulist * roots,struct list_head * qgroups,u64 seq,int update_old)2768 static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2769 				 struct ulist *roots, struct list_head *qgroups,
2770 				 u64 seq, int update_old)
2771 {
2772 	struct ulist_node *unode;
2773 	struct ulist_iterator uiter;
2774 	struct btrfs_qgroup *qg;
2775 
2776 	if (!roots)
2777 		return;
2778 	ULIST_ITER_INIT(&uiter);
2779 	while ((unode = ulist_next(roots, &uiter))) {
2780 		LIST_HEAD(tmp);
2781 
2782 		qg = find_qgroup_rb(fs_info, unode->val);
2783 		if (!qg)
2784 			continue;
2785 
2786 		qgroup_iterator_nested_add(qgroups, qg);
2787 		qgroup_iterator_add(&tmp, qg);
2788 		list_for_each_entry(qg, &tmp, iterator) {
2789 			struct btrfs_qgroup_list *glist;
2790 
2791 			if (update_old)
2792 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2793 			else
2794 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2795 
2796 			list_for_each_entry(glist, &qg->groups, next_group) {
2797 				qgroup_iterator_nested_add(qgroups, glist->group);
2798 				qgroup_iterator_add(&tmp, glist->group);
2799 			}
2800 		}
2801 		qgroup_iterator_clean(&tmp);
2802 	}
2803 }
2804 
2805 /*
2806  * Update qgroup rfer/excl counters.
2807  * Rfer update is easy, codes can explain themselves.
2808  *
2809  * Excl update is tricky, the update is split into 2 parts.
2810  * Part 1: Possible exclusive <-> sharing detect:
2811  *	|	A	|	!A	|
2812  *  -------------------------------------
2813  *  B	|	*	|	-	|
2814  *  -------------------------------------
2815  *  !B	|	+	|	**	|
2816  *  -------------------------------------
2817  *
2818  * Conditions:
2819  * A:	cur_old_roots < nr_old_roots	(not exclusive before)
2820  * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
2821  * B:	cur_new_roots < nr_new_roots	(not exclusive now)
2822  * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
2823  *
2824  * Results:
2825  * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
2826  * *: Definitely not changed.		**: Possible unchanged.
2827  *
2828  * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
2829  *
2830  * To make the logic clear, we first use condition A and B to split
2831  * combination into 4 results.
2832  *
2833  * Then, for result "+" and "-", check old/new_roots == 0 case, as in them
2834  * only on variant maybe 0.
2835  *
2836  * Lastly, check result **, since there are 2 variants maybe 0, split them
2837  * again(2x2).
2838  * But this time we don't need to consider other things, the codes and logic
2839  * is easy to understand now.
2840  */
qgroup_update_counters(struct btrfs_fs_info * fs_info,struct list_head * qgroups,u64 nr_old_roots,u64 nr_new_roots,u64 num_bytes,u64 seq)2841 static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
2842 				   struct list_head *qgroups, u64 nr_old_roots,
2843 				   u64 nr_new_roots, u64 num_bytes, u64 seq)
2844 {
2845 	struct btrfs_qgroup *qg;
2846 
2847 	list_for_each_entry(qg, qgroups, nested_iterator) {
2848 		u64 cur_new_count, cur_old_count;
2849 		bool dirty = false;
2850 
2851 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2852 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2853 
2854 		trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2855 					     cur_new_count);
2856 
2857 		/* Rfer update part */
2858 		if (cur_old_count == 0 && cur_new_count > 0) {
2859 			qg->rfer += num_bytes;
2860 			qg->rfer_cmpr += num_bytes;
2861 			dirty = true;
2862 		}
2863 		if (cur_old_count > 0 && cur_new_count == 0) {
2864 			qg->rfer -= num_bytes;
2865 			qg->rfer_cmpr -= num_bytes;
2866 			dirty = true;
2867 		}
2868 
2869 		/* Excl update part */
2870 		/* Exclusive/none -> shared case */
2871 		if (cur_old_count == nr_old_roots &&
2872 		    cur_new_count < nr_new_roots) {
2873 			/* Exclusive -> shared */
2874 			if (cur_old_count != 0) {
2875 				qg->excl -= num_bytes;
2876 				qg->excl_cmpr -= num_bytes;
2877 				dirty = true;
2878 			}
2879 		}
2880 
2881 		/* Shared -> exclusive/none case */
2882 		if (cur_old_count < nr_old_roots &&
2883 		    cur_new_count == nr_new_roots) {
2884 			/* Shared->exclusive */
2885 			if (cur_new_count != 0) {
2886 				qg->excl += num_bytes;
2887 				qg->excl_cmpr += num_bytes;
2888 				dirty = true;
2889 			}
2890 		}
2891 
2892 		/* Exclusive/none -> exclusive/none case */
2893 		if (cur_old_count == nr_old_roots &&
2894 		    cur_new_count == nr_new_roots) {
2895 			if (cur_old_count == 0) {
2896 				/* None -> exclusive/none */
2897 
2898 				if (cur_new_count != 0) {
2899 					/* None -> exclusive */
2900 					qg->excl += num_bytes;
2901 					qg->excl_cmpr += num_bytes;
2902 					dirty = true;
2903 				}
2904 				/* None -> none, nothing changed */
2905 			} else {
2906 				/* Exclusive -> exclusive/none */
2907 
2908 				if (cur_new_count == 0) {
2909 					/* Exclusive -> none */
2910 					qg->excl -= num_bytes;
2911 					qg->excl_cmpr -= num_bytes;
2912 					dirty = true;
2913 				}
2914 				/* Exclusive -> exclusive, nothing changed */
2915 			}
2916 		}
2917 
2918 		if (dirty)
2919 			qgroup_dirty(fs_info, qg);
2920 	}
2921 }
2922 
2923 /*
2924  * Check if the @roots potentially is a list of fs tree roots
2925  *
2926  * Return 0 for definitely not a fs/subvol tree roots ulist
2927  * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2928  *          one as well)
2929  */
maybe_fs_roots(struct ulist * roots)2930 static int maybe_fs_roots(struct ulist *roots)
2931 {
2932 	struct ulist_node *unode;
2933 	struct ulist_iterator uiter;
2934 
2935 	/* Empty one, still possible for fs roots */
2936 	if (!roots || roots->nnodes == 0)
2937 		return 1;
2938 
2939 	ULIST_ITER_INIT(&uiter);
2940 	unode = ulist_next(roots, &uiter);
2941 	if (!unode)
2942 		return 1;
2943 
2944 	/*
2945 	 * If it contains fs tree roots, then it must belong to fs/subvol
2946 	 * trees.
2947 	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2948 	 */
2949 	return is_fstree(unode->val);
2950 }
2951 
btrfs_qgroup_account_extent(struct btrfs_trans_handle * trans,u64 bytenr,u64 num_bytes,struct ulist * old_roots,struct ulist * new_roots)2952 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2953 				u64 num_bytes, struct ulist *old_roots,
2954 				struct ulist *new_roots)
2955 {
2956 	struct btrfs_fs_info *fs_info = trans->fs_info;
2957 	LIST_HEAD(qgroups);
2958 	u64 seq;
2959 	u64 nr_new_roots = 0;
2960 	u64 nr_old_roots = 0;
2961 	int ret = 0;
2962 
2963 	/*
2964 	 * If quotas get disabled meanwhile, the resources need to be freed and
2965 	 * we can't just exit here.
2966 	 */
2967 	if (!btrfs_qgroup_full_accounting(fs_info) ||
2968 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
2969 		goto out_free;
2970 
2971 	if (new_roots) {
2972 		if (!maybe_fs_roots(new_roots))
2973 			goto out_free;
2974 		nr_new_roots = new_roots->nnodes;
2975 	}
2976 	if (old_roots) {
2977 		if (!maybe_fs_roots(old_roots))
2978 			goto out_free;
2979 		nr_old_roots = old_roots->nnodes;
2980 	}
2981 
2982 	/* Quick exit, either not fs tree roots, or won't affect any qgroup */
2983 	if (nr_old_roots == 0 && nr_new_roots == 0)
2984 		goto out_free;
2985 
2986 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2987 					num_bytes, nr_old_roots, nr_new_roots);
2988 
2989 	mutex_lock(&fs_info->qgroup_rescan_lock);
2990 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2991 		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2992 			mutex_unlock(&fs_info->qgroup_rescan_lock);
2993 			ret = 0;
2994 			goto out_free;
2995 		}
2996 	}
2997 	mutex_unlock(&fs_info->qgroup_rescan_lock);
2998 
2999 	spin_lock(&fs_info->qgroup_lock);
3000 	seq = fs_info->qgroup_seq;
3001 
3002 	/* Update old refcnts using old_roots */
3003 	qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
3004 
3005 	/* Update new refcnts using new_roots */
3006 	qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
3007 
3008 	qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
3009 			       num_bytes, seq);
3010 
3011 	/*
3012 	 * We're done using the iterator, release all its qgroups while holding
3013 	 * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup()
3014 	 * and trigger use-after-free accesses to qgroups.
3015 	 */
3016 	qgroup_iterator_nested_clean(&qgroups);
3017 
3018 	/*
3019 	 * Bump qgroup_seq to avoid seq overlap
3020 	 */
3021 	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
3022 	spin_unlock(&fs_info->qgroup_lock);
3023 out_free:
3024 	ulist_free(old_roots);
3025 	ulist_free(new_roots);
3026 	return ret;
3027 }
3028 
btrfs_qgroup_account_extents(struct btrfs_trans_handle * trans)3029 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
3030 {
3031 	struct btrfs_fs_info *fs_info = trans->fs_info;
3032 	struct btrfs_qgroup_extent_record *record;
3033 	struct btrfs_delayed_ref_root *delayed_refs;
3034 	struct ulist *new_roots = NULL;
3035 	unsigned long index;
3036 	u64 num_dirty_extents = 0;
3037 	u64 qgroup_to_skip;
3038 	int ret = 0;
3039 
3040 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3041 		return 0;
3042 
3043 	delayed_refs = &trans->transaction->delayed_refs;
3044 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
3045 	xa_for_each(&delayed_refs->dirty_extents, index, record) {
3046 		num_dirty_extents++;
3047 		trace_btrfs_qgroup_account_extents(fs_info, record);
3048 
3049 		if (!ret && !(fs_info->qgroup_flags &
3050 			      BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) {
3051 			struct btrfs_backref_walk_ctx ctx = { 0 };
3052 
3053 			ctx.bytenr = record->bytenr;
3054 			ctx.fs_info = fs_info;
3055 
3056 			/*
3057 			 * Old roots should be searched when inserting qgroup
3058 			 * extent record.
3059 			 *
3060 			 * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
3061 			 * we may have some record inserted during
3062 			 * NO_ACCOUNTING (thus no old_roots populated), but
3063 			 * later we start rescan, which clears NO_ACCOUNTING,
3064 			 * leaving some inserted records without old_roots
3065 			 * populated.
3066 			 *
3067 			 * Those cases are rare and should not cause too much
3068 			 * time spent during commit_transaction().
3069 			 */
3070 			if (!record->old_roots) {
3071 				/* Search commit root to find old_roots */
3072 				ret = btrfs_find_all_roots(&ctx, false);
3073 				if (ret < 0)
3074 					goto cleanup;
3075 				record->old_roots = ctx.roots;
3076 				ctx.roots = NULL;
3077 			}
3078 
3079 			/*
3080 			 * Use BTRFS_SEQ_LAST as time_seq to do special search,
3081 			 * which doesn't lock tree or delayed_refs and search
3082 			 * current root. It's safe inside commit_transaction().
3083 			 */
3084 			ctx.trans = trans;
3085 			ctx.time_seq = BTRFS_SEQ_LAST;
3086 			ret = btrfs_find_all_roots(&ctx, false);
3087 			if (ret < 0)
3088 				goto cleanup;
3089 			new_roots = ctx.roots;
3090 			if (qgroup_to_skip) {
3091 				ulist_del(new_roots, qgroup_to_skip, 0);
3092 				ulist_del(record->old_roots, qgroup_to_skip,
3093 					  0);
3094 			}
3095 			ret = btrfs_qgroup_account_extent(trans, record->bytenr,
3096 							  record->num_bytes,
3097 							  record->old_roots,
3098 							  new_roots);
3099 			record->old_roots = NULL;
3100 			new_roots = NULL;
3101 		}
3102 		/* Free the reserved data space */
3103 		btrfs_qgroup_free_refroot(fs_info,
3104 				record->data_rsv_refroot,
3105 				record->data_rsv,
3106 				BTRFS_QGROUP_RSV_DATA);
3107 cleanup:
3108 		ulist_free(record->old_roots);
3109 		ulist_free(new_roots);
3110 		new_roots = NULL;
3111 		xa_erase(&delayed_refs->dirty_extents, index);
3112 		kfree(record);
3113 
3114 	}
3115 	trace_qgroup_num_dirty_extents(fs_info, trans->transid,
3116 				       num_dirty_extents);
3117 	return ret;
3118 }
3119 
3120 /*
3121  * Writes all changed qgroups to disk.
3122  * Called by the transaction commit path and the qgroup assign ioctl.
3123  */
btrfs_run_qgroups(struct btrfs_trans_handle * trans)3124 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
3125 {
3126 	struct btrfs_fs_info *fs_info = trans->fs_info;
3127 	int ret = 0;
3128 
3129 	/*
3130 	 * In case we are called from the qgroup assign ioctl, assert that we
3131 	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
3132 	 * disable operation (ioctl) and access a freed quota root.
3133 	 */
3134 	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
3135 		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
3136 
3137 	if (!fs_info->quota_root)
3138 		return ret;
3139 
3140 	spin_lock(&fs_info->qgroup_lock);
3141 	while (!list_empty(&fs_info->dirty_qgroups)) {
3142 		struct btrfs_qgroup *qgroup;
3143 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
3144 					  struct btrfs_qgroup, dirty);
3145 		list_del_init(&qgroup->dirty);
3146 		spin_unlock(&fs_info->qgroup_lock);
3147 		ret = update_qgroup_info_item(trans, qgroup);
3148 		if (ret)
3149 			qgroup_mark_inconsistent(fs_info);
3150 		ret = update_qgroup_limit_item(trans, qgroup);
3151 		if (ret)
3152 			qgroup_mark_inconsistent(fs_info);
3153 		spin_lock(&fs_info->qgroup_lock);
3154 	}
3155 	if (btrfs_qgroup_enabled(fs_info))
3156 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
3157 	else
3158 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
3159 	spin_unlock(&fs_info->qgroup_lock);
3160 
3161 	ret = update_qgroup_status_item(trans);
3162 	if (ret)
3163 		qgroup_mark_inconsistent(fs_info);
3164 
3165 	return ret;
3166 }
3167 
btrfs_qgroup_check_inherit(struct btrfs_fs_info * fs_info,struct btrfs_qgroup_inherit * inherit,size_t size)3168 int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
3169 			       struct btrfs_qgroup_inherit *inherit,
3170 			       size_t size)
3171 {
3172 	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
3173 		return -EOPNOTSUPP;
3174 	if (size < sizeof(*inherit) || size > PAGE_SIZE)
3175 		return -EINVAL;
3176 
3177 	/*
3178 	 * In the past we allowed btrfs_qgroup_inherit to specify to copy
3179 	 * rfer/excl numbers directly from other qgroups.  This behavior has
3180 	 * been disabled in userspace for a very long time, but here we should
3181 	 * also disable it in kernel, as this behavior is known to mark qgroup
3182 	 * inconsistent, and a rescan would wipe out the changes anyway.
3183 	 *
3184 	 * Reject any btrfs_qgroup_inherit with num_ref_copies or num_excl_copies.
3185 	 */
3186 	if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
3187 		return -EINVAL;
3188 
3189 	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
3190 		return -EINVAL;
3191 
3192 	/*
3193 	 * Skip the inherit source qgroups check if qgroup is not enabled.
3194 	 * Qgroup can still be later enabled causing problems, but in that case
3195 	 * btrfs_qgroup_inherit() would just ignore those invalid ones.
3196 	 */
3197 	if (!btrfs_qgroup_enabled(fs_info))
3198 		return 0;
3199 
3200 	/*
3201 	 * Now check all the remaining qgroups, they should all:
3202 	 *
3203 	 * - Exist
3204 	 * - Be higher level qgroups.
3205 	 */
3206 	for (int i = 0; i < inherit->num_qgroups; i++) {
3207 		struct btrfs_qgroup *qgroup;
3208 		u64 qgroupid = inherit->qgroups[i];
3209 
3210 		if (btrfs_qgroup_level(qgroupid) == 0)
3211 			return -EINVAL;
3212 
3213 		spin_lock(&fs_info->qgroup_lock);
3214 		qgroup = find_qgroup_rb(fs_info, qgroupid);
3215 		if (!qgroup) {
3216 			spin_unlock(&fs_info->qgroup_lock);
3217 			return -ENOENT;
3218 		}
3219 		spin_unlock(&fs_info->qgroup_lock);
3220 	}
3221 	return 0;
3222 }
3223 
qgroup_auto_inherit(struct btrfs_fs_info * fs_info,u64 inode_rootid,struct btrfs_qgroup_inherit ** inherit)3224 static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
3225 			       u64 inode_rootid,
3226 			       struct btrfs_qgroup_inherit **inherit)
3227 {
3228 	int i = 0;
3229 	u64 num_qgroups = 0;
3230 	struct btrfs_qgroup *inode_qg;
3231 	struct btrfs_qgroup_list *qg_list;
3232 	struct btrfs_qgroup_inherit *res;
3233 	size_t struct_sz;
3234 	u64 *qgids;
3235 
3236 	if (*inherit)
3237 		return -EEXIST;
3238 
3239 	inode_qg = find_qgroup_rb(fs_info, inode_rootid);
3240 	if (!inode_qg)
3241 		return -ENOENT;
3242 
3243 	num_qgroups = list_count_nodes(&inode_qg->groups);
3244 
3245 	if (!num_qgroups)
3246 		return 0;
3247 
3248 	struct_sz = struct_size(res, qgroups, num_qgroups);
3249 	if (struct_sz == SIZE_MAX)
3250 		return -ERANGE;
3251 
3252 	res = kzalloc(struct_sz, GFP_NOFS);
3253 	if (!res)
3254 		return -ENOMEM;
3255 	res->num_qgroups = num_qgroups;
3256 	qgids = res->qgroups;
3257 
3258 	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
3259 		qgids[i++] = qg_list->group->qgroupid;
3260 
3261 	*inherit = res;
3262 	return 0;
3263 }
3264 
3265 /*
3266  * Check if we can skip rescan when inheriting qgroups.  If @src has a single
3267  * @parent, and that @parent is owning all its bytes exclusively, we can skip
3268  * the full rescan, by just adding nodesize to the @parent's excl/rfer.
3269  *
3270  * Return <0 for fatal errors (like srcid/parentid has no qgroup).
3271  * Return 0 if a quick inherit is done.
3272  * Return >0 if a quick inherit is not possible, and a full rescan is needed.
3273  */
qgroup_snapshot_quick_inherit(struct btrfs_fs_info * fs_info,u64 srcid,u64 parentid)3274 static int qgroup_snapshot_quick_inherit(struct btrfs_fs_info *fs_info,
3275 					 u64 srcid, u64 parentid)
3276 {
3277 	struct btrfs_qgroup *src;
3278 	struct btrfs_qgroup *parent;
3279 	struct btrfs_qgroup_list *list;
3280 	int nr_parents = 0;
3281 
3282 	src = find_qgroup_rb(fs_info, srcid);
3283 	if (!src)
3284 		return -ENOENT;
3285 	parent = find_qgroup_rb(fs_info, parentid);
3286 	if (!parent)
3287 		return -ENOENT;
3288 
3289 	/*
3290 	 * Source has no parent qgroup, but our new qgroup would have one.
3291 	 * Qgroup numbers would become inconsistent.
3292 	 */
3293 	if (list_empty(&src->groups))
3294 		return 1;
3295 
3296 	list_for_each_entry(list, &src->groups, next_group) {
3297 		/* The parent is not the same, quick update is not possible. */
3298 		if (list->group->qgroupid != parentid)
3299 			return 1;
3300 		nr_parents++;
3301 		/*
3302 		 * More than one parent qgroup, we can't be sure about accounting
3303 		 * consistency.
3304 		 */
3305 		if (nr_parents > 1)
3306 			return 1;
3307 	}
3308 
3309 	/*
3310 	 * The parent is not exclusively owning all its bytes.  We're not sure
3311 	 * if the source has any bytes not fully owned by the parent.
3312 	 */
3313 	if (parent->excl != parent->rfer)
3314 		return 1;
3315 
3316 	parent->excl += fs_info->nodesize;
3317 	parent->rfer += fs_info->nodesize;
3318 	return 0;
3319 }
3320 
3321 /*
3322  * Copy the accounting information between qgroups. This is necessary
3323  * when a snapshot or a subvolume is created. Throwing an error will
3324  * cause a transaction abort so we take extra care here to only error
3325  * when a readonly fs is a reasonable outcome.
3326  */
btrfs_qgroup_inherit(struct btrfs_trans_handle * trans,u64 srcid,u64 objectid,u64 inode_rootid,struct btrfs_qgroup_inherit * inherit)3327 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
3328 			 u64 objectid, u64 inode_rootid,
3329 			 struct btrfs_qgroup_inherit *inherit)
3330 {
3331 	int ret = 0;
3332 	u64 *i_qgroups;
3333 	bool committing = false;
3334 	struct btrfs_fs_info *fs_info = trans->fs_info;
3335 	struct btrfs_root *quota_root;
3336 	struct btrfs_qgroup *srcgroup;
3337 	struct btrfs_qgroup *dstgroup;
3338 	struct btrfs_qgroup *prealloc;
3339 	struct btrfs_qgroup_list **qlist_prealloc = NULL;
3340 	bool free_inherit = false;
3341 	bool need_rescan = false;
3342 	u32 level_size = 0;
3343 	u64 nums;
3344 
3345 	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
3346 	if (!prealloc)
3347 		return -ENOMEM;
3348 
3349 	/*
3350 	 * There are only two callers of this function.
3351 	 *
3352 	 * One in create_subvol() in the ioctl context, which needs to hold
3353 	 * the qgroup_ioctl_lock.
3354 	 *
3355 	 * The other one in create_pending_snapshot() where no other qgroup
3356 	 * code can modify the fs as they all need to either start a new trans
3357 	 * or hold a trans handler, thus we don't need to hold
3358 	 * qgroup_ioctl_lock.
3359 	 * This would avoid long and complex lock chain and make lockdep happy.
3360 	 */
3361 	spin_lock(&fs_info->trans_lock);
3362 	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
3363 		committing = true;
3364 	spin_unlock(&fs_info->trans_lock);
3365 
3366 	if (!committing)
3367 		mutex_lock(&fs_info->qgroup_ioctl_lock);
3368 	if (!btrfs_qgroup_enabled(fs_info))
3369 		goto out;
3370 
3371 	quota_root = fs_info->quota_root;
3372 	if (!quota_root) {
3373 		ret = -EINVAL;
3374 		goto out;
3375 	}
3376 
3377 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
3378 		ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
3379 		if (ret)
3380 			goto out;
3381 		free_inherit = true;
3382 	}
3383 
3384 	if (inherit) {
3385 		i_qgroups = (u64 *)(inherit + 1);
3386 		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
3387 		       2 * inherit->num_excl_copies;
3388 		for (int i = 0; i < nums; i++) {
3389 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
3390 
3391 			/*
3392 			 * Zero out invalid groups so we can ignore
3393 			 * them later.
3394 			 */
3395 			if (!srcgroup ||
3396 			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
3397 				*i_qgroups = 0ULL;
3398 
3399 			++i_qgroups;
3400 		}
3401 	}
3402 
3403 	/*
3404 	 * create a tracking group for the subvol itself
3405 	 */
3406 	ret = add_qgroup_item(trans, quota_root, objectid);
3407 	if (ret)
3408 		goto out;
3409 
3410 	/*
3411 	 * add qgroup to all inherited groups
3412 	 */
3413 	if (inherit) {
3414 		i_qgroups = (u64 *)(inherit + 1);
3415 		for (int i = 0; i < inherit->num_qgroups; i++, i_qgroups++) {
3416 			if (*i_qgroups == 0)
3417 				continue;
3418 			ret = add_qgroup_relation_item(trans, objectid,
3419 						       *i_qgroups);
3420 			if (ret && ret != -EEXIST)
3421 				goto out;
3422 			ret = add_qgroup_relation_item(trans, *i_qgroups,
3423 						       objectid);
3424 			if (ret && ret != -EEXIST)
3425 				goto out;
3426 		}
3427 		ret = 0;
3428 
3429 		qlist_prealloc = kcalloc(inherit->num_qgroups,
3430 					 sizeof(struct btrfs_qgroup_list *),
3431 					 GFP_NOFS);
3432 		if (!qlist_prealloc) {
3433 			ret = -ENOMEM;
3434 			goto out;
3435 		}
3436 		for (int i = 0; i < inherit->num_qgroups; i++) {
3437 			qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
3438 						    GFP_NOFS);
3439 			if (!qlist_prealloc[i]) {
3440 				ret = -ENOMEM;
3441 				goto out;
3442 			}
3443 		}
3444 	}
3445 
3446 	spin_lock(&fs_info->qgroup_lock);
3447 
3448 	dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
3449 	prealloc = NULL;
3450 
3451 	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
3452 		dstgroup->lim_flags = inherit->lim.flags;
3453 		dstgroup->max_rfer = inherit->lim.max_rfer;
3454 		dstgroup->max_excl = inherit->lim.max_excl;
3455 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
3456 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
3457 
3458 		qgroup_dirty(fs_info, dstgroup);
3459 	}
3460 
3461 	if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
3462 		srcgroup = find_qgroup_rb(fs_info, srcid);
3463 		if (!srcgroup)
3464 			goto unlock;
3465 
3466 		/*
3467 		 * We call inherit after we clone the root in order to make sure
3468 		 * our counts don't go crazy, so at this point the only
3469 		 * difference between the two roots should be the root node.
3470 		 */
3471 		level_size = fs_info->nodesize;
3472 		dstgroup->rfer = srcgroup->rfer;
3473 		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
3474 		dstgroup->excl = level_size;
3475 		dstgroup->excl_cmpr = level_size;
3476 		srcgroup->excl = level_size;
3477 		srcgroup->excl_cmpr = level_size;
3478 
3479 		/* inherit the limit info */
3480 		dstgroup->lim_flags = srcgroup->lim_flags;
3481 		dstgroup->max_rfer = srcgroup->max_rfer;
3482 		dstgroup->max_excl = srcgroup->max_excl;
3483 		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
3484 		dstgroup->rsv_excl = srcgroup->rsv_excl;
3485 
3486 		qgroup_dirty(fs_info, dstgroup);
3487 		qgroup_dirty(fs_info, srcgroup);
3488 
3489 		/*
3490 		 * If the source qgroup has parent but the new one doesn't,
3491 		 * we need a full rescan.
3492 		 */
3493 		if (!inherit && !list_empty(&srcgroup->groups))
3494 			need_rescan = true;
3495 	}
3496 
3497 	if (!inherit)
3498 		goto unlock;
3499 
3500 	i_qgroups = (u64 *)(inherit + 1);
3501 	for (int i = 0; i < inherit->num_qgroups; i++) {
3502 		if (*i_qgroups) {
3503 			ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
3504 					      *i_qgroups);
3505 			qlist_prealloc[i] = NULL;
3506 			if (ret)
3507 				goto unlock;
3508 		}
3509 		if (srcid) {
3510 			/* Check if we can do a quick inherit. */
3511 			ret = qgroup_snapshot_quick_inherit(fs_info, srcid, *i_qgroups);
3512 			if (ret < 0)
3513 				goto unlock;
3514 			if (ret > 0)
3515 				need_rescan = true;
3516 			ret = 0;
3517 		}
3518 		++i_qgroups;
3519 	}
3520 
3521 	for (int i = 0; i < inherit->num_ref_copies; i++, i_qgroups += 2) {
3522 		struct btrfs_qgroup *src;
3523 		struct btrfs_qgroup *dst;
3524 
3525 		if (!i_qgroups[0] || !i_qgroups[1])
3526 			continue;
3527 
3528 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3529 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3530 
3531 		if (!src || !dst) {
3532 			ret = -EINVAL;
3533 			goto unlock;
3534 		}
3535 
3536 		dst->rfer = src->rfer - level_size;
3537 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
3538 
3539 		/* Manually tweaking numbers certainly needs a rescan */
3540 		need_rescan = true;
3541 	}
3542 	for (int i = 0; i < inherit->num_excl_copies; i++, i_qgroups += 2) {
3543 		struct btrfs_qgroup *src;
3544 		struct btrfs_qgroup *dst;
3545 
3546 		if (!i_qgroups[0] || !i_qgroups[1])
3547 			continue;
3548 
3549 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
3550 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
3551 
3552 		if (!src || !dst) {
3553 			ret = -EINVAL;
3554 			goto unlock;
3555 		}
3556 
3557 		dst->excl = src->excl + level_size;
3558 		dst->excl_cmpr = src->excl_cmpr + level_size;
3559 		need_rescan = true;
3560 	}
3561 
3562 unlock:
3563 	spin_unlock(&fs_info->qgroup_lock);
3564 	if (!ret)
3565 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
3566 out:
3567 	if (!committing)
3568 		mutex_unlock(&fs_info->qgroup_ioctl_lock);
3569 	if (need_rescan)
3570 		qgroup_mark_inconsistent(fs_info);
3571 	if (qlist_prealloc) {
3572 		for (int i = 0; i < inherit->num_qgroups; i++)
3573 			kfree(qlist_prealloc[i]);
3574 		kfree(qlist_prealloc);
3575 	}
3576 	if (free_inherit)
3577 		kfree(inherit);
3578 	kfree(prealloc);
3579 	return ret;
3580 }
3581 
qgroup_check_limits(const struct btrfs_qgroup * qg,u64 num_bytes)3582 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
3583 {
3584 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3585 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
3586 		return false;
3587 
3588 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
3589 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
3590 		return false;
3591 
3592 	return true;
3593 }
3594 
qgroup_reserve(struct btrfs_root * root,u64 num_bytes,bool enforce,enum btrfs_qgroup_rsv_type type)3595 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
3596 			  enum btrfs_qgroup_rsv_type type)
3597 {
3598 	struct btrfs_qgroup *qgroup;
3599 	struct btrfs_fs_info *fs_info = root->fs_info;
3600 	u64 ref_root = btrfs_root_id(root);
3601 	int ret = 0;
3602 	LIST_HEAD(qgroup_list);
3603 
3604 	if (!is_fstree(ref_root))
3605 		return 0;
3606 
3607 	if (num_bytes == 0)
3608 		return 0;
3609 
3610 	if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
3611 	    capable(CAP_SYS_RESOURCE))
3612 		enforce = false;
3613 
3614 	spin_lock(&fs_info->qgroup_lock);
3615 	if (!fs_info->quota_root)
3616 		goto out;
3617 
3618 	qgroup = find_qgroup_rb(fs_info, ref_root);
3619 	if (!qgroup)
3620 		goto out;
3621 
3622 	qgroup_iterator_add(&qgroup_list, qgroup);
3623 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3624 		struct btrfs_qgroup_list *glist;
3625 
3626 		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3627 			ret = -EDQUOT;
3628 			goto out;
3629 		}
3630 
3631 		list_for_each_entry(glist, &qgroup->groups, next_group)
3632 			qgroup_iterator_add(&qgroup_list, glist->group);
3633 	}
3634 
3635 	ret = 0;
3636 	/*
3637 	 * no limits exceeded, now record the reservation into all qgroups
3638 	 */
3639 	list_for_each_entry(qgroup, &qgroup_list, iterator)
3640 		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
3641 
3642 out:
3643 	qgroup_iterator_clean(&qgroup_list);
3644 	spin_unlock(&fs_info->qgroup_lock);
3645 	return ret;
3646 }
3647 
3648 /*
3649  * Free @num_bytes of reserved space with @type for qgroup.  (Normally level 0
3650  * qgroup).
3651  *
3652  * Will handle all higher level qgroup too.
3653  *
3654  * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3655  * This special case is only used for META_PERTRANS type.
3656  */
btrfs_qgroup_free_refroot(struct btrfs_fs_info * fs_info,u64 ref_root,u64 num_bytes,enum btrfs_qgroup_rsv_type type)3657 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
3658 			       u64 ref_root, u64 num_bytes,
3659 			       enum btrfs_qgroup_rsv_type type)
3660 {
3661 	struct btrfs_qgroup *qgroup;
3662 	LIST_HEAD(qgroup_list);
3663 
3664 	if (!is_fstree(ref_root))
3665 		return;
3666 
3667 	if (num_bytes == 0)
3668 		return;
3669 
3670 	if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3671 		WARN(1, "%s: Invalid type to free", __func__);
3672 		return;
3673 	}
3674 	spin_lock(&fs_info->qgroup_lock);
3675 
3676 	if (!fs_info->quota_root)
3677 		goto out;
3678 
3679 	qgroup = find_qgroup_rb(fs_info, ref_root);
3680 	if (!qgroup)
3681 		goto out;
3682 
3683 	if (num_bytes == (u64)-1)
3684 		/*
3685 		 * We're freeing all pertrans rsv, get reserved value from
3686 		 * level 0 qgroup as real num_bytes to free.
3687 		 */
3688 		num_bytes = qgroup->rsv.values[type];
3689 
3690 	qgroup_iterator_add(&qgroup_list, qgroup);
3691 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
3692 		struct btrfs_qgroup_list *glist;
3693 
3694 		qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
3695 		list_for_each_entry(glist, &qgroup->groups, next_group) {
3696 			qgroup_iterator_add(&qgroup_list, glist->group);
3697 		}
3698 	}
3699 out:
3700 	qgroup_iterator_clean(&qgroup_list);
3701 	spin_unlock(&fs_info->qgroup_lock);
3702 }
3703 
3704 /*
3705  * Check if the leaf is the last leaf. Which means all node pointers
3706  * are at their last position.
3707  */
is_last_leaf(struct btrfs_path * path)3708 static bool is_last_leaf(struct btrfs_path *path)
3709 {
3710 	int i;
3711 
3712 	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3713 		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3714 			return false;
3715 	}
3716 	return true;
3717 }
3718 
3719 /*
3720  * returns < 0 on error, 0 when more leafs are to be scanned.
3721  * returns 1 when done.
3722  */
qgroup_rescan_leaf(struct btrfs_trans_handle * trans,struct btrfs_path * path)3723 static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3724 			      struct btrfs_path *path)
3725 {
3726 	struct btrfs_fs_info *fs_info = trans->fs_info;
3727 	struct btrfs_root *extent_root;
3728 	struct btrfs_key found;
3729 	struct extent_buffer *scratch_leaf = NULL;
3730 	u64 num_bytes;
3731 	bool done;
3732 	int slot;
3733 	int ret;
3734 
3735 	if (!btrfs_qgroup_full_accounting(fs_info))
3736 		return 1;
3737 
3738 	mutex_lock(&fs_info->qgroup_rescan_lock);
3739 	extent_root = btrfs_extent_root(fs_info,
3740 				fs_info->qgroup_rescan_progress.objectid);
3741 	ret = btrfs_search_slot_for_read(extent_root,
3742 					 &fs_info->qgroup_rescan_progress,
3743 					 path, 1, 0);
3744 
3745 	btrfs_debug(fs_info,
3746 		"current progress key (%llu %u %llu), search_slot ret %d",
3747 		fs_info->qgroup_rescan_progress.objectid,
3748 		fs_info->qgroup_rescan_progress.type,
3749 		fs_info->qgroup_rescan_progress.offset, ret);
3750 
3751 	if (ret) {
3752 		/*
3753 		 * The rescan is about to end, we will not be scanning any
3754 		 * further blocks. We cannot unset the RESCAN flag here, because
3755 		 * we want to commit the transaction if everything went well.
3756 		 * To make the live accounting work in this phase, we set our
3757 		 * scan progress pointer such that every real extent objectid
3758 		 * will be smaller.
3759 		 */
3760 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3761 		btrfs_release_path(path);
3762 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3763 		return ret;
3764 	}
3765 	done = is_last_leaf(path);
3766 
3767 	btrfs_item_key_to_cpu(path->nodes[0], &found,
3768 			      btrfs_header_nritems(path->nodes[0]) - 1);
3769 	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3770 
3771 	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3772 	if (!scratch_leaf) {
3773 		ret = -ENOMEM;
3774 		mutex_unlock(&fs_info->qgroup_rescan_lock);
3775 		goto out;
3776 	}
3777 	slot = path->slots[0];
3778 	btrfs_release_path(path);
3779 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3780 
3781 	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3782 		struct btrfs_backref_walk_ctx ctx = { 0 };
3783 
3784 		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
3785 		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3786 		    found.type != BTRFS_METADATA_ITEM_KEY)
3787 			continue;
3788 		if (found.type == BTRFS_METADATA_ITEM_KEY)
3789 			num_bytes = fs_info->nodesize;
3790 		else
3791 			num_bytes = found.offset;
3792 
3793 		ctx.bytenr = found.objectid;
3794 		ctx.fs_info = fs_info;
3795 
3796 		ret = btrfs_find_all_roots(&ctx, false);
3797 		if (ret < 0)
3798 			goto out;
3799 		/* For rescan, just pass old_roots as NULL */
3800 		ret = btrfs_qgroup_account_extent(trans, found.objectid,
3801 						  num_bytes, NULL, ctx.roots);
3802 		if (ret < 0)
3803 			goto out;
3804 	}
3805 out:
3806 	if (scratch_leaf)
3807 		free_extent_buffer(scratch_leaf);
3808 
3809 	if (done && !ret) {
3810 		ret = 1;
3811 		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3812 	}
3813 	return ret;
3814 }
3815 
rescan_should_stop(struct btrfs_fs_info * fs_info)3816 static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3817 {
3818 	if (btrfs_fs_closing(fs_info))
3819 		return true;
3820 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
3821 		return true;
3822 	if (!btrfs_qgroup_enabled(fs_info))
3823 		return true;
3824 	if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3825 		return true;
3826 	return false;
3827 }
3828 
btrfs_qgroup_rescan_worker(struct btrfs_work * work)3829 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
3830 {
3831 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3832 						     qgroup_rescan_work);
3833 	struct btrfs_path *path;
3834 	struct btrfs_trans_handle *trans = NULL;
3835 	int ret = 0;
3836 	bool stopped = false;
3837 	bool did_leaf_rescans = false;
3838 
3839 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
3840 		return;
3841 
3842 	path = btrfs_alloc_path();
3843 	if (!path) {
3844 		ret = -ENOMEM;
3845 		goto out;
3846 	}
3847 	/*
3848 	 * Rescan should only search for commit root, and any later difference
3849 	 * should be recorded by qgroup
3850 	 */
3851 	path->search_commit_root = 1;
3852 	path->skip_locking = 1;
3853 
3854 	while (!ret && !(stopped = rescan_should_stop(fs_info))) {
3855 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
3856 		if (IS_ERR(trans)) {
3857 			ret = PTR_ERR(trans);
3858 			break;
3859 		}
3860 
3861 		ret = qgroup_rescan_leaf(trans, path);
3862 		did_leaf_rescans = true;
3863 
3864 		if (ret > 0)
3865 			btrfs_commit_transaction(trans);
3866 		else
3867 			btrfs_end_transaction(trans);
3868 	}
3869 
3870 out:
3871 	btrfs_free_path(path);
3872 
3873 	mutex_lock(&fs_info->qgroup_rescan_lock);
3874 	if (ret > 0 &&
3875 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3876 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3877 	} else if (ret < 0 || stopped) {
3878 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3879 	}
3880 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3881 
3882 	/*
3883 	 * Only update status, since the previous part has already updated the
3884 	 * qgroup info, and only if we did any actual work. This also prevents
3885 	 * race with a concurrent quota disable, which has already set
3886 	 * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
3887 	 * btrfs_quota_disable().
3888 	 */
3889 	if (did_leaf_rescans) {
3890 		trans = btrfs_start_transaction(fs_info->quota_root, 1);
3891 		if (IS_ERR(trans)) {
3892 			ret = PTR_ERR(trans);
3893 			trans = NULL;
3894 			btrfs_err(fs_info,
3895 				  "fail to start transaction for status update: %d",
3896 				  ret);
3897 		}
3898 	} else {
3899 		trans = NULL;
3900 	}
3901 
3902 	mutex_lock(&fs_info->qgroup_rescan_lock);
3903 	if (!stopped ||
3904 	    fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
3905 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3906 	if (trans) {
3907 		int ret2 = update_qgroup_status_item(trans);
3908 
3909 		if (ret2 < 0) {
3910 			ret = ret2;
3911 			btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
3912 		}
3913 	}
3914 	fs_info->qgroup_rescan_running = false;
3915 	fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
3916 	complete_all(&fs_info->qgroup_rescan_completion);
3917 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3918 
3919 	if (!trans)
3920 		return;
3921 
3922 	btrfs_end_transaction(trans);
3923 
3924 	if (stopped) {
3925 		btrfs_info(fs_info, "qgroup scan paused");
3926 	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
3927 		btrfs_info(fs_info, "qgroup scan cancelled");
3928 	} else if (ret >= 0) {
3929 		btrfs_info(fs_info, "qgroup scan completed%s",
3930 			ret > 0 ? " (inconsistency flag cleared)" : "");
3931 	} else {
3932 		btrfs_err(fs_info, "qgroup scan failed with %d", ret);
3933 	}
3934 }
3935 
3936 /*
3937  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3938  * memory required for the rescan context.
3939  */
3940 static int
qgroup_rescan_init(struct btrfs_fs_info * fs_info,u64 progress_objectid,int init_flags)3941 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3942 		   int init_flags)
3943 {
3944 	int ret = 0;
3945 
3946 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3947 		btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
3948 		return -EINVAL;
3949 	}
3950 
3951 	if (!init_flags) {
3952 		/* we're resuming qgroup rescan at mount time */
3953 		if (!(fs_info->qgroup_flags &
3954 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
3955 			btrfs_debug(fs_info,
3956 			"qgroup rescan init failed, qgroup rescan is not queued");
3957 			ret = -EINVAL;
3958 		} else if (!(fs_info->qgroup_flags &
3959 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3960 			btrfs_debug(fs_info,
3961 			"qgroup rescan init failed, qgroup is not enabled");
3962 			ret = -ENOTCONN;
3963 		}
3964 
3965 		if (ret)
3966 			return ret;
3967 	}
3968 
3969 	mutex_lock(&fs_info->qgroup_rescan_lock);
3970 
3971 	if (init_flags) {
3972 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3973 			ret = -EINPROGRESS;
3974 		} else if (!(fs_info->qgroup_flags &
3975 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
3976 			btrfs_debug(fs_info,
3977 			"qgroup rescan init failed, qgroup is not enabled");
3978 			ret = -ENOTCONN;
3979 		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
3980 			/* Quota disable is in progress */
3981 			ret = -EBUSY;
3982 		}
3983 
3984 		if (ret) {
3985 			mutex_unlock(&fs_info->qgroup_rescan_lock);
3986 			return ret;
3987 		}
3988 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3989 	}
3990 
3991 	memset(&fs_info->qgroup_rescan_progress, 0,
3992 		sizeof(fs_info->qgroup_rescan_progress));
3993 	fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
3994 				   BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
3995 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
3996 	init_completion(&fs_info->qgroup_rescan_completion);
3997 	mutex_unlock(&fs_info->qgroup_rescan_lock);
3998 
3999 	btrfs_init_work(&fs_info->qgroup_rescan_work,
4000 			btrfs_qgroup_rescan_worker, NULL);
4001 	return 0;
4002 }
4003 
4004 static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info * fs_info)4005 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
4006 {
4007 	struct rb_node *n;
4008 	struct btrfs_qgroup *qgroup;
4009 
4010 	spin_lock(&fs_info->qgroup_lock);
4011 	/* clear all current qgroup tracking information */
4012 	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
4013 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
4014 		qgroup->rfer = 0;
4015 		qgroup->rfer_cmpr = 0;
4016 		qgroup->excl = 0;
4017 		qgroup->excl_cmpr = 0;
4018 		qgroup_dirty(fs_info, qgroup);
4019 	}
4020 	spin_unlock(&fs_info->qgroup_lock);
4021 }
4022 
4023 int
btrfs_qgroup_rescan(struct btrfs_fs_info * fs_info)4024 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
4025 {
4026 	int ret = 0;
4027 
4028 	ret = qgroup_rescan_init(fs_info, 0, 1);
4029 	if (ret)
4030 		return ret;
4031 
4032 	/*
4033 	 * We have set the rescan_progress to 0, which means no more
4034 	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
4035 	 * However, btrfs_qgroup_account_ref may be right after its call
4036 	 * to btrfs_find_all_roots, in which case it would still do the
4037 	 * accounting.
4038 	 * To solve this, we're committing the transaction, which will
4039 	 * ensure we run all delayed refs and only after that, we are
4040 	 * going to clear all tracking information for a clean start.
4041 	 */
4042 
4043 	ret = btrfs_commit_current_transaction(fs_info->fs_root);
4044 	if (ret) {
4045 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
4046 		return ret;
4047 	}
4048 
4049 	qgroup_rescan_zero_tracking(fs_info);
4050 
4051 	mutex_lock(&fs_info->qgroup_rescan_lock);
4052 	fs_info->qgroup_rescan_running = true;
4053 	btrfs_queue_work(fs_info->qgroup_rescan_workers,
4054 			 &fs_info->qgroup_rescan_work);
4055 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4056 
4057 	return 0;
4058 }
4059 
btrfs_qgroup_wait_for_completion(struct btrfs_fs_info * fs_info,bool interruptible)4060 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
4061 				     bool interruptible)
4062 {
4063 	int running;
4064 	int ret = 0;
4065 
4066 	mutex_lock(&fs_info->qgroup_rescan_lock);
4067 	running = fs_info->qgroup_rescan_running;
4068 	mutex_unlock(&fs_info->qgroup_rescan_lock);
4069 
4070 	if (!running)
4071 		return 0;
4072 
4073 	if (interruptible)
4074 		ret = wait_for_completion_interruptible(
4075 					&fs_info->qgroup_rescan_completion);
4076 	else
4077 		wait_for_completion(&fs_info->qgroup_rescan_completion);
4078 
4079 	return ret;
4080 }
4081 
4082 /*
4083  * this is only called from open_ctree where we're still single threaded, thus
4084  * locking is omitted here.
4085  */
4086 void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info * fs_info)4087 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
4088 {
4089 	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
4090 		mutex_lock(&fs_info->qgroup_rescan_lock);
4091 		fs_info->qgroup_rescan_running = true;
4092 		btrfs_queue_work(fs_info->qgroup_rescan_workers,
4093 				 &fs_info->qgroup_rescan_work);
4094 		mutex_unlock(&fs_info->qgroup_rescan_lock);
4095 	}
4096 }
4097 
4098 #define rbtree_iterate_from_safe(node, next, start)				\
4099        for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
4100 
qgroup_unreserve_range(struct btrfs_inode * inode,struct extent_changeset * reserved,u64 start,u64 len)4101 static int qgroup_unreserve_range(struct btrfs_inode *inode,
4102 				  struct extent_changeset *reserved, u64 start,
4103 				  u64 len)
4104 {
4105 	struct rb_node *node;
4106 	struct rb_node *next;
4107 	struct ulist_node *entry;
4108 	int ret = 0;
4109 
4110 	node = reserved->range_changed.root.rb_node;
4111 	if (!node)
4112 		return 0;
4113 	while (node) {
4114 		entry = rb_entry(node, struct ulist_node, rb_node);
4115 		if (entry->val < start)
4116 			node = node->rb_right;
4117 		else
4118 			node = node->rb_left;
4119 	}
4120 
4121 	if (entry->val > start && rb_prev(&entry->rb_node))
4122 		entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
4123 				 rb_node);
4124 
4125 	rbtree_iterate_from_safe(node, next, &entry->rb_node) {
4126 		u64 entry_start;
4127 		u64 entry_end;
4128 		u64 entry_len;
4129 		int clear_ret;
4130 
4131 		entry = rb_entry(node, struct ulist_node, rb_node);
4132 		entry_start = entry->val;
4133 		entry_end = entry->aux;
4134 		entry_len = entry_end - entry_start + 1;
4135 
4136 		if (entry_start >= start + len)
4137 			break;
4138 		if (entry_start + entry_len <= start)
4139 			continue;
4140 		/*
4141 		 * Now the entry is in [start, start + len), revert the
4142 		 * EXTENT_QGROUP_RESERVED bit.
4143 		 */
4144 		clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
4145 					      entry_end, EXTENT_QGROUP_RESERVED);
4146 		if (!ret && clear_ret < 0)
4147 			ret = clear_ret;
4148 
4149 		ulist_del(&reserved->range_changed, entry->val, entry->aux);
4150 		if (likely(reserved->bytes_changed >= entry_len)) {
4151 			reserved->bytes_changed -= entry_len;
4152 		} else {
4153 			WARN_ON(1);
4154 			reserved->bytes_changed = 0;
4155 		}
4156 	}
4157 
4158 	return ret;
4159 }
4160 
4161 /*
4162  * Try to free some space for qgroup.
4163  *
4164  * For qgroup, there are only 3 ways to free qgroup space:
4165  * - Flush nodatacow write
4166  *   Any nodatacow write will free its reserved data space at run_delalloc_range().
4167  *   In theory, we should only flush nodatacow inodes, but it's not yet
4168  *   possible, so we need to flush the whole root.
4169  *
4170  * - Wait for ordered extents
4171  *   When ordered extents are finished, their reserved metadata is finally
4172  *   converted to per_trans status, which can be freed by later commit
4173  *   transaction.
4174  *
4175  * - Commit transaction
4176  *   This would free the meta_per_trans space.
4177  *   In theory this shouldn't provide much space, but any more qgroup space
4178  *   is needed.
4179  */
try_flush_qgroup(struct btrfs_root * root)4180 static int try_flush_qgroup(struct btrfs_root *root)
4181 {
4182 	int ret;
4183 
4184 	/* Can't hold an open transaction or we run the risk of deadlocking. */
4185 	ASSERT(current->journal_info == NULL);
4186 	if (WARN_ON(current->journal_info))
4187 		return 0;
4188 
4189 	/*
4190 	 * We don't want to run flush again and again, so if there is a running
4191 	 * one, we won't try to start a new flush, but exit directly.
4192 	 */
4193 	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
4194 		wait_event(root->qgroup_flush_wait,
4195 			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
4196 		return 0;
4197 	}
4198 
4199 	btrfs_run_delayed_iputs(root->fs_info);
4200 	btrfs_wait_on_delayed_iputs(root->fs_info);
4201 	ret = btrfs_start_delalloc_snapshot(root, true);
4202 	if (ret < 0)
4203 		goto out;
4204 	btrfs_wait_ordered_extents(root, U64_MAX, NULL);
4205 
4206 	ret = btrfs_commit_current_transaction(root);
4207 out:
4208 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
4209 	wake_up(&root->qgroup_flush_wait);
4210 	return ret;
4211 }
4212 
qgroup_reserve_data(struct btrfs_inode * inode,struct extent_changeset ** reserved_ret,u64 start,u64 len)4213 static int qgroup_reserve_data(struct btrfs_inode *inode,
4214 			struct extent_changeset **reserved_ret, u64 start,
4215 			u64 len)
4216 {
4217 	struct btrfs_root *root = inode->root;
4218 	struct extent_changeset *reserved;
4219 	bool new_reserved = false;
4220 	u64 orig_reserved;
4221 	u64 to_reserve;
4222 	int ret;
4223 
4224 	if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4225 	    !is_fstree(btrfs_root_id(root)) || len == 0)
4226 		return 0;
4227 
4228 	/* @reserved parameter is mandatory for qgroup */
4229 	if (WARN_ON(!reserved_ret))
4230 		return -EINVAL;
4231 	if (!*reserved_ret) {
4232 		new_reserved = true;
4233 		*reserved_ret = extent_changeset_alloc();
4234 		if (!*reserved_ret)
4235 			return -ENOMEM;
4236 	}
4237 	reserved = *reserved_ret;
4238 	/* Record already reserved space */
4239 	orig_reserved = reserved->bytes_changed;
4240 	ret = set_record_extent_bits(&inode->io_tree, start,
4241 			start + len -1, EXTENT_QGROUP_RESERVED, reserved);
4242 
4243 	/* Newly reserved space */
4244 	to_reserve = reserved->bytes_changed - orig_reserved;
4245 	trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
4246 					to_reserve, QGROUP_RESERVE);
4247 	if (ret < 0)
4248 		goto out;
4249 	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
4250 	if (ret < 0)
4251 		goto cleanup;
4252 
4253 	return ret;
4254 
4255 cleanup:
4256 	qgroup_unreserve_range(inode, reserved, start, len);
4257 out:
4258 	if (new_reserved) {
4259 		extent_changeset_free(reserved);
4260 		*reserved_ret = NULL;
4261 	}
4262 	return ret;
4263 }
4264 
4265 /*
4266  * Reserve qgroup space for range [start, start + len).
4267  *
4268  * This function will either reserve space from related qgroups or do nothing
4269  * if the range is already reserved.
4270  *
4271  * Return 0 for successful reservation
4272  * Return <0 for error (including -EQUOT)
4273  *
4274  * NOTE: This function may sleep for memory allocation, dirty page flushing and
4275  *	 commit transaction. So caller should not hold any dirty page locked.
4276  */
btrfs_qgroup_reserve_data(struct btrfs_inode * inode,struct extent_changeset ** reserved_ret,u64 start,u64 len)4277 int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
4278 			struct extent_changeset **reserved_ret, u64 start,
4279 			u64 len)
4280 {
4281 	int ret;
4282 
4283 	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
4284 	if (ret <= 0 && ret != -EDQUOT)
4285 		return ret;
4286 
4287 	ret = try_flush_qgroup(inode->root);
4288 	if (ret < 0)
4289 		return ret;
4290 	return qgroup_reserve_data(inode, reserved_ret, start, len);
4291 }
4292 
4293 /* Free ranges specified by @reserved, normally in error path */
qgroup_free_reserved_data(struct btrfs_inode * inode,struct extent_changeset * reserved,u64 start,u64 len,u64 * freed_ret)4294 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
4295 				     struct extent_changeset *reserved,
4296 				     u64 start, u64 len, u64 *freed_ret)
4297 {
4298 	struct btrfs_root *root = inode->root;
4299 	struct ulist_node *unode;
4300 	struct ulist_iterator uiter;
4301 	struct extent_changeset changeset;
4302 	u64 freed = 0;
4303 	int ret;
4304 
4305 	extent_changeset_init(&changeset);
4306 	len = round_up(start + len, root->fs_info->sectorsize);
4307 	start = round_down(start, root->fs_info->sectorsize);
4308 
4309 	ULIST_ITER_INIT(&uiter);
4310 	while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
4311 		u64 range_start = unode->val;
4312 		/* unode->aux is the inclusive end */
4313 		u64 range_len = unode->aux - range_start + 1;
4314 		u64 free_start;
4315 		u64 free_len;
4316 
4317 		extent_changeset_release(&changeset);
4318 
4319 		/* Only free range in range [start, start + len) */
4320 		if (range_start >= start + len ||
4321 		    range_start + range_len <= start)
4322 			continue;
4323 		free_start = max(range_start, start);
4324 		free_len = min(start + len, range_start + range_len) -
4325 			   free_start;
4326 		/*
4327 		 * TODO: To also modify reserved->ranges_reserved to reflect
4328 		 * the modification.
4329 		 *
4330 		 * However as long as we free qgroup reserved according to
4331 		 * EXTENT_QGROUP_RESERVED, we won't double free.
4332 		 * So not need to rush.
4333 		 */
4334 		ret = clear_record_extent_bits(&inode->io_tree, free_start,
4335 				free_start + free_len - 1,
4336 				EXTENT_QGROUP_RESERVED, &changeset);
4337 		if (ret < 0)
4338 			goto out;
4339 		freed += changeset.bytes_changed;
4340 	}
4341 	btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
4342 				  BTRFS_QGROUP_RSV_DATA);
4343 	if (freed_ret)
4344 		*freed_ret = freed;
4345 	ret = 0;
4346 out:
4347 	extent_changeset_release(&changeset);
4348 	return ret;
4349 }
4350 
__btrfs_qgroup_release_data(struct btrfs_inode * inode,struct extent_changeset * reserved,u64 start,u64 len,u64 * released,int free)4351 static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
4352 			struct extent_changeset *reserved, u64 start, u64 len,
4353 			u64 *released, int free)
4354 {
4355 	struct extent_changeset changeset;
4356 	int trace_op = QGROUP_RELEASE;
4357 	int ret;
4358 
4359 	if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
4360 		return clear_record_extent_bits(&inode->io_tree, start,
4361 						start + len - 1,
4362 						EXTENT_QGROUP_RESERVED, NULL);
4363 	}
4364 
4365 	/* In release case, we shouldn't have @reserved */
4366 	WARN_ON(!free && reserved);
4367 	if (free && reserved)
4368 		return qgroup_free_reserved_data(inode, reserved, start, len, released);
4369 	extent_changeset_init(&changeset);
4370 	ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
4371 				       EXTENT_QGROUP_RESERVED, &changeset);
4372 	if (ret < 0)
4373 		goto out;
4374 
4375 	if (free)
4376 		trace_op = QGROUP_FREE;
4377 	trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
4378 					changeset.bytes_changed, trace_op);
4379 	if (free)
4380 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4381 				btrfs_root_id(inode->root),
4382 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4383 	if (released)
4384 		*released = changeset.bytes_changed;
4385 out:
4386 	extent_changeset_release(&changeset);
4387 	return ret;
4388 }
4389 
4390 /*
4391  * Free a reserved space range from io_tree and related qgroups
4392  *
4393  * Should be called when a range of pages get invalidated before reaching disk.
4394  * Or for error cleanup case.
4395  * if @reserved is given, only reserved range in [@start, @start + @len) will
4396  * be freed.
4397  *
4398  * For data written to disk, use btrfs_qgroup_release_data().
4399  *
4400  * NOTE: This function may sleep for memory allocation.
4401  */
btrfs_qgroup_free_data(struct btrfs_inode * inode,struct extent_changeset * reserved,u64 start,u64 len,u64 * freed)4402 int btrfs_qgroup_free_data(struct btrfs_inode *inode,
4403 			   struct extent_changeset *reserved,
4404 			   u64 start, u64 len, u64 *freed)
4405 {
4406 	return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
4407 }
4408 
4409 /*
4410  * Release a reserved space range from io_tree only.
4411  *
4412  * Should be called when a range of pages get written to disk and corresponding
4413  * FILE_EXTENT is inserted into corresponding root.
4414  *
4415  * Since new qgroup accounting framework will only update qgroup numbers at
4416  * commit_transaction() time, its reserved space shouldn't be freed from
4417  * related qgroups.
4418  *
4419  * But we should release the range from io_tree, to allow further write to be
4420  * COWed.
4421  *
4422  * NOTE: This function may sleep for memory allocation.
4423  */
btrfs_qgroup_release_data(struct btrfs_inode * inode,u64 start,u64 len,u64 * released)4424 int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
4425 {
4426 	return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
4427 }
4428 
add_root_meta_rsv(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type)4429 static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4430 			      enum btrfs_qgroup_rsv_type type)
4431 {
4432 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4433 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4434 		return;
4435 	if (num_bytes == 0)
4436 		return;
4437 
4438 	spin_lock(&root->qgroup_meta_rsv_lock);
4439 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
4440 		root->qgroup_meta_rsv_prealloc += num_bytes;
4441 	else
4442 		root->qgroup_meta_rsv_pertrans += num_bytes;
4443 	spin_unlock(&root->qgroup_meta_rsv_lock);
4444 }
4445 
sub_root_meta_rsv(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type)4446 static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
4447 			     enum btrfs_qgroup_rsv_type type)
4448 {
4449 	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
4450 	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
4451 		return 0;
4452 	if (num_bytes == 0)
4453 		return 0;
4454 
4455 	spin_lock(&root->qgroup_meta_rsv_lock);
4456 	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
4457 		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
4458 				  num_bytes);
4459 		root->qgroup_meta_rsv_prealloc -= num_bytes;
4460 	} else {
4461 		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
4462 				  num_bytes);
4463 		root->qgroup_meta_rsv_pertrans -= num_bytes;
4464 	}
4465 	spin_unlock(&root->qgroup_meta_rsv_lock);
4466 	return num_bytes;
4467 }
4468 
btrfs_qgroup_reserve_meta(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type,bool enforce)4469 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4470 			      enum btrfs_qgroup_rsv_type type, bool enforce)
4471 {
4472 	struct btrfs_fs_info *fs_info = root->fs_info;
4473 	int ret;
4474 
4475 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4476 	    !is_fstree(btrfs_root_id(root)) || num_bytes == 0)
4477 		return 0;
4478 
4479 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4480 	trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
4481 	ret = qgroup_reserve(root, num_bytes, enforce, type);
4482 	if (ret < 0)
4483 		return ret;
4484 	/*
4485 	 * Record what we have reserved into root.
4486 	 *
4487 	 * To avoid quota disabled->enabled underflow.
4488 	 * In that case, we may try to free space we haven't reserved
4489 	 * (since quota was disabled), so record what we reserved into root.
4490 	 * And ensure later release won't underflow this number.
4491 	 */
4492 	add_root_meta_rsv(root, num_bytes, type);
4493 	return ret;
4494 }
4495 
__btrfs_qgroup_reserve_meta(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type,bool enforce,bool noflush)4496 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
4497 				enum btrfs_qgroup_rsv_type type, bool enforce,
4498 				bool noflush)
4499 {
4500 	int ret;
4501 
4502 	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4503 	if ((ret <= 0 && ret != -EDQUOT) || noflush)
4504 		return ret;
4505 
4506 	ret = try_flush_qgroup(root);
4507 	if (ret < 0)
4508 		return ret;
4509 	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
4510 }
4511 
4512 /*
4513  * Per-transaction meta reservation should be all freed at transaction commit
4514  * time
4515  */
btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root * root)4516 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
4517 {
4518 	struct btrfs_fs_info *fs_info = root->fs_info;
4519 
4520 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4521 	    !is_fstree(btrfs_root_id(root)))
4522 		return;
4523 
4524 	/* TODO: Update trace point to handle such free */
4525 	trace_qgroup_meta_free_all_pertrans(root);
4526 	/* Special value -1 means to free all reserved space */
4527 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
4528 				  BTRFS_QGROUP_RSV_META_PERTRANS);
4529 }
4530 
__btrfs_qgroup_free_meta(struct btrfs_root * root,int num_bytes,enum btrfs_qgroup_rsv_type type)4531 void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
4532 			      enum btrfs_qgroup_rsv_type type)
4533 {
4534 	struct btrfs_fs_info *fs_info = root->fs_info;
4535 
4536 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4537 	    !is_fstree(btrfs_root_id(root)))
4538 		return;
4539 
4540 	/*
4541 	 * reservation for META_PREALLOC can happen before quota is enabled,
4542 	 * which can lead to underflow.
4543 	 * Here ensure we will only free what we really have reserved.
4544 	 */
4545 	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
4546 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
4547 	trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
4548 	btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
4549 }
4550 
qgroup_convert_meta(struct btrfs_fs_info * fs_info,u64 ref_root,int num_bytes)4551 static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
4552 				int num_bytes)
4553 {
4554 	struct btrfs_qgroup *qgroup;
4555 	LIST_HEAD(qgroup_list);
4556 
4557 	if (num_bytes == 0)
4558 		return;
4559 	if (!fs_info->quota_root)
4560 		return;
4561 
4562 	spin_lock(&fs_info->qgroup_lock);
4563 	qgroup = find_qgroup_rb(fs_info, ref_root);
4564 	if (!qgroup)
4565 		goto out;
4566 
4567 	qgroup_iterator_add(&qgroup_list, qgroup);
4568 	list_for_each_entry(qgroup, &qgroup_list, iterator) {
4569 		struct btrfs_qgroup_list *glist;
4570 
4571 		qgroup_rsv_release(fs_info, qgroup, num_bytes,
4572 				BTRFS_QGROUP_RSV_META_PREALLOC);
4573 		if (!sb_rdonly(fs_info->sb))
4574 			qgroup_rsv_add(fs_info, qgroup, num_bytes,
4575 				       BTRFS_QGROUP_RSV_META_PERTRANS);
4576 
4577 		list_for_each_entry(glist, &qgroup->groups, next_group)
4578 			qgroup_iterator_add(&qgroup_list, glist->group);
4579 	}
4580 out:
4581 	qgroup_iterator_clean(&qgroup_list);
4582 	spin_unlock(&fs_info->qgroup_lock);
4583 }
4584 
4585 /*
4586  * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS.
4587  *
4588  * This is called when preallocated meta reservation needs to be used.
4589  * Normally after btrfs_join_transaction() call.
4590  */
btrfs_qgroup_convert_reserved_meta(struct btrfs_root * root,int num_bytes)4591 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4592 {
4593 	struct btrfs_fs_info *fs_info = root->fs_info;
4594 
4595 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
4596 	    !is_fstree(btrfs_root_id(root)))
4597 		return;
4598 	/* Same as btrfs_qgroup_free_meta_prealloc() */
4599 	num_bytes = sub_root_meta_rsv(root, num_bytes,
4600 				      BTRFS_QGROUP_RSV_META_PREALLOC);
4601 	trace_qgroup_meta_convert(root, num_bytes);
4602 	qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
4603 	if (!sb_rdonly(fs_info->sb))
4604 		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
4605 }
4606 
4607 /*
4608  * Check qgroup reserved space leaking, normally at destroy inode
4609  * time
4610  */
btrfs_qgroup_check_reserved_leak(struct btrfs_inode * inode)4611 void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4612 {
4613 	struct extent_changeset changeset;
4614 	struct ulist_node *unode;
4615 	struct ulist_iterator iter;
4616 	int ret;
4617 
4618 	extent_changeset_init(&changeset);
4619 	ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4620 			EXTENT_QGROUP_RESERVED, &changeset);
4621 
4622 	WARN_ON(ret < 0);
4623 	if (WARN_ON(changeset.bytes_changed)) {
4624 		ULIST_ITER_INIT(&iter);
4625 		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4626 			btrfs_warn(inode->root->fs_info,
4627 		"leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4628 				btrfs_ino(inode), unode->val, unode->aux);
4629 		}
4630 		btrfs_qgroup_free_refroot(inode->root->fs_info,
4631 				btrfs_root_id(inode->root),
4632 				changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
4633 
4634 	}
4635 	extent_changeset_release(&changeset);
4636 }
4637 
btrfs_qgroup_init_swapped_blocks(struct btrfs_qgroup_swapped_blocks * swapped_blocks)4638 void btrfs_qgroup_init_swapped_blocks(
4639 	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4640 {
4641 	int i;
4642 
4643 	spin_lock_init(&swapped_blocks->lock);
4644 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4645 		swapped_blocks->blocks[i] = RB_ROOT;
4646 	swapped_blocks->swapped = false;
4647 }
4648 
4649 /*
4650  * Delete all swapped blocks record of @root.
4651  * Every record here means we skipped a full subtree scan for qgroup.
4652  *
4653  * Gets called when committing one transaction.
4654  */
btrfs_qgroup_clean_swapped_blocks(struct btrfs_root * root)4655 void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4656 {
4657 	struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4658 	int i;
4659 
4660 	swapped_blocks = &root->swapped_blocks;
4661 
4662 	spin_lock(&swapped_blocks->lock);
4663 	if (!swapped_blocks->swapped)
4664 		goto out;
4665 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4666 		struct rb_root *cur_root = &swapped_blocks->blocks[i];
4667 		struct btrfs_qgroup_swapped_block *entry;
4668 		struct btrfs_qgroup_swapped_block *next;
4669 
4670 		rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4671 						     node)
4672 			kfree(entry);
4673 		swapped_blocks->blocks[i] = RB_ROOT;
4674 	}
4675 	swapped_blocks->swapped = false;
4676 out:
4677 	spin_unlock(&swapped_blocks->lock);
4678 }
4679 
4680 /*
4681  * Add subtree roots record into @subvol_root.
4682  *
4683  * @subvol_root:	tree root of the subvolume tree get swapped
4684  * @bg:			block group under balance
4685  * @subvol_parent/slot:	pointer to the subtree root in subvolume tree
4686  * @reloc_parent/slot:	pointer to the subtree root in reloc tree
4687  *			BOTH POINTERS ARE BEFORE TREE SWAP
4688  * @last_snapshot:	last snapshot generation of the subvolume tree
4689  */
btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle * trans,struct btrfs_root * subvol_root,struct btrfs_block_group * bg,struct extent_buffer * subvol_parent,int subvol_slot,struct extent_buffer * reloc_parent,int reloc_slot,u64 last_snapshot)4690 int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4691 		struct btrfs_root *subvol_root,
4692 		struct btrfs_block_group *bg,
4693 		struct extent_buffer *subvol_parent, int subvol_slot,
4694 		struct extent_buffer *reloc_parent, int reloc_slot,
4695 		u64 last_snapshot)
4696 {
4697 	struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4698 	struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4699 	struct btrfs_qgroup_swapped_block *block;
4700 	struct rb_node **cur;
4701 	struct rb_node *parent = NULL;
4702 	int level = btrfs_header_level(subvol_parent) - 1;
4703 	int ret = 0;
4704 
4705 	if (!btrfs_qgroup_full_accounting(fs_info))
4706 		return 0;
4707 
4708 	if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4709 	    btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4710 		btrfs_err_rl(fs_info,
4711 		"%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4712 			__func__,
4713 			btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4714 			btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4715 		return -EUCLEAN;
4716 	}
4717 
4718 	block = kmalloc(sizeof(*block), GFP_NOFS);
4719 	if (!block) {
4720 		ret = -ENOMEM;
4721 		goto out;
4722 	}
4723 
4724 	/*
4725 	 * @reloc_parent/slot is still before swap, while @block is going to
4726 	 * record the bytenr after swap, so we do the swap here.
4727 	 */
4728 	block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4729 	block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4730 							     reloc_slot);
4731 	block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4732 	block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4733 							    subvol_slot);
4734 	block->last_snapshot = last_snapshot;
4735 	block->level = level;
4736 
4737 	/*
4738 	 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
4739 	 * no one else can modify tree blocks thus we qgroup will not change
4740 	 * no matter the value of trace_leaf.
4741 	 */
4742 	if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
4743 		block->trace_leaf = true;
4744 	else
4745 		block->trace_leaf = false;
4746 	btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4747 
4748 	/* Insert @block into @blocks */
4749 	spin_lock(&blocks->lock);
4750 	cur = &blocks->blocks[level].rb_node;
4751 	while (*cur) {
4752 		struct btrfs_qgroup_swapped_block *entry;
4753 
4754 		parent = *cur;
4755 		entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
4756 				 node);
4757 
4758 		if (entry->subvol_bytenr < block->subvol_bytenr) {
4759 			cur = &(*cur)->rb_left;
4760 		} else if (entry->subvol_bytenr > block->subvol_bytenr) {
4761 			cur = &(*cur)->rb_right;
4762 		} else {
4763 			if (entry->subvol_generation !=
4764 					block->subvol_generation ||
4765 			    entry->reloc_bytenr != block->reloc_bytenr ||
4766 			    entry->reloc_generation !=
4767 					block->reloc_generation) {
4768 				/*
4769 				 * Duplicated but mismatch entry found.
4770 				 * Shouldn't happen.
4771 				 *
4772 				 * Marking qgroup inconsistent should be enough
4773 				 * for end users.
4774 				 */
4775 				WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4776 				ret = -EEXIST;
4777 			}
4778 			kfree(block);
4779 			goto out_unlock;
4780 		}
4781 	}
4782 	rb_link_node(&block->node, parent, cur);
4783 	rb_insert_color(&block->node, &blocks->blocks[level]);
4784 	blocks->swapped = true;
4785 out_unlock:
4786 	spin_unlock(&blocks->lock);
4787 out:
4788 	if (ret < 0)
4789 		qgroup_mark_inconsistent(fs_info);
4790 	return ret;
4791 }
4792 
4793 /*
4794  * Check if the tree block is a subtree root, and if so do the needed
4795  * delayed subtree trace for qgroup.
4796  *
4797  * This is called during btrfs_cow_block().
4798  */
btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct extent_buffer * subvol_eb)4799 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4800 					 struct btrfs_root *root,
4801 					 struct extent_buffer *subvol_eb)
4802 {
4803 	struct btrfs_fs_info *fs_info = root->fs_info;
4804 	struct btrfs_tree_parent_check check = { 0 };
4805 	struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4806 	struct btrfs_qgroup_swapped_block *block;
4807 	struct extent_buffer *reloc_eb = NULL;
4808 	struct rb_node *node;
4809 	bool found = false;
4810 	bool swapped = false;
4811 	int level = btrfs_header_level(subvol_eb);
4812 	int ret = 0;
4813 	int i;
4814 
4815 	if (!btrfs_qgroup_full_accounting(fs_info))
4816 		return 0;
4817 	if (!is_fstree(btrfs_root_id(root)) || !root->reloc_root)
4818 		return 0;
4819 
4820 	spin_lock(&blocks->lock);
4821 	if (!blocks->swapped) {
4822 		spin_unlock(&blocks->lock);
4823 		return 0;
4824 	}
4825 	node = blocks->blocks[level].rb_node;
4826 
4827 	while (node) {
4828 		block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4829 		if (block->subvol_bytenr < subvol_eb->start) {
4830 			node = node->rb_left;
4831 		} else if (block->subvol_bytenr > subvol_eb->start) {
4832 			node = node->rb_right;
4833 		} else {
4834 			found = true;
4835 			break;
4836 		}
4837 	}
4838 	if (!found) {
4839 		spin_unlock(&blocks->lock);
4840 		goto out;
4841 	}
4842 	/* Found one, remove it from @blocks first and update blocks->swapped */
4843 	rb_erase(&block->node, &blocks->blocks[level]);
4844 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4845 		if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4846 			swapped = true;
4847 			break;
4848 		}
4849 	}
4850 	blocks->swapped = swapped;
4851 	spin_unlock(&blocks->lock);
4852 
4853 	check.level = block->level;
4854 	check.transid = block->reloc_generation;
4855 	check.has_first_key = true;
4856 	memcpy(&check.first_key, &block->first_key, sizeof(check.first_key));
4857 
4858 	/* Read out reloc subtree root */
4859 	reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check);
4860 	if (IS_ERR(reloc_eb)) {
4861 		ret = PTR_ERR(reloc_eb);
4862 		reloc_eb = NULL;
4863 		goto free_out;
4864 	}
4865 	if (!extent_buffer_uptodate(reloc_eb)) {
4866 		ret = -EIO;
4867 		goto free_out;
4868 	}
4869 
4870 	ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4871 			block->last_snapshot, block->trace_leaf);
4872 free_out:
4873 	kfree(block);
4874 	free_extent_buffer(reloc_eb);
4875 out:
4876 	if (ret < 0) {
4877 		btrfs_err_rl(fs_info,
4878 			     "failed to account subtree at bytenr %llu: %d",
4879 			     subvol_eb->start, ret);
4880 		qgroup_mark_inconsistent(fs_info);
4881 	}
4882 	return ret;
4883 }
4884 
btrfs_qgroup_destroy_extent_records(struct btrfs_transaction * trans)4885 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4886 {
4887 	struct btrfs_qgroup_extent_record *entry;
4888 	unsigned long index;
4889 
4890 	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
4891 		ulist_free(entry->old_roots);
4892 		kfree(entry);
4893 	}
4894 	xa_destroy(&trans->delayed_refs.dirty_extents);
4895 }
4896 
btrfs_free_squota_rsv(struct btrfs_fs_info * fs_info,u64 root,u64 rsv_bytes)4897 void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
4898 {
4899 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4900 		return;
4901 
4902 	if (!is_fstree(root))
4903 		return;
4904 
4905 	btrfs_qgroup_free_refroot(fs_info, root, rsv_bytes, BTRFS_QGROUP_RSV_DATA);
4906 }
4907 
btrfs_record_squota_delta(struct btrfs_fs_info * fs_info,const struct btrfs_squota_delta * delta)4908 int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
4909 			      const struct btrfs_squota_delta *delta)
4910 {
4911 	int ret;
4912 	struct btrfs_qgroup *qgroup;
4913 	struct btrfs_qgroup *qg;
4914 	LIST_HEAD(qgroup_list);
4915 	u64 root = delta->root;
4916 	u64 num_bytes = delta->num_bytes;
4917 	const int sign = (delta->is_inc ? 1 : -1);
4918 
4919 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
4920 		return 0;
4921 
4922 	if (!is_fstree(root))
4923 		return 0;
4924 
4925 	/* If the extent predates enabling quotas, don't count it. */
4926 	if (delta->generation < fs_info->qgroup_enable_gen)
4927 		return 0;
4928 
4929 	spin_lock(&fs_info->qgroup_lock);
4930 	qgroup = find_qgroup_rb(fs_info, root);
4931 	if (!qgroup) {
4932 		ret = -ENOENT;
4933 		goto out;
4934 	}
4935 
4936 	ret = 0;
4937 	qgroup_iterator_add(&qgroup_list, qgroup);
4938 	list_for_each_entry(qg, &qgroup_list, iterator) {
4939 		struct btrfs_qgroup_list *glist;
4940 
4941 		qg->excl += num_bytes * sign;
4942 		qg->rfer += num_bytes * sign;
4943 		qgroup_dirty(fs_info, qg);
4944 
4945 		list_for_each_entry(glist, &qg->groups, next_group)
4946 			qgroup_iterator_add(&qgroup_list, glist->group);
4947 	}
4948 	qgroup_iterator_clean(&qgroup_list);
4949 
4950 out:
4951 	spin_unlock(&fs_info->qgroup_lock);
4952 	return ret;
4953 }
4954