/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#ifndef BTRFS_QGROUP_H
#define BTRFS_QGROUP_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <uapi/linux/btrfs_tree.h>

struct extent_buffer;
struct extent_changeset;
struct btrfs_delayed_extent_op;
struct btrfs_fs_info;
struct btrfs_root;
struct btrfs_ioctl_quota_ctl_args;
struct btrfs_trans_handle;
struct btrfs_delayed_ref_root;
struct btrfs_inode;
struct btrfs_transaction;
struct btrfs_block_group;
struct btrfs_qgroup_swapped_blocks;

/*
 * Btrfs qgroup overview
 *
 * Btrfs qgroup is split into 3 main parts:
 * 1) Reserve
 *    Reserve metadata/data space for incoming operations.
 *    This affects how the qgroup limit works.
 *
 * 2) Trace
 *    Tell btrfs qgroup to trace dirty extents.
 *
 *    Dirty extents include:
 *    - Newly allocated extents
 *    - Extents going to be deleted (in this trans)
 *    - Extents whose owner is going to be modified
 *
 *    This is the main part that affects whether qgroup numbers will stay
 *    consistent.
 *    Btrfs qgroup can trace clean extents without causing any problem,
 *    but it will consume extra CPU time, so it should be avoided if possible.
 *
 * 3) Account
 *    Btrfs qgroup updates its numbers based on the dirty extents traced
 *    in the previous step.
 *
 *    This normally happens at qgroup rescan and transaction commit time.
 *    (See the illustrative sketch below for how the three parts fit together.)
 */
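
/*
 * Illustrative sketch only (hypothetical helper, not an actual call path in
 * btrfs): how the three parts above line up for a newly written data extent.
 * example_new_data_extent() and its arguments are made up for illustration;
 * the btrfs_qgroup_*() calls are the declarations from this header.
 * btrfs_qgroup_reserve_data() is step 1), btrfs_qgroup_trace_extent() is
 * step 2), and step 3) happens later for all traced extents at transaction
 * commit via btrfs_qgroup_account_extents().  In reality these steps run in
 * different contexts (write path, delayed refs, commit); they are collapsed
 * into one function here only to show the order.
 *
 *	static int example_new_data_extent(struct btrfs_trans_handle *trans,
 *					   struct btrfs_inode *inode,
 *					   u64 start, u64 len,
 *					   u64 bytenr, u64 num_bytes)
 *	{
 *		struct extent_changeset *reserved = NULL;
 *		int ret;
 *
 *		ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *		if (ret < 0)
 *			return ret;
 *
 *		return btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
 *	}
 */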

/*
 * Special performance optimization for balance.
 *
 * For balance, we need to swap the subtrees of the subvolume and reloc
 * trees.  In theory, we need to trace all subtree blocks of both the
 * subvolume and reloc trees, since their owner has changed during such a
 * swap.
 *
 * However, since balance has ensured that both subtrees contain the same
 * contents and have the same tree structure, such a swap won't cause any
 * qgroup number change.
 *
 * But there is a race window between the subtree swap and the transaction
 * commit.  During that window, if we increase/decrease the tree level or
 * merge/split tree blocks, we still need to trace the original subtrees.
 *
 * So for balance, we use delayed subtree tracing, whose workflow is:
 *
 * 1) Record the subtree root blocks that get swapped.
 *
 *    During subtree swap:
 *    O = Old tree blocks
 *    N = New tree blocks
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          NA     OB                          OA      OB
 *        /  |     |  \                      /  |      |  \
 *      NC  ND     OE  OF                   OC  OD     OE  OF
 *
 *   In this case, NA and OA are going to be swapped, record (NA, OA) into
 *   subvolume tree X.
 *
 * 2) After subtree swap.
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          OA     OB                          NA      OB
 *        /  |     |  \                      /  |      |  \
 *      OC  OD     OE  OF                   NC  ND     OE  OF
 *
 * 3a) COW happens for OB
 *     If we are going to COW tree block OB, we check OB's bytenr against
 *     tree X's swapped_blocks structure.
 *     If it doesn't match any record, nothing happens.
 *
 * 3b) COW happens for NA
 *     Check NA's bytenr against tree X's swapped_blocks, and get a hit.
 *     Then we do a subtree scan on both subtrees OA and NA.
 *     This results in 6 tree blocks to be scanned (OA, OC, OD, NA, NC, ND).
 *
 *     Then no matter what we do to subvolume tree X, qgroup numbers will
 *     still be correct.
 *     Then NA's record gets removed from X's swapped_blocks.
 *     (See the sketch after this comment for how a COW path hooks into
 *     this check.)
 *
 * 4)  Transaction commit
 *     Any record left in X's swapped_blocks gets removed, since there was
 *     no modification to the swapped subtrees and thus no need to trigger
 *     a heavy qgroup subtree rescan for them.
 */
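
/*
 * Illustrative sketch only: how a COW path in a subvolume tree would hook
 * into the delayed subtree tracing described above.  example_after_cow_hook()
 * is hypothetical; btrfs_qgroup_trace_subtree_after_cow(), declared later in
 * this header, does the swapped_blocks lookup of steps 3a)/3b) internally and
 * removes the matching record on a hit.
 *
 *	static int example_after_cow_hook(struct btrfs_trans_handle *trans,
 *					  struct btrfs_root *root,
 *					  struct extent_buffer *subvol_eb)
 *	{
 *		(@subvol_eb is a tree block of subvolume tree @root that is
 *		 being COWed)
 *
 *		return btrfs_qgroup_trace_subtree_after_cow(trans, root,
 *							    subvol_eb);
 *	}
 *
 * Leftover records can be dropped with btrfs_qgroup_clean_swapped_blocks(),
 * also declared later in this header.
 */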

/*
 * These flags share the flags field of the btrfs_qgroup_status_item with the
 * persisted flags defined in btrfs_tree.h.
 *
 * To minimize the chance of collision with new persisted status flags, these
 * count backwards from the MSB.
 */
#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN		(1ULL << 63)
#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING		(1ULL << 62)

#define BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT		(3)

/*
 * Record a dirty extent, and inform qgroup to update quota on it.
 */
struct btrfs_qgroup_extent_record {
	/*
	 * The bytenr of the extent is given by its index in the dirty_extents
	 * xarray of struct btrfs_delayed_ref_root left shifted by
	 * fs_info->sectorsize_bits (see the sketch after this struct).
	 */

	u64 num_bytes;

	/*
	 * For qgroup reserved data space freeing.
	 *
	 * @data_rsv_refroot and @data_rsv will be recorded after
	 * BTRFS_ADD_DELAYED_EXTENT is called.
	 * And will be used to free reserved qgroup space at
	 * transaction commit time.
	 */
	u32 data_rsv;		/* reserved data space needs to be freed */
	u64 data_rsv_refroot;	/* which root the reserved data belongs to */
	struct ulist *old_roots;
};
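
/*
 * Illustrative sketch only, following the comment in the struct above:
 * recovering an extent record's bytenr from the slot it occupies in the
 * dirty_extents xarray.  example_record_bytenr() and @index are hypothetical
 * names used for illustration.
 *
 *	static u64 example_record_bytenr(const struct btrfs_fs_info *fs_info,
 *					 unsigned long index)
 *	{
 *		return (u64)index << fs_info->sectorsize_bits;
 *	}
 */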

struct btrfs_qgroup_swapped_block {
	struct rb_node node;

	int level;
	bool trace_leaf;

	/* bytenr/generation of the tree block in subvolume tree after swap */
	u64 subvol_bytenr;
	u64 subvol_generation;

	/* bytenr/generation of the tree block in reloc tree after swap */
	u64 reloc_bytenr;
	u64 reloc_generation;

	u64 last_snapshot;
	struct btrfs_key first_key;
};

/*
 * Qgroup reservation types:
 *
 * DATA:
 *	Space reserved for data.
 *
 * META_PERTRANS:
 *	Space reserved for metadata (per-transaction).
 *	Because qgroup numbers are only updated at transaction commit time,
 *	reserved metadata space must be kept until the transaction commits.
 *	Any metadata reservation used in btrfs_start_transaction() should be
 *	of this type.
 *
 * META_PREALLOC:
 *	There are cases where metadata space is reserved before starting a
 *	transaction, and btrfs_join_transaction() is then used to get a trans
 *	handle.
 *	Any metadata reserved for such usage should be of this type.
 *	After joining the transaction, part (or all) of such a reservation
 *	should be converted into META_PERTRANS (see the sketch after this
 *	enum).
 */
enum btrfs_qgroup_rsv_type {
	BTRFS_QGROUP_RSV_DATA,
	BTRFS_QGROUP_RSV_META_PERTRANS,
	BTRFS_QGROUP_RSV_META_PREALLOC,
	BTRFS_QGROUP_RSV_LAST,
};
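
/*
 * Illustrative sketch only: the META_PREALLOC -> META_PERTRANS flow described
 * above.  example_prealloc_then_join() and do_work_joining_a_transaction()
 * are hypothetical; the btrfs_qgroup_*_meta_*() helpers are the ones declared
 * later in this header.
 *
 *	static int example_prealloc_then_join(struct btrfs_root *root,
 *					      int num_bytes)
 *	{
 *		int ret;
 *
 *		ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes,
 *							 true, false);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = do_work_joining_a_transaction(root);
 *		if (ret < 0) {
 *			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
 *			return ret;
 *		}
 *
 *		btrfs_qgroup_convert_reserved_meta(root, num_bytes);
 *		return 0;
 *	}
 */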

/*
 * Represents how many bytes we have reserved for this qgroup.
 *
 * Each type should have different reservation behavior.
 * E.g. data follows its io_tree flag modification, while *currently*
 * metadata is just reserve-and-clear during the transaction.
 *
 * TODO: Add a new type for reservations which can survive a transaction
 * commit.  The current metadata reservation behavior is not suitable for
 * such a case.
 */
struct btrfs_qgroup_rsv {
	u64 values[BTRFS_QGROUP_RSV_LAST];
};

/*
 * One struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	struct btrfs_qgroup_rsv rsv;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */

	/*
	 * For qgroup iteration usage.
	 *
	 * The iteration list should always be empty until qgroup_iterator_add()
	 * is called, and should be reset to empty after the iteration is
	 * finished.
	 */
	struct list_head iterator;

	/*
	 * For nested iterator usage.
	 *
	 * Here we support at most one level of nested iterator calls like:
	 *
	 *	LIST_HEAD(all_qgroups);
	 *	{
	 *		LIST_HEAD(local_qgroups);
	 *		qgroup_iterator_add(local_qgroups, qg);
	 *		qgroup_iterator_nested_add(all_qgroups, qg);
	 *		do_some_work(local_qgroups);
	 *		qgroup_iterator_clean(local_qgroups);
	 *	}
	 *	do_some_work(all_qgroups);
	 *	qgroup_iterator_nested_clean(all_qgroups);
	 */
	struct list_head nested_iterator;
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * Temporary variables for accounting operations.
	 * Refer to qgroup_shared_accounting() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;

	/*
	 * Sysfs kobject
	 */
	struct kobject kobj;
};

/*
 * Glue structure to represent the relations between qgroups (see the sketch
 * below for how it is walked).
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
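
/*
 * Illustrative sketch only: walking the relations through the glue structure
 * above.  example_print_parents() is hypothetical; it relies on a member
 * qgroup's ->groups list being threaded through ->next_group, with each entry
 * pointing at the parent qgroup via ->group (matching the list comments in
 * struct btrfs_qgroup).
 *
 *	static void example_print_parents(struct btrfs_qgroup *qgroup)
 *	{
 *		struct btrfs_qgroup_list *glist;
 *
 *		list_for_each_entry(glist, &qgroup->groups, next_group)
 *			pr_info("qgroup %llu is a member of qgroup %llu\n",
 *				qgroup->qgroupid, glist->group->qgroupid);
 *	}
 */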

struct btrfs_squota_delta {
	/* The fstree root this delta counts against. */
	u64 root;
	/* The number of bytes in the extent being counted. */
	u64 num_bytes;
	/* The generation the extent was created in. */
	u64 generation;
	/* Whether we are using or freeing the extent. */
	bool is_inc;
	/* Whether the extent is data or metadata. */
	bool is_data;
};
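
/*
 * Illustrative sketch only: filling in a simple-quota delta for a freshly
 * allocated data extent and recording it.  example_record_new_data_extent()
 * and its arguments are hypothetical; btrfs_record_squota_delta() is declared
 * at the end of this header.
 *
 *	static int example_record_new_data_extent(struct btrfs_fs_info *fs_info,
 *						  u64 root, u64 num_bytes,
 *						  u64 generation)
 *	{
 *		const struct btrfs_squota_delta delta = {
 *			.root		= root,
 *			.num_bytes	= num_bytes,
 *			.generation	= generation,
 *			.is_inc		= true,
 *			.is_data	= true,
 *		};
 *
 *		return btrfs_record_squota_delta(fs_info, &delta);
 *	}
 */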

static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
{
	return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
}
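
/*
 * Illustrative sketch only: a qgroupid packs the qgroup level into the bits
 * at and above BTRFS_QGROUP_LEVEL_SHIFT (from uapi/linux/btrfs_tree.h) and
 * the subvolume id into the bits below it, so btrfs_qgroup_subvolid() above
 * simply masks the level away.  example_qgroupid() is hypothetical.
 *
 *	static u64 example_qgroupid(u16 level, u64 subvolid)
 *	{
 *		return ((u64)level << BTRFS_QGROUP_LEVEL_SHIFT) | subvolid;
 *	}
 *
 *	btrfs_qgroup_subvolid(example_qgroupid(1, 257)) == 257
 */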

/*
 * For qgroup event trace points only
 */
enum {
	ENUM_BIT(QGROUP_RESERVE),
	ENUM_BIT(QGROUP_RELEASE),
	ENUM_BIT(QGROUP_FREE),
};

enum btrfs_qgroup_mode {
	BTRFS_QGROUP_MODE_DISABLED,
	BTRFS_QGROUP_MODE_FULL,
	BTRFS_QGROUP_MODE_SIMPLE
};

enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info);
int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst,
			      struct btrfs_qgroup_list *prealloc);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);

int btrfs_qgroup_trace_extent_nolock(
		struct btrfs_fs_info *fs_info,
		struct btrfs_delayed_ref_root *delayed_refs,
		struct btrfs_qgroup_extent_record *record,
		u64 bytenr);
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup_extent_record *qrecord,
				   u64 bytenr);
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes);
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb);
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level);
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup_inherit *inherit,
			       size_t size);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, u64 inode_rootid,
			 struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(const struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl);
#endif

/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len);
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			   struct extent_changeset *reserved, u64 start,
			   u64 len, u64 *freed);
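
/*
 * Illustrative sketch only: the usual lifetime of a data reservation made
 * with the API above.  example_data_write() and do_the_write() are
 * hypothetical; on success the range is released (its usage gets accounted
 * at transaction commit), on failure the reservation is freed back.
 *
 *	static int example_data_write(struct btrfs_inode *inode, u64 start,
 *				      u64 len)
 *	{
 *		struct extent_changeset *reserved = NULL;
 *		u64 released = 0;
 *		u64 freed = 0;
 *		int ret;
 *
 *		ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = do_the_write(inode, start, len);
 *		if (ret < 0) {
 *			btrfs_qgroup_free_data(inode, reserved, start, len,
 *					       &freed);
 *			return ret;
 *		}
 *
 *		return btrfs_qgroup_release_data(inode, start, len, &released);
 *	}
 */
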
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce,
				bool noflush);
/* Reserve metadata space for pertrans and prealloc type */
static inline int btrfs_qgroup_reserve_meta_pertrans(struct btrfs_root *root,
				int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PERTRANS,
					   enforce, false);
}
static inline int btrfs_qgroup_reserve_meta_prealloc(struct btrfs_root *root,
						     int num_bytes, bool enforce,
						     bool noflush)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PREALLOC,
					   enforce, noflush);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type);

/* Free per-transaction meta reservation for error handling */
static inline void btrfs_qgroup_free_meta_pertrans(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PERTRANS);
}

/* Pre-allocated meta reservation can be freed when needed */
static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PREALLOC);
}

void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);

/* btrfs_qgroup_swapped_blocks related functions */
void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks);

void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot);
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
		struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(const struct btrfs_fs_info *fs_info);
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
			      const struct btrfs_squota_delta *delta);

#endif