/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#ifndef BTRFS_QGROUP_H
#define BTRFS_QGROUP_H

#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/kobject.h>
#include "ulist.h"
#include "delayed-ref.h"

/*
 * Btrfs qgroup overview
 *
 * Btrfs qgroup is split into 3 main parts:
 * 1) Reserve
 *    Reserve metadata/data space for incoming operations.
 *    This affects how the qgroup limit works.
 *
 * 2) Trace
 *    Tell btrfs qgroup to trace dirty extents.
 *
 *    Dirty extents include:
 *    - Newly allocated extents
 *    - Extents going to be deleted (in this trans)
 *    - Extents whose owner is going to be modified
 *
 *    This is the main part that affects whether qgroup numbers will stay
 *    consistent.
 *    Btrfs qgroup can trace clean extents without causing any problem,
 *    but it consumes extra CPU time, so it should be avoided if possible.
 *
 * 3) Account
 *    Btrfs qgroup updates its numbers based on the dirty extents traced
 *    in the previous step.
 *
 *    This normally happens at qgroup rescan and transaction commit time.
 */
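
/*
 * Illustrative sketch of the three phases using functions declared below
 * (a hedged example of the intended flow, not a verbatim call chain from
 * any particular code path):
 *
 *	// 1) Reserve: claim qgroup data space before dirtying pages
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *
 *	// 2) Trace: record the newly allocated extent as dirty
 *	ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes, GFP_NOFS);
 *
 *	// 3) Account: numbers get updated at transaction commit
 *	ret = btrfs_qgroup_account_extents(trans);
 */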

/*
 * Special performance optimization for balance.
 *
 * For balance, we need to swap the subtrees of the subvolume and reloc
 * trees.  In theory, we would need to trace all subtree blocks of both the
 * subvolume and reloc trees, since their owner has changed during such a
 * swap.
 *
 * However, since balance has ensured that both subtrees contain the same
 * contents and have the same tree structures, such a swap won't cause any
 * qgroup number change.
 *
 * But there is a race window between the subtree swap and transaction
 * commit; during that window, if we increase/decrease tree levels or
 * merge/split tree blocks, we still need to trace the original subtrees.
 *
 * So for balance, we use delayed subtree tracing, whose workflow is:
 *
 * 1) Record the subtree root blocks that get swapped.
 *
 *    During subtree swap:
 *    O = Old tree blocks
 *    N = New tree blocks
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          NA     OB                          OA      OB
 *        /  |     |  \                      /  |      |  \
 *      NC  ND     OE  OF                   OC  OD     OE  OF
 *
 *   In this case, NA and OA are going to be swapped, so record (NA, OA)
 *   into subvolume tree X.
 *
 * 2) After subtree swap.
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          OA     OB                          NA      OB
 *        /  |     |  \                      /  |      |  \
 *      OC  OD     OE  OF                   NC  ND     OE  OF
 *
 * 3a) COW happens for OB
 *     If we are going to COW tree block OB, we check OB's bytenr against
 *     tree X's swapped_blocks structure.
 *     If it doesn't match any record, nothing happens.
 *
 * 3b) COW happens for NA
 *     Check NA's bytenr against tree X's swapped_blocks, and get a hit.
 *     Then we do a subtree scan on both subtrees OA and NA.
 *     This results in 6 tree blocks to be scanned (OA, OC, OD, NA, NC, ND).
 *
 *     Then no matter what we do to subvolume tree X, the qgroup numbers
 *     will still be correct.
 *     Then NA's record gets removed from X's swapped_blocks.
 *
 * 4)  Transaction commit
 *     Any record left in X's swapped_blocks gets removed, since there was
 *     no modification to the swapped subtrees, so there is no need to
 *     trigger a heavy qgroup subtree rescan for them.
 */
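
/*
 * A minimal sketch of the COW-time check described in 3a)/3b) above;
 * the actual logic lives in btrfs_qgroup_trace_subtree_after_cow(),
 * declared near the bottom of this header:
 *
 *	// while COWing tree block @eb of subvolume tree @root
 *	ret = btrfs_qgroup_trace_subtree_after_cow(trans, root, eb);
 *	// no matching record in the root's swapped_blocks: nothing happens;
 *	// on a hit: both the old and new subtrees get traced and the
 *	// record is removed
 */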

/*
 * Record a dirty extent, and inform qgroup to update its quota on it.
 * TODO: Use a kmem cache to allocate it.
 */
struct btrfs_qgroup_extent_record {
	struct rb_node node;
	u64 bytenr;
	u64 num_bytes;

	/*
	 * For freeing qgroup reserved data space.
	 *
	 * @data_rsv_refroot and @data_rsv will be recorded after
	 * BTRFS_ADD_DELAYED_EXTENT is called.
	 * They will be used to free reserved qgroup space at
	 * transaction commit time.
	 */
	u32 data_rsv;		/* reserved data space needs to be freed */
	u64 data_rsv_refroot;	/* which root the reserved data belongs to */
	struct ulist *old_roots;
};

struct btrfs_qgroup_swapped_block {
	struct rb_node node;

	int level;
	bool trace_leaf;

	/* bytenr/generation of the tree block in subvolume tree after swap */
	u64 subvol_bytenr;
	u64 subvol_generation;

	/* bytenr/generation of the tree block in reloc tree after swap */
	u64 reloc_bytenr;
	u64 reloc_generation;

	u64 last_snapshot;
	struct btrfs_key first_key;
};

/*
 * Qgroup reservation types:
 *
 * DATA:
 *	Space reserved for data.
 *
 * META_PERTRANS:
 *	Space reserved for metadata (per-transaction).
 *	Since qgroup data is only updated at transaction commit time,
 *	reserved space for metadata must be kept until the transaction
 *	commits.
 *	Any metadata reservation used in btrfs_start_transaction() should
 *	be of this type.
 *
 * META_PREALLOC:
 *	There are cases where metadata space is reserved before starting a
 *	transaction, and btrfs_join_transaction() is called later to get a
 *	trans handle.
 *	Any metadata reserved for such usage should be of this type.
 *	After join_transaction(), part (or all) of such a reservation should
 *	be converted into META_PERTRANS.
 */
enum btrfs_qgroup_rsv_type {
	BTRFS_QGROUP_RSV_DATA,
	BTRFS_QGROUP_RSV_META_PERTRANS,
	BTRFS_QGROUP_RSV_META_PREALLOC,
	BTRFS_QGROUP_RSV_LAST,
};
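
/*
 * Illustrative mapping of the meta reservation types to the helpers
 * declared below (a usage sketch, not a complete call graph; @nbytes is
 * a placeholder name):
 *
 *	// PERTRANS: reserved as part of starting/continuing a transaction
 *	ret = btrfs_qgroup_reserve_meta_pertrans(root, nbytes, true);
 *
 *	// PREALLOC: reserved before the transaction, then converted to
 *	// PERTRANS once the transaction is joined
 *	ret = btrfs_qgroup_reserve_meta_prealloc(root, nbytes, true);
 *	trans = btrfs_join_transaction(root);
 *	btrfs_qgroup_convert_reserved_meta(root, nbytes);
 */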

/*
 * Represents how many bytes we have reserved for this qgroup.
 *
 * Each type has a different reservation behavior.
 * E.g. data reservation follows its io_tree flag modifications, while
 * *currently* metadata is just reserve-and-clear during a transaction.
 *
 * TODO: Add a new type for reservations that can survive a transaction
 * commit.  The current metadata reservation behavior is not suitable for
 * such a case.
 */
struct btrfs_qgroup_rsv {
	u64 values[BTRFS_QGROUP_RSV_LAST];
};
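
/*
 * Example: rsv.values[BTRFS_QGROUP_RSV_DATA] holds the bytes currently
 * reserved for data, and likewise for each type in the enum above.
 */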

/*
 * One struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	struct btrfs_qgroup_rsv rsv;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * Temp variables for accounting operations.
	 * Refer to qgroup_shared_accounting() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;

	/*
	 * Sysfs kobject
	 */
	struct kobject kobj;
};
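
/*
 * Worked example of rfer vs excl (simplified): if subvolumes A and B both
 * reference the same 1MiB extent, that 1MiB counts towards rfer of both
 * A's and B's qgroups, but towards excl of neither.  An extent referenced
 * only by A counts towards both A's rfer and A's excl.
 */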

static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
{
	return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
}
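
/*
 * Example, assuming BTRFS_QGROUP_LEVEL_SHIFT is 48 as defined in the
 * btrfs UAPI headers: qgroup "1/100" has qgroupid (1ULL << 48) | 100,
 * and btrfs_qgroup_subvolid() masks off the level bits, returning 100.
 */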

/*
 * For qgroup event trace points only
 */
#define QGROUP_RESERVE		(1<<0)
#define QGROUP_RELEASE		(1<<1)
#define QGROUP_FREE		(1<<2)

int btrfs_quota_enable(struct btrfs_fs_info *fs_info);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op;

/*
 * Inform qgroup to trace one dirty extent, whose info is recorded in
 * @record, so qgroup can account for it at transaction commit time.
 *
 * Lockless version: the caller must hold the delayed ref lock and have
 * allocated the memory, then call btrfs_qgroup_trace_extent_post() after
 * exiting the lock context.
 *
 * Return 0 for a successful insert.
 * Return >0 for an existing record; the caller can free @record safely.
 * Errors are not possible.
 */
int btrfs_qgroup_trace_extent_nolock(
		struct btrfs_fs_info *fs_info,
		struct btrfs_delayed_ref_root *delayed_refs,
		struct btrfs_qgroup_extent_record *record);
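
/*
 * A sketch of the calling pattern described above (hedged; the caller is
 * expected to fill @record's bytenr/num_bytes before insertion):
 *
 *	record = kzalloc(sizeof(*record), GFP_NOFS);
 *	// ... fill record->bytenr and record->num_bytes ...
 *	spin_lock(&delayed_refs->lock);
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 *	spin_unlock(&delayed_refs->lock);
 *	if (ret > 0)
 *		kfree(record);		// duplicate record, safe to free
 *	else
 *		btrfs_qgroup_trace_extent_post(fs_info, record);
 */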

/*
 * Post handler after qgroup_trace_extent_nolock().
 *
 * NOTE: Currently qgroup does the expensive backref walk at transaction
 * commit time with TRANS_STATE_COMMIT_DOING, which blocks incoming new
 * transactions.
 * This is designed to let btrfs_find_all_roots() get a correct new_roots
 * result.
 *
 * However, for old_roots there is no need to do the backref walk at that
 * time, since we search commit roots to walk backrefs and the result will
 * always be correct.
 *
 * Due to the lockless nature of the _nolock version, we can't do the
 * backref walk there, so we must call btrfs_qgroup_trace_extent_post()
 * after exiting the spinlock context.
 *
 * TODO: If we can fix and prove that btrfs_find_all_roots() gets a correct
 * result using the current root, then we can move all the expensive backref
 * walks out of transaction commit, but not now, as qgroup accounting would
 * be wrong again.
 */
int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
				   struct btrfs_qgroup_extent_record *qrecord);

/*
 * Inform qgroup to trace one dirty extent, specified by @bytenr and
 * @num_bytes, so qgroup can account for it at transaction commit time.
 *
 * Better encapsulated version, with memory allocation and a backref walk
 * of the commit roots, so this can sleep.
 *
 * Return 0 if the operation is done.
 * Return <0 for error, like memory allocation failure or invalid parameter
 * (NULL trans).
 */
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes, gfp_t gfp_flag);

/*
 * Inform qgroup to trace all data items in a leaf.
 *
 * Return 0 for success.
 * Return <0 for error (ENOMEM).
 */
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb);
/*
 * Inform qgroup to trace a whole subtree, including all its child tree
 * blocks and data.
 * The root tree block is specified by @root_eb.
 *
 * Normally used by relocation (tree block swap) and subvolume deletion.
 *
 * Return 0 for success.
 * Return <0 for error (ENOMEM or tree search error).
 */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level);
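
/*
 * A hedged sketch of tracing a whole subtree via its root extent buffer
 * (e.g. on subvolume deletion); btrfs_header_generation() and
 * btrfs_header_level() are the usual extent buffer header accessors:
 *
 *	ret = btrfs_qgroup_trace_subtree(trans, root_eb,
 *					 btrfs_header_generation(root_eb),
 *					 btrfs_header_level(root_eb));
 */
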
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl);
#endif

/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len);
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			   struct extent_changeset *reserved, u64 start,
			   u64 len);
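
/*
 * Illustrative lifecycle of a data reservation (a hedged sketch of the
 * buffered write path, not exact code):
 *
 *	struct extent_changeset *reserved = NULL;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	// on a successful write: release; the space is accounted and the
 *	// reservation freed at transaction commit
 *	btrfs_qgroup_release_data(inode, start, len);
 *	// on error: hand the reserved range back immediately
 *	btrfs_qgroup_free_data(inode, reserved, start, len);
 */
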
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce);
/* Reserve metadata space for pertrans and prealloc types */
static inline int btrfs_qgroup_reserve_meta_pertrans(struct btrfs_root *root,
				int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PERTRANS, enforce);
}
static inline int btrfs_qgroup_reserve_meta_prealloc(struct btrfs_root *root,
				int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PREALLOC, enforce);
}
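
/*
 * Hedged error-handling sketch for a prealloc reservation; do_work() is
 * a hypothetical placeholder for whatever the reservation backs:
 *
 *	ret = btrfs_qgroup_reserve_meta_prealloc(root, nbytes, true);
 *	if (ret < 0)
 *		return ret;	// reservation failed against the qgroup limit
 *	ret = do_work();
 *	if (ret < 0)
 *		btrfs_qgroup_free_meta_prealloc(root, nbytes);	// give it back
 */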

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			     enum btrfs_qgroup_rsv_type type);

/* Free per-transaction meta reservation for error handling */
static inline void btrfs_qgroup_free_meta_pertrans(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PERTRANS);
}

/* Pre-allocated meta reservation can be freed as needed */
static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
			BTRFS_QGROUP_RSV_META_PREALLOC);
}

/*
 * Per-transaction meta reservations should all be freed at transaction
 * commit time.
 */
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);

/*
 * Convert @num_bytes of META_PREALLOC reservation to META_PERTRANS.
 *
 * This is called when a preallocated meta reservation needs to be used,
 * normally after a btrfs_join_transaction() call.
 */
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);

void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);

/* btrfs_qgroup_swapped_blocks related functions */
void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks);

void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot);
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
		struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);

#endif