/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H

#include <linux/types.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <uapi/linux/btrfs_tree.h>

struct btrfs_trans_handle;
struct btrfs_fs_info;

/* these are the possible values of struct btrfs_delayed_ref_node->action */
enum btrfs_delayed_ref_action {
	/* Add one backref to the tree */
	BTRFS_ADD_DELAYED_REF = 1,
	/* Delete one backref from the tree */
	BTRFS_DROP_DELAYED_REF,
	/* Record a full extent allocation */
	BTRFS_ADD_DELAYED_EXTENT,
	/* Not changing ref count on head ref */
	BTRFS_UPDATE_DELAYED_HEAD,
} __packed;
struct btrfs_delayed_ref_node {
	struct rb_node ref_node;
	/*
	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
	 * ref_head->ref_add_list, so we do not need to iterate the
	 * whole ref_head->ref_tree to find BTRFS_ADD_DELAYED_REF nodes.
	 */
	struct list_head add_list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	refcount_t refs;

	/*
	 * How many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
};
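
/*
 * Example (illustrative, not part of the kernel sources): thanks to
 * add_list, the BTRFS_ADD_DELAYED_REF nodes of a head can be visited
 * directly instead of scanning the whole ref_tree.  Assuming 'head' is a
 * locked struct btrfs_delayed_ref_head:
 *
 *	struct btrfs_delayed_ref_node *ref;
 *
 *	list_for_each_entry(ref, &head->ref_add_list, add_list)
 *		ASSERT(ref->action == BTRFS_ADD_DELAYED_REF);
 */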

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	u64 flags_to_set;
};

/*
 * The head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	u64 bytenr;
	u64 num_bytes;
	/*
	 * For insertion into struct btrfs_delayed_ref_root::href_root.
	 * Keep it in the same cache line as 'bytenr' for more efficient
	 * searches in the rbtree.
	 */
	struct rb_node href_node;
	/*
	 * The mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	refcount_t refs;

	/* Protects 'ref_tree' and 'ref_add_list'. */
	spinlock_t lock;
	struct rb_root_cached ref_tree;
	/* BTRFS_ADD_DELAYED_REF nodes are accumulated on this ref_add_list. */
	struct list_head ref_add_list;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs associated
	 * with this head ref.  It is not adjusted as delayed refs are run,
	 * since it is only meant to track whether we need to do the csum
	 * accounting or not.
	 */
	int total_ref_mod;

	/*
	 * This is the current count of outstanding ref modifications for this
	 * bytenr.  It is used with lookup_extent_info to get an accurate
	 * reference count for a bytenr, so it is adjusted as delayed refs are
	 * run so that the on-disk reference count + ref_mod is always accurate.
	 */
	int ref_mod;

	/*
	 * The root that triggered the allocation when must_insert_reserved is
	 * set to true.
	 */
	u64 owning_root;

	/*
	 * Track reserved bytes when setting must_insert_reserved.  On success
	 * or cleanup, we will need to free the reservation.
	 */
	u64 reserved_bytes;

	/*
	 * When a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-memory accounting to properly reflect
	 * that the free has happened.
	 */
	bool must_insert_reserved;

	bool is_data;
	bool is_system;
	bool processing;
};
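
/*
 * Example (illustrative): while delayed refs are pending, the logical
 * reference count of an extent is the on-disk count plus head->ref_mod,
 * which may be negative.  A hypothetical helper, not in the kernel
 * sources, could look like this (head->mutex must be held for the value
 * to be stable):
 *
 *	static u64 effective_ref_count(u64 on_disk_refs,
 *				       const struct btrfs_delayed_ref_head *head)
 *	{
 *		return on_disk_refs + head->ref_mod;
 *	}
 */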

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

enum btrfs_delayed_ref_flags {
	/* Indicate that we are flushing delayed refs for the commit */
	BTRFS_DELAYED_REFS_FLUSHING,
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root_cached href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/*
	 * How many delayed ref updates we've queued, used by the
	 * throttling code.
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	unsigned long flags;

	u64 run_delayed_start;

	/*
	 * To make qgroup skip a given root.
	 *
	 * This is for snapshot creation, as btrfs_qgroup_inherit() will
	 * manually modify the counters for a snapshot and its source, so we
	 * should skip the snapshot in new_root/old_roots or it will get
	 * accounted twice.
	 */
	u64 qgroup_to_skip;
};
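
/*
 * Illustrative sketch (not from the kernel sources): 'lock' must be held
 * to walk href_root or modify the counters, while num_entries is atomic
 * and may be read locklessly, e.g. by throttling code.  'delayed_refs'
 * and 'limit' are assumptions for the sake of the example:
 *
 *	if (atomic_read(&delayed_refs->num_entries) > limit) {
 *		spin_lock(&delayed_refs->lock);
 *		... pick heads from href_root to run ...
 *		spin_unlock(&delayed_refs->lock);
 *	}
 */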

enum btrfs_ref_type {
	BTRFS_REF_NOT_SET,
	BTRFS_REF_DATA,
	BTRFS_REF_METADATA,
	BTRFS_REF_LAST,
} __packed;

struct btrfs_data_ref {
	/* For EXTENT_DATA_REF */

	/* Root which owns this data reference. */
	u64 ref_root;

	/* Inode which refers to this data extent */
	u64 ino;

	/*
	 * file_offset - extent_offset
	 *
	 * file_offset is the key.offset of the EXTENT_DATA key.
	 * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
	 */
	u64 offset;
};

struct btrfs_tree_ref {
	/*
	 * Level of this tree block.
	 *
	 * Shared by both skinny (TREE_BLOCK_REF) and normal tree refs.
	 */
	int level;

	/*
	 * Root which owns this tree block reference.
	 *
	 * For TREE_BLOCK_REF (skinny metadata, either inline or keyed).
	 */
	u64 ref_root;

	/* For non-skinny metadata, no special member is needed. */
};

struct btrfs_ref {
	enum btrfs_ref_type type;
	enum btrfs_delayed_ref_action action;

	/*
	 * Whether qgroup recording should be skipped for this extent.
	 *
	 * Normally false, but for certain cases like the delayed subtree
	 * scan, setting this flag can hugely reduce qgroup overhead.
	 */
	bool skip_qgroup;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* Which root this modification is done through. */
	u64 real_root;
#endif
	u64 bytenr;
	u64 len;
	u64 owning_root;

	/* Bytenr of the parent tree block */
	u64 parent;
	union {
		struct btrfs_data_ref data_ref;
		struct btrfs_tree_ref tree_ref;
	};
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);

static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info,
					       int num_delayed_refs)
{
	u64 num_bytes;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	return num_bytes;
}

static inline u64 btrfs_calc_delayed_ref_csum_bytes(const struct btrfs_fs_info *fs_info,
						    int num_csum_items)
{
	/*
	 * Deleting csum items does not result in new nodes/leaves and does not
	 * require changing the free space tree, only the csum tree, so this is
	 * all we need.
	 */
	return btrfs_calc_metadata_size(fs_info, num_csum_items);
}
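
/*
 * Example usage (illustrative, not part of the kernel sources): reserving
 * metadata space for a transaction expected to queue two delayed ref
 * updates and delete one csum item.  'fs_info' is assumed to be a valid
 * struct btrfs_fs_info pointer:
 *
 *	u64 bytes;
 *
 *	bytes = btrfs_calc_delayed_ref_bytes(fs_info, 2) +
 *		btrfs_calc_delayed_ref_csum_bytes(fs_info, 1);
 */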

void btrfs_init_generic_ref(struct btrfs_ref *generic_ref, int action, u64 bytenr,
			    u64 len, u64 parent, u64 owning_root);
void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 root,
			 u64 mod_root, bool skip_qgroup);
void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ref_root, u64 ino,
			 u64 offset, u64 mod_root, bool skip_qgroup);
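
/*
 * Example (illustrative sketch): queueing a delayed ref that adds one data
 * backref.  'trans', 'bytenr', 'len', 'root_objectid', 'inode_num',
 * 'file_offset' and 'mod_root' are assumptions for the sake of the example;
 * parent is 0 since the backref is not shared, and reserved is 0 since
 * this is not a new allocation:
 *
 *	struct btrfs_ref ref = { 0 };
 *	int ret;
 *
 *	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr, len,
 *			       0, root_objectid);
 *	btrfs_init_data_ref(&ref, root_objectid, inode_num, file_offset,
 *			    mod_root, false);
 *	ret = btrfs_add_delayed_data_ref(trans, &ref, 0);
 */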

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}
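
/*
 * Example (illustrative sketch): attaching an extent op that updates the
 * flags of a tree block.  'trans', 'bytenr', 'num_bytes' and 'level' are
 * assumptions for the sake of the example; the op is freed on failure
 * since ownership only passes on success:
 *
 *	struct btrfs_delayed_extent_op *op;
 *	int ret;
 *
 *	op = btrfs_alloc_delayed_extent_op();
 *	if (!op)
 *		return -ENOMEM;
 *	op->flags_to_set = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 *	op->update_flags = true;
 *	op->update_key = false;
 *	op->level = level;
 *	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, op);
 *	if (ret)
 *		btrfs_free_delayed_extent_op(op);
 */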

void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref);

static inline u64 btrfs_ref_head_to_space_flags(
				struct btrfs_delayed_ref_head *head_ref)
{
	if (head_ref->is_data)
		return BTRFS_BLOCK_GROUP_DATA;
	else if (head_ref->is_system)
		return BTRFS_BLOCK_GROUP_SYSTEM;
	return BTRFS_BLOCK_GROUP_METADATA;
}

static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
	if (refcount_dec_and_test(&head->refs))
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}

int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
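
/*
 * Example (illustrative sketch): looking up and locking the head for a
 * given bytenr.  btrfs_find_delayed_ref_head() expects delayed_refs->lock
 * to be held, and btrfs_delayed_ref_lock() may temporarily drop and
 * re-take that spinlock, returning -EAGAIN if the head was run meanwhile:
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 *	if (head && !btrfs_delayed_ref_lock(delayed_refs, head)) {
 *		spin_unlock(&delayed_refs->lock);
 *		... inspect the head under head->mutex ...
 *		btrfs_delayed_ref_unlock(head);
 *	} else {
 *		spin_unlock(&delayed_refs->lock);
 *	}
 */
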
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);

void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_data_ref, node);
}
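
/*
 * Example (illustrative): node->type holds a BTRFS_*_REF_KEY value from
 * uapi/linux/btrfs_tree.h, which tells which container a node belongs to:
 *
 *	switch (node->type) {
 *	case BTRFS_TREE_BLOCK_REF_KEY:
 *	case BTRFS_SHARED_BLOCK_REF_KEY:
 *		tree_ref = btrfs_delayed_node_to_tree_ref(node);
 *		break;
 *	case BTRFS_EXTENT_DATA_REF_KEY:
 *	case BTRFS_SHARED_DATA_REF_KEY:
 *		data_ref = btrfs_delayed_node_to_data_ref(node);
 *		break;
 *	}
 */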

#endif