/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H

#include <linux/types.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <uapi/linux/btrfs_tree.h>

struct btrfs_trans_handle;
struct btrfs_fs_info;

/* these are the possible values of struct btrfs_delayed_ref_node->action */
enum btrfs_delayed_ref_action {
	/* Add one backref to the tree */
	BTRFS_ADD_DELAYED_REF = 1,
	/* Delete one backref from the tree */
	BTRFS_DROP_DELAYED_REF,
	/* Record a full extent allocation */
	BTRFS_ADD_DELAYED_EXTENT,
	/* Not changing ref count on head ref */
	BTRFS_UPDATE_DELAYED_HEAD,
} __packed;

struct btrfs_data_ref {
	/* For EXTENT_DATA_REF */

	/* Inode which refers to this data extent */
	u64 objectid;

	/*
	 * file_offset - extent_offset
	 *
	 * file_offset is the key.offset of the EXTENT_DATA key.
	 * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
	 */
	u64 offset;
};
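
/*
 * Illustrative sketch (not from this header): how the 'offset' member above
 * would typically be derived from an EXTENT_DATA item found by a tree
 * search. 'leaf', 'slot' and 'key' are hypothetical locals; btrfs_item_ptr()
 * and btrfs_file_extent_offset() are the usual on-disk item accessors:
 *
 *	struct btrfs_file_extent_item *fi;
 *
 *	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
 *	data_ref.offset = key.offset - btrfs_file_extent_offset(leaf, fi);
 */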
47
struct btrfs_tree_ref {
	/*
	 * Level of this tree block.
	 *
	 * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
	 */
	int level;

	/* For non-skinny metadata, no special member needed */
};

struct btrfs_delayed_ref_node {
	struct rb_node ref_node;
	/*
	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
	 * ref_head->ref_add_list, then we do not need to iterate the
	 * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
	 */
	struct list_head add_list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* The ref_root for this ref */
	u64 ref_root;

	/*
	 * The parent for this ref; if this isn't set, the ref_root is the
	 * reference owner.
	 */
	u64 parent;

	/* ref count on this data structure */
	refcount_t refs;

	/*
	 * How many refs this entry is adding or deleting. For
	 * head refs, this may be a negative number because it keeps
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;

	union {
		struct btrfs_tree_ref tree_ref;
		struct btrfs_data_ref data_ref;
	};
};
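
/*
 * Illustrative sketch (not from this header): thanks to add_list, the
 * BTRFS_ADD_DELAYED_REF entries queued under a head can be walked directly
 * instead of scanning the whole ref_tree. 'head' is a hypothetical, already
 * locked struct btrfs_delayed_ref_head:
 *
 *	struct btrfs_delayed_ref_node *node;
 *
 *	list_for_each_entry(node, &head->ref_add_list, add_list)
 *		ASSERT(node->action == BTRFS_ADD_DELAYED_REF);
 */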

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	bool update_key;
	bool update_flags;
	u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent. They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	u64 bytenr;
	u64 num_bytes;
	/*
	 * For insertion into struct btrfs_delayed_ref_root::href_root.
	 * Keep it in the same cache line as 'bytenr' for more efficient
	 * searches in the rbtree.
	 */
	struct rb_node href_node;
	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	refcount_t refs;

	/* Protects 'ref_tree' and 'ref_add_list'. */
	spinlock_t lock;
	struct rb_root_cached ref_tree;
	/* BTRFS_ADD_DELAYED_REF nodes are accumulated on this list. */
	struct list_head ref_add_list;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs associated
	 * with this head ref. It is not adjusted as delayed refs are run;
	 * it is meant to track whether we need to do the csum accounting or
	 * not.
	 */
	int total_ref_mod;

	/*
	 * This is the current outstanding reference modification count for
	 * this bytenr. It is used with lookup_extent_info to get an accurate
	 * reference count for a bytenr, and is adjusted as delayed refs are
	 * run so that the on-disk reference count + ref_mod stays accurate.
	 */
	int ref_mod;

	/*
	 * The root that triggered the allocation when must_insert_reserved is
	 * set to true.
	 */
	u64 owning_root;

	/*
	 * Track reserved bytes when setting must_insert_reserved. On success
	 * or cleanup, we will need to free the reservation.
	 */
	u64 reserved_bytes;

	/* Tree block level, for metadata only. */
	u8 level;

	/*
	 * When a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed. must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree. In this case
	 * we need to update the in-memory accounting to properly reflect
	 * that the free has happened.
	 */
	bool must_insert_reserved;

	bool is_data;
	bool is_system;
	bool processing;
};

enum btrfs_delayed_ref_flags {
	/* Indicate that we are flushing delayed refs for the commit */
	BTRFS_DELAYED_REFS_FLUSHING,
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root_cached href_root;

	/*
	 * Track dirty extent records.
	 * The keys correspond to the logical address of the extent ("bytenr")
	 * right shifted by fs_info->sectorsize_bits. This is both to get a
	 * denser index space (it optimizes the xarray structure) and because
	 * indexes in xarrays are of "unsigned long" type, meaning they are
	 * 32 bits wide on 32-bit platforms, limiting the extent range to 4G,
	 * which is too low and makes it unusable (truncated index values) on
	 * 32-bit platforms. See the indexing sketch after this struct.
	 */
	struct xarray dirty_extents;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/*
	 * how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	unsigned long flags;

	u64 run_delayed_start;

	/*
	 * Used to make qgroup skip a given root.
	 * This is for snapshots, as btrfs_qgroup_inherit() will manually
	 * modify counters for a snapshot and its source, so we should skip
	 * the snapshot in new_root/old_roots or it will get counted twice.
	 */
	u64 qgroup_to_skip;
};
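
/*
 * Illustrative sketch (not from this header) of the dirty_extents indexing
 * described above. 'record' is a hypothetical tracking object; the shift by
 * fs_info->sectorsize_bits is what keeps the index within "unsigned long"
 * range on 32-bit platforms:
 *
 *	unsigned long index = bytenr >> fs_info->sectorsize_bits;
 *
 *	xa_store(&delayed_refs->dirty_extents, index, record, GFP_NOFS);
 */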

enum btrfs_ref_type {
	BTRFS_REF_NOT_SET,
	BTRFS_REF_DATA,
	BTRFS_REF_METADATA,
	BTRFS_REF_LAST,
} __packed;

struct btrfs_ref {
	enum btrfs_ref_type type;
	enum btrfs_delayed_ref_action action;

	/*
	 * Whether this extent should go through qgroup recording.
	 *
	 * Normally false, but for certain cases like delayed subtree scan,
	 * setting this flag can hugely reduce qgroup overhead.
	 */
	bool skip_qgroup;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* Through which root is this modification made. */
	u64 real_root;
#endif
	u64 bytenr;
	u64 num_bytes;
	u64 owning_root;

	/*
	 * The root that owns this reference. Either this or ->parent will be
	 * set, depending on what type of reference this is.
	 */
	u64 ref_root;

	/* Bytenr of the parent tree block */
	u64 parent;
	union {
		struct btrfs_data_ref data_ref;
		struct btrfs_tree_ref tree_ref;
	};
};
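
/*
 * Illustrative sketch (hypothetical values, not from this header): filling a
 * btrfs_ref to add one data extent reference, roughly the way callers combine
 * a designated initializer with btrfs_init_data_ref() declared below:
 *
 *	struct btrfs_ref ref = {
 *		.action = BTRFS_ADD_DELAYED_REF,
 *		.bytenr = extent_start,
 *		.num_bytes = extent_len,
 *		.owning_root = root_id,
 *		.ref_root = root_id,
 *	};
 *
 *	btrfs_init_data_ref(&ref, inode_objectid, file_offset, 0, false);
 */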

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_ref_node_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);

static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info,
					       int num_delayed_refs)
{
	u64 num_bytes;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	return num_bytes;
}
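
/*
 * Worked example (assuming btrfs_calc_insert_metadata_size() charges
 * nodesize * BTRFS_MAX_LEVEL * 2 per item, which is its usual definition):
 * with a 16K nodesize, one delayed ref reserves 16K * 8 * 2 = 256K, and
 * twice that, 512K, when the free space tree is enabled.
 */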

static inline u64 btrfs_calc_delayed_ref_csum_bytes(const struct btrfs_fs_info *fs_info,
						    int num_csum_items)
{
	/*
	 * Deleting csum items does not result in new nodes/leaves and does not
	 * require changing the free space tree, only the csum tree, so this is
	 * all we need.
	 */
	return btrfs_calc_metadata_size(fs_info, num_csum_items);
}


void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
			 bool skip_qgroup);
void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
			 u64 mod_root, bool skip_qgroup);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}
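
/*
 * Illustrative sketch (hypothetical caller, not from this header): queueing a
 * flags-only update through a delayed extent op, freeing the op ourselves if
 * the ref could not be queued:
 *
 *	struct btrfs_delayed_extent_op *op;
 *	int ret;
 *
 *	op = btrfs_alloc_delayed_extent_op();
 *	if (!op)
 *		return -ENOMEM;
 *	op->update_flags = true;
 *	op->update_key = false;
 *	op->flags_to_set = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 *	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, level, op);
 *	if (ret)
 *		btrfs_free_delayed_extent_op(op);
 */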

void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref);

static inline u64 btrfs_ref_head_to_space_flags(
				struct btrfs_delayed_ref_head *head_ref)
{
	if (head_ref->is_data)
		return BTRFS_BLOCK_GROUP_DATA;
	else if (head_ref->is_system)
		return BTRFS_BLOCK_GROUP_SYSTEM;
	return BTRFS_BLOCK_GROUP_METADATA;
}

static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
	if (refcount_dec_and_test(&head->refs))
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}

int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes, u8 level,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs);
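
/*
 * Illustrative sketch (simplified, not from this header) of how the head
 * selection, locking and refcounting primitives above fit together when
 * running delayed refs; the actual ref-running work and error handling are
 * elided:
 *
 *	struct btrfs_delayed_ref_head *head;
 *
 *	head = btrfs_select_ref_head(delayed_refs);
 *	if (head && !btrfs_delayed_ref_lock(delayed_refs, head)) {
 *		(run the ref nodes queued under 'head')
 *		btrfs_delayed_ref_unlock(head);
 *		btrfs_put_delayed_ref_head(head);
 *	}
 */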

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);

void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
				 u64 root, u64 parent);


static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node)
{
	if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
	    node->type == BTRFS_SHARED_DATA_REF_KEY)
		return node->data_ref.objectid;
	return node->tree_ref.level;
}

static inline u64 btrfs_delayed_ref_offset(struct btrfs_delayed_ref_node *node)
{
	if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
	    node->type == BTRFS_SHARED_DATA_REF_KEY)
		return node->data_ref.offset;
	return 0;
}


static inline u8 btrfs_ref_type(struct btrfs_ref *ref)
{
	ASSERT(ref->type == BTRFS_REF_DATA || ref->type == BTRFS_REF_METADATA);

	if (ref->type == BTRFS_REF_DATA) {
		if (ref->parent)
			return BTRFS_SHARED_DATA_REF_KEY;
		else
			return BTRFS_EXTENT_DATA_REF_KEY;
	} else {
		if (ref->parent)
			return BTRFS_SHARED_BLOCK_REF_KEY;
		else
			return BTRFS_TREE_BLOCK_REF_KEY;
	}

	return 0;
}
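
/*
 * For example: a ref with type == BTRFS_REF_METADATA and a non-zero parent
 * resolves to BTRFS_SHARED_BLOCK_REF_KEY, while the same ref with parent == 0
 * resolves to BTRFS_TREE_BLOCK_REF_KEY; data refs map analogously to
 * BTRFS_SHARED_DATA_REF_KEY and BTRFS_EXTENT_DATA_REF_KEY.
 */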

#endif