1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/sort.h>
9 #include "messages.h"
10 #include "ctree.h"
11 #include "delayed-ref.h"
12 #include "extent-tree.h"
13 #include "transaction.h"
14 #include "qgroup.h"
15 #include "space-info.h"
16 #include "tree-mod-log.h"
17 #include "fs.h"
18 
19 struct kmem_cache *btrfs_delayed_ref_head_cachep;
20 struct kmem_cache *btrfs_delayed_ref_node_cachep;
21 struct kmem_cache *btrfs_delayed_extent_op_cachep;
22 /*
23  * delayed back reference update tracking.  For subvolume trees
24  * we queue up extent allocations and backref maintenance for
25  * delayed processing.   This avoids deep call chains where we
26  * add extents in the middle of btrfs_search_slot, and it allows
27  * us to buffer up frequently modified backrefs in an rb tree instead
28  * of hammering updates on the extent allocation tree.
29  */
30 
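/*
 * Check whether the delayed refs block reserve is running low.
 *
 * Returns true when the reserve's size exceeds what is currently reserved in
 * it plus the global block reserve, i.e. when the caller should consider
 * running delayed refs (or committing) instead of queueing up more of them.
 */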
31 bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
32 {
33 	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
34 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
35 	bool ret = false;
36 	u64 reserved;
37 
38 	spin_lock(&global_rsv->lock);
39 	reserved = global_rsv->reserved;
40 	spin_unlock(&global_rsv->lock);
41 
42 	/*
43 	 * Since the global reserve is just kind of magic we don't really want
44 	 * to rely on it to save our bacon, so if our size is more than the
45 	 * delayed_refs_rsv and the global rsv then it's time to think about
46 	 * bailing.
47 	 */
48 	spin_lock(&delayed_refs_rsv->lock);
49 	reserved += delayed_refs_rsv->reserved;
50 	if (delayed_refs_rsv->size >= reserved)
51 		ret = true;
52 	spin_unlock(&delayed_refs_rsv->lock);
53 	return ret;
54 }
55 
56 /*
57  * Release a ref head's reservation.
58  *
59  * @fs_info:  the filesystem
60  * @nr_refs:  number of delayed refs to drop
61  * @nr_csums: number of csum items to drop
62  *
63  * Drops the delayed ref head's count from the delayed refs rsv and frees any
64  * excess reservation we had.
65  */
66 void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
67 {
68 	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
69 	u64 num_bytes;
70 	u64 released;
71 
72 	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
73 	num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
74 
75 	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
76 	if (released)
77 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
78 					      0, released, 0);
79 }
80 
81 /*
82  * Adjust the size of the delayed refs rsv.
83  *
84  * This is to be called any time we may have adjusted trans->delayed_ref_updates
85  * or trans->delayed_ref_csum_deletions; it calculates the additional size and
86  * adds it to the delayed_refs_rsv.
87  */
88 void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
89 {
90 	struct btrfs_fs_info *fs_info = trans->fs_info;
91 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
92 	struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
93 	u64 num_bytes;
94 	u64 reserved_bytes;
95 
96 	if (btrfs_is_testing(fs_info))
97 		return;
98 
99 	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
100 	num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
101 						       trans->delayed_ref_csum_deletions);
102 
103 	if (num_bytes == 0)
104 		return;
105 
106 	/*
107 	 * Try to take num_bytes from the transaction's local delayed reserve.
108 	 * If not possible, try to take as much as it's available. If the local
109 	 * reserve doesn't have enough reserved space, the delayed refs reserve
110 	 * will be refilled next time btrfs_delayed_refs_rsv_refill() is called
111 	 * by someone or if a transaction commit is triggered before that, the
112 	 * global block reserve will be used. We want to minimize using the
113 	 * global block reserve for cases we can account for in advance, to
114 	 * avoid exhausting it and reach -ENOSPC during a transaction commit.
115 	 */
116 	spin_lock(&local_rsv->lock);
117 	reserved_bytes = min(num_bytes, local_rsv->reserved);
118 	local_rsv->reserved -= reserved_bytes;
119 	local_rsv->full = (local_rsv->reserved >= local_rsv->size);
120 	spin_unlock(&local_rsv->lock);
121 
122 	spin_lock(&delayed_rsv->lock);
123 	delayed_rsv->size += num_bytes;
124 	delayed_rsv->reserved += reserved_bytes;
125 	delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
126 	spin_unlock(&delayed_rsv->lock);
127 	trans->delayed_ref_updates = 0;
128 	trans->delayed_ref_csum_deletions = 0;
129 }
130 
131 /*
132  * Adjust the size of the delayed refs block reserve for 1 block group item
133  * insertion, used after allocating a block group.
134  */
135 void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
136 {
137 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
138 
139 	spin_lock(&delayed_rsv->lock);
140 	/*
141 	 * Inserting a block group item does not require changing the free space
142 	 * tree, only the extent tree or the block group tree, so this is all we
143 	 * need.
144 	 */
145 	delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
146 	delayed_rsv->full = false;
147 	spin_unlock(&delayed_rsv->lock);
148 }
149 
150 /*
151  * Adjust the size of the delayed refs block reserve to release space for 1
152  * block group item insertion.
153  */
154 void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
155 {
156 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
157 	const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
158 	u64 released;
159 
160 	released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
161 	if (released > 0)
162 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
163 					      0, released, 0);
164 }
165 
166 /*
167  * Adjust the size of the delayed refs block reserve for 1 block group item
168  * update.
169  */
170 void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
171 {
172 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
173 
174 	spin_lock(&delayed_rsv->lock);
175 	/*
176 	 * Updating a block group item does not result in new nodes/leaves and
177 	 * does not require changing the free space tree, only the extent tree
178 	 * or the block group tree, so this is all we need.
179 	 */
180 	delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
181 	delayed_rsv->full = false;
182 	spin_unlock(&delayed_rsv->lock);
183 }
184 
185 /*
186  * Adjust the size of the delayed refs block reserve to release space for 1
187  * block group item update.
188  */
189 void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
190 {
191 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
192 	const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
193 	u64 released;
194 
195 	released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
196 	if (released > 0)
197 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
198 					      0, released, 0);
199 }
200 
201 /*
202  * Refill based on our delayed refs usage.
203  *
204  * @fs_info: the filesystem
205  * @flush:   control how we can flush for this reservation.
206  *
207  * This will refill the delayed block_rsv up to 1 item's size worth of space and
208  * will return -ENOSPC if we can't make the reservation.
209  */
210 int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
211 				  enum btrfs_reserve_flush_enum flush)
212 {
213 	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
214 	struct btrfs_space_info *space_info = block_rsv->space_info;
215 	u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
216 	u64 num_bytes = 0;
217 	u64 refilled_bytes;
218 	u64 to_free;
219 	int ret = -ENOSPC;
220 
221 	spin_lock(&block_rsv->lock);
222 	if (block_rsv->reserved < block_rsv->size) {
223 		num_bytes = block_rsv->size - block_rsv->reserved;
224 		num_bytes = min(num_bytes, limit);
225 	}
226 	spin_unlock(&block_rsv->lock);
227 
228 	if (!num_bytes)
229 		return 0;
230 
231 	ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
232 	if (ret)
233 		return ret;
234 
235 	/*
236 	 * We may have raced with someone else, so check again if the block
237 	 * reserve is still not full and release any excess space.
238 	 */
239 	spin_lock(&block_rsv->lock);
240 	if (block_rsv->reserved < block_rsv->size) {
241 		u64 needed = block_rsv->size - block_rsv->reserved;
242 
243 		if (num_bytes >= needed) {
244 			block_rsv->reserved += needed;
245 			block_rsv->full = true;
246 			to_free = num_bytes - needed;
247 			refilled_bytes = needed;
248 		} else {
249 			block_rsv->reserved += num_bytes;
250 			to_free = 0;
251 			refilled_bytes = num_bytes;
252 		}
253 	} else {
254 		to_free = num_bytes;
255 		refilled_bytes = 0;
256 	}
257 	spin_unlock(&block_rsv->lock);
258 
259 	if (to_free > 0)
260 		btrfs_space_info_free_bytes_may_use(space_info, to_free);
261 
262 	if (refilled_bytes > 0)
263 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
264 					      refilled_bytes, 1);
265 	return 0;
266 }
267 
268 /*
269  * compare two delayed data backrefs with same bytenr and type
270  */
271 static int comp_data_refs(const struct btrfs_delayed_ref_node *ref1,
272 			  const struct btrfs_delayed_ref_node *ref2)
273 {
274 	if (ref1->data_ref.objectid < ref2->data_ref.objectid)
275 		return -1;
276 	if (ref1->data_ref.objectid > ref2->data_ref.objectid)
277 		return 1;
278 	if (ref1->data_ref.offset < ref2->data_ref.offset)
279 		return -1;
280 	if (ref1->data_ref.offset > ref2->data_ref.offset)
281 		return 1;
282 	return 0;
283 }
284 
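/*
 * Compare two delayed ref nodes of the same bytenr: first by type, then by
 * parent (for shared refs) or by root and data ref fields (for keyed refs),
 * and finally by sequence number when @check_seq is true.  This is the order
 * used for the per-head rbtree of ref nodes.
 */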
285 static int comp_refs(const struct btrfs_delayed_ref_node *ref1,
286 		     const struct btrfs_delayed_ref_node *ref2,
287 		     bool check_seq)
288 {
289 	int ret = 0;
290 
291 	if (ref1->type < ref2->type)
292 		return -1;
293 	if (ref1->type > ref2->type)
294 		return 1;
295 	if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
296 	    ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
297 		if (ref1->parent < ref2->parent)
298 			return -1;
299 		if (ref1->parent > ref2->parent)
300 			return 1;
301 	} else {
302 		if (ref1->ref_root < ref2->ref_root)
303 			return -1;
304 		if (ref1->ref_root > ref2->ref_root)
305 			return 1;
306 		if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
307 			ret = comp_data_refs(ref1, ref2);
308 	}
309 	if (ret)
310 		return ret;
311 	if (check_seq) {
312 		if (ref1->seq < ref2->seq)
313 			return -1;
314 		if (ref1->seq > ref2->seq)
315 			return 1;
316 	}
317 	return 0;
318 }
319 
320 static int cmp_refs_node(const struct rb_node *new, const struct rb_node *exist)
321 {
322 	const struct btrfs_delayed_ref_node *new_node =
323 		rb_entry(new, struct btrfs_delayed_ref_node, ref_node);
324 	const struct btrfs_delayed_ref_node *exist_node =
325 		rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
326 
327 	return comp_refs(new_node, exist_node, true);
328 }
329 
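/*
 * Insert @ins into the ref head's rbtree of delayed ref nodes.  Returns NULL
 * if it was inserted, or the already existing node that compares equal so the
 * caller can merge into it.
 */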
330 static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
331 		struct btrfs_delayed_ref_node *ins)
332 {
333 	struct rb_node *node = &ins->ref_node;
334 	struct rb_node *exist;
335 
336 	exist = rb_find_add_cached(node, root, cmp_refs_node);
337 	if (exist)
338 		return rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
339 	return NULL;
340 }
341 
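/*
 * Return the delayed ref head with the lowest bytenr still in the xarray, or
 * NULL if there are none.  Caller must hold the delayed refs lock.
 */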
342 static struct btrfs_delayed_ref_head *find_first_ref_head(
343 		struct btrfs_delayed_ref_root *dr)
344 {
345 	unsigned long from = 0;
346 
347 	lockdep_assert_held(&dr->lock);
348 
349 	return xa_find(&dr->head_refs, &from, ULONG_MAX, XA_PRESENT);
350 }
351 
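/*
 * Lock a ref head's mutex while holding the delayed refs spinlock.  If the
 * mutex is contended, the spinlock is dropped while blocking on the mutex, so
 * the head may get removed from tracking in the meantime.  Returns true if
 * the head is locked and still tracked, false if it went away (in which case
 * the mutex is not held on return).
 */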
352 static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
353 				   struct btrfs_delayed_ref_head *head)
354 {
355 	lockdep_assert_held(&delayed_refs->lock);
356 	if (mutex_trylock(&head->mutex))
357 		return true;
358 
359 	refcount_inc(&head->refs);
360 	spin_unlock(&delayed_refs->lock);
361 
362 	mutex_lock(&head->mutex);
363 	spin_lock(&delayed_refs->lock);
364 	if (!head->tracked) {
365 		mutex_unlock(&head->mutex);
366 		btrfs_put_delayed_ref_head(head);
367 		return false;
368 	}
369 	btrfs_put_delayed_ref_head(head);
370 	return true;
371 }
372 
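/*
 * Remove a delayed ref node from its head: erase it from the head's rbtree
 * and add list, drop the node's reference and release the space it held in
 * the delayed refs block reserve.
 */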
373 static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
374 				    struct btrfs_delayed_ref_root *delayed_refs,
375 				    struct btrfs_delayed_ref_head *head,
376 				    struct btrfs_delayed_ref_node *ref)
377 {
378 	lockdep_assert_held(&head->lock);
379 	rb_erase_cached(&ref->ref_node, &head->ref_tree);
380 	RB_CLEAR_NODE(&ref->ref_node);
381 	if (!list_empty(&ref->add_list))
382 		list_del(&ref->add_list);
383 	btrfs_put_delayed_ref(ref);
384 	btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
385 }
386 
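/*
 * Try to merge @ref with the following nodes in the head's rbtree that
 * describe the same reference, stopping at nodes with a sequence number >=
 * @seq.  Opposite actions cancel out and nodes whose ref mod reaches zero are
 * dropped.  Returns true if the node passed in no longer exists (it was
 * dropped or merged away), in which case the caller must restart its scan.
 */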
387 static bool merge_ref(struct btrfs_fs_info *fs_info,
388 		      struct btrfs_delayed_ref_root *delayed_refs,
389 		      struct btrfs_delayed_ref_head *head,
390 		      struct btrfs_delayed_ref_node *ref,
391 		      u64 seq)
392 {
393 	struct btrfs_delayed_ref_node *next;
394 	struct rb_node *node = rb_next(&ref->ref_node);
395 	bool done = false;
396 
397 	while (!done && node) {
398 		int mod;
399 
400 		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
401 		node = rb_next(node);
402 		if (seq && next->seq >= seq)
403 			break;
404 		if (comp_refs(ref, next, false))
405 			break;
406 
407 		if (ref->action == next->action) {
408 			mod = next->ref_mod;
409 		} else {
410 			if (ref->ref_mod < next->ref_mod) {
411 				swap(ref, next);
412 				done = true;
413 			}
414 			mod = -next->ref_mod;
415 		}
416 
417 		drop_delayed_ref(fs_info, delayed_refs, head, next);
418 		ref->ref_mod += mod;
419 		if (ref->ref_mod == 0) {
420 			drop_delayed_ref(fs_info, delayed_refs, head, ref);
421 			done = true;
422 		} else {
423 			/*
424 			 * Can't have multiples of the same ref on a tree block.
425 			 */
426 			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
427 				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
428 		}
429 	}
430 
431 	return done;
432 }
433 
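/*
 * Merge all mergeable ref nodes of a metadata ref head before running it.
 * Nodes at or above the lowest tree mod log sequence number are left alone,
 * and data extent heads are skipped since they don't accumulate many refs.
 */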
434 void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
435 			      struct btrfs_delayed_ref_root *delayed_refs,
436 			      struct btrfs_delayed_ref_head *head)
437 {
438 	struct btrfs_delayed_ref_node *ref;
439 	struct rb_node *node;
440 	u64 seq = 0;
441 
442 	lockdep_assert_held(&head->lock);
443 
444 	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
445 		return;
446 
447 	/* We don't have too many refs to merge for data. */
448 	if (head->is_data)
449 		return;
450 
451 	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
452 again:
453 	for (node = rb_first_cached(&head->ref_tree); node;
454 	     node = rb_next(node)) {
455 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
456 		if (seq && ref->seq >= seq)
457 			continue;
458 		if (merge_ref(fs_info, delayed_refs, head, ref, seq))
459 			goto again;
460 	}
461 }
462 
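/*
 * Check if a delayed ref with sequence number @seq must be held back because
 * a tree mod log user still needs the sequence numbers below it.  Returns 1
 * if the ref must wait, 0 if it can be processed now.
 */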
463 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
464 {
465 	int ret = 0;
466 	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
467 
468 	if (min_seq != 0 && seq >= min_seq) {
469 		btrfs_debug(fs_info,
470 			    "holding back delayed_ref %llu, lowest is %llu",
471 			    seq, min_seq);
472 		ret = 1;
473 	}
474 
475 	return ret;
476 }
477 
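/*
 * Select the next delayed ref head to run: the first unprocessed head at or
 * after run_delayed_start, wrapping around to the start of the xarray once.
 * The chosen head is marked as being processed and its mutex is taken.
 *
 * Returns the locked head, NULL if there is nothing left to process, or
 * ERR_PTR(-EAGAIN) if the head got freed while waiting for its mutex.
 */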
478 struct btrfs_delayed_ref_head *btrfs_select_ref_head(
479 		const struct btrfs_fs_info *fs_info,
480 		struct btrfs_delayed_ref_root *delayed_refs)
481 {
482 	struct btrfs_delayed_ref_head *head;
483 	unsigned long start_index;
484 	unsigned long found_index;
485 	bool found_head = false;
486 	bool locked;
487 
488 	spin_lock(&delayed_refs->lock);
489 again:
490 	start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
491 	xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
492 		if (!head->processing) {
493 			found_head = true;
494 			break;
495 		}
496 	}
497 	if (!found_head) {
498 		if (delayed_refs->run_delayed_start == 0) {
499 			spin_unlock(&delayed_refs->lock);
500 			return NULL;
501 		}
502 		delayed_refs->run_delayed_start = 0;
503 		goto again;
504 	}
505 
506 	head->processing = true;
507 	WARN_ON(delayed_refs->num_heads_ready == 0);
508 	delayed_refs->num_heads_ready--;
509 	delayed_refs->run_delayed_start = head->bytenr +
510 		head->num_bytes;
511 
512 	locked = btrfs_delayed_ref_lock(delayed_refs, head);
513 	spin_unlock(&delayed_refs->lock);
514 
515 	/*
516 	 * We may have dropped the spin lock to get the head mutex lock, and
517 	 * that might have given someone else time to free the head.  If that's
518 	 * true, it has been removed from our list and we can move on.
519 	 */
520 	if (!locked)
521 		return ERR_PTR(-EAGAIN);
522 
523 	return head;
524 }
525 
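/*
 * Undo btrfs_select_ref_head(): mark the head as not being processed, put it
 * back into the ready count and drop its mutex.
 */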
526 void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
527 			     struct btrfs_delayed_ref_head *head)
528 {
529 	spin_lock(&delayed_refs->lock);
530 	head->processing = false;
531 	delayed_refs->num_heads_ready++;
532 	spin_unlock(&delayed_refs->lock);
533 	btrfs_delayed_ref_unlock(head);
534 }
535 
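/*
 * Remove a ref head from the xarray tracking it and update the head counters.
 * Callers must hold both the delayed refs lock and the head's lock.
 */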
536 void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
537 			   struct btrfs_delayed_ref_root *delayed_refs,
538 			   struct btrfs_delayed_ref_head *head)
539 {
540 	const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);
541 
542 	lockdep_assert_held(&delayed_refs->lock);
543 	lockdep_assert_held(&head->lock);
544 
545 	xa_erase(&delayed_refs->head_refs, index);
546 	head->tracked = false;
547 	delayed_refs->num_heads--;
548 	if (!head->processing)
549 		delayed_refs->num_heads_ready--;
550 }
551 
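/*
 * Pick the next ref node of a head to run, or NULL if the head has no more
 * nodes.  Refs that add a reference are preferred over drops, see the comment
 * in the body for the reason.
 */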
552 struct btrfs_delayed_ref_node *btrfs_select_delayed_ref(struct btrfs_delayed_ref_head *head)
553 {
554 	struct btrfs_delayed_ref_node *ref;
555 
556 	lockdep_assert_held(&head->mutex);
557 	lockdep_assert_held(&head->lock);
558 
559 	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
560 		return NULL;
561 
562 	/*
563 	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
564 	 * This is to prevent a ref count from going down to zero, which deletes
565 	 * the extent item from the extent tree, when there still are references
566 	 * to add, which would fail because they would not find the extent item.
567 	 */
568 	if (!list_empty(&head->ref_add_list))
569 		return list_first_entry(&head->ref_add_list,
570 					struct btrfs_delayed_ref_node, add_list);
571 
572 	ref = rb_entry(rb_first_cached(&head->ref_tree),
573 		       struct btrfs_delayed_ref_node, ref_node);
574 	ASSERT(list_empty(&ref->add_list));
575 	return ref;
576 }
577 
578 /*
579  * Helper to insert the ref_node to the tail or merge with tail.
580  *
581  * Return false if the ref was inserted.
582  * Return true if the ref was merged into an existing one (and therefore can be
583  * freed by the caller).
584  */
585 static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
586 			       struct btrfs_delayed_ref_head *href,
587 			       struct btrfs_delayed_ref_node *ref)
588 {
589 	struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
590 	struct btrfs_delayed_ref_node *exist;
591 	int mod;
592 
593 	spin_lock(&href->lock);
594 	exist = tree_insert(&href->ref_tree, ref);
595 	if (!exist) {
596 		if (ref->action == BTRFS_ADD_DELAYED_REF)
597 			list_add_tail(&ref->add_list, &href->ref_add_list);
598 		spin_unlock(&href->lock);
599 		trans->delayed_ref_updates++;
600 		return false;
601 	}
602 
603 	/* Now we are sure we can merge */
604 	if (exist->action == ref->action) {
605 		mod = ref->ref_mod;
606 	} else {
607 		/* Need to change action */
608 		if (exist->ref_mod < ref->ref_mod) {
609 			exist->action = ref->action;
610 			mod = -exist->ref_mod;
611 			exist->ref_mod = ref->ref_mod;
612 			if (ref->action == BTRFS_ADD_DELAYED_REF)
613 				list_add_tail(&exist->add_list,
614 					      &href->ref_add_list);
615 			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
616 				ASSERT(!list_empty(&exist->add_list));
617 				list_del_init(&exist->add_list);
618 			} else {
619 				ASSERT(0);
620 			}
621 		} else
622 			mod = -ref->ref_mod;
623 	}
624 	exist->ref_mod += mod;
625 
626 	/* remove existing tail if its ref_mod is zero */
627 	if (exist->ref_mod == 0)
628 		drop_delayed_ref(trans->fs_info, root, href, exist);
629 	spin_unlock(&href->lock);
630 	return true;
631 }
632 
633 /*
634  * helper function to update the accounting in the head ref
635  * existing and update must have the same bytenr
636  */
637 static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
638 			 struct btrfs_delayed_ref_head *existing,
639 			 struct btrfs_delayed_ref_head *update)
640 {
641 	struct btrfs_delayed_ref_root *delayed_refs =
642 		&trans->transaction->delayed_refs;
643 	struct btrfs_fs_info *fs_info = trans->fs_info;
644 	int old_ref_mod;
645 
646 	BUG_ON(existing->is_data != update->is_data);
647 
648 	spin_lock(&existing->lock);
649 
650 	/*
651 	 * When freeing an extent, we may not know the owning root when we
652 	 * first create the head_ref. However, some deref before the last deref
653 	 * will know it, so we just need to update the head_ref accordingly.
654 	 */
655 	if (!existing->owning_root)
656 		existing->owning_root = update->owning_root;
657 
658 	if (update->must_insert_reserved) {
659 		/*
660 		 * If the extent was freed and then reallocated before the
661 		 * delayed ref entries were processed, we can end up with an
662 		 * existing head ref without the must_insert_reserved flag
663 		 * set.
664 		 * Set it again here.
665 		 */
666 		existing->must_insert_reserved = update->must_insert_reserved;
667 		existing->owning_root = update->owning_root;
668 
669 		/*
670 		 * update the num_bytes so we make sure the accounting
671 		 * is done correctly
672 		 */
673 		existing->num_bytes = update->num_bytes;
674 
675 	}
676 
677 	if (update->extent_op) {
678 		if (!existing->extent_op) {
679 			existing->extent_op = update->extent_op;
680 		} else {
681 			if (update->extent_op->update_key) {
682 				memcpy(&existing->extent_op->key,
683 				       &update->extent_op->key,
684 				       sizeof(update->extent_op->key));
685 				existing->extent_op->update_key = true;
686 			}
687 			if (update->extent_op->update_flags) {
688 				existing->extent_op->flags_to_set |=
689 					update->extent_op->flags_to_set;
690 				existing->extent_op->update_flags = true;
691 			}
692 			btrfs_free_delayed_extent_op(update->extent_op);
693 		}
694 	}
695 	/*
696 	 * Update the reference mod on the head to reflect this new operation.
697 	 * We only need the lock for this case because we could be processing it
698 	 * currently; for refs we just added we know we're a-ok.
699 	 */
700 	old_ref_mod = existing->total_ref_mod;
701 	existing->ref_mod += update->ref_mod;
702 	existing->total_ref_mod += update->ref_mod;
703 
704 	/*
705 	 * If we are going from a positive ref mod to a negative or vice
706 	 * versa we need to make sure to adjust pending_csums accordingly.
707 	 * We reserve bytes for csum deletion when adding or updating a ref
708 	 * head, see add_delayed_ref_head() for more details.
709 	 */
710 	if (existing->is_data) {
711 		u64 csum_leaves =
712 			btrfs_csum_bytes_to_leaves(fs_info,
713 						   existing->num_bytes);
714 
715 		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
716 			delayed_refs->pending_csums -= existing->num_bytes;
717 			btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
718 		}
719 		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
720 			delayed_refs->pending_csums += existing->num_bytes;
721 			trans->delayed_ref_csum_deletions += csum_leaves;
722 		}
723 	}
724 
725 	spin_unlock(&existing->lock);
726 }
727 
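/*
 * Initialize a newly allocated ref head from a generic ref: translate the
 * action into the initial ref mod count, record whether the extent's
 * reservation still has to be claimed (must_insert_reserved) and set up the
 * head's locks, rbtree and lists.  If a qgroup record is given, its size and
 * any reserved data space are filled in as well.
 */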
728 static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
729 				  struct btrfs_ref *generic_ref,
730 				  struct btrfs_qgroup_extent_record *qrecord,
731 				  u64 reserved)
732 {
733 	int count_mod = 1;
734 	bool must_insert_reserved = false;
735 
736 	/* If reserved is provided, it must be a data extent. */
737 	BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved);
738 
739 	switch (generic_ref->action) {
740 	case BTRFS_ADD_DELAYED_REF:
741 		/* count_mod is already set to 1. */
742 		break;
743 	case BTRFS_UPDATE_DELAYED_HEAD:
744 		count_mod = 0;
745 		break;
746 	case BTRFS_DROP_DELAYED_REF:
747 		/*
748 		 * The head node stores the sum of all the mods, so dropping a ref
749 		 * should drop the sum in the head node by one.
750 		 */
751 		count_mod = -1;
752 		break;
753 	case BTRFS_ADD_DELAYED_EXTENT:
754 		/*
755 		 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
756 		 * reserved accounting when the extent is finally added, or if a
757 		 * later modification deletes the delayed ref without ever
758 		 * inserting the extent into the extent allocation tree.
759 		 * ref->must_insert_reserved is the flag used to record that
760 		 * accounting mods are required.
761 		 *
762 		 * Once we record must_insert_reserved, switch the action to
763 		 * BTRFS_ADD_DELAYED_REF because other special casing is not
764 		 * required.
765 		 */
766 		must_insert_reserved = true;
767 		break;
768 	}
769 
770 	refcount_set(&head_ref->refs, 1);
771 	head_ref->bytenr = generic_ref->bytenr;
772 	head_ref->num_bytes = generic_ref->num_bytes;
773 	head_ref->ref_mod = count_mod;
774 	head_ref->reserved_bytes = reserved;
775 	head_ref->must_insert_reserved = must_insert_reserved;
776 	head_ref->owning_root = generic_ref->owning_root;
777 	head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA);
778 	head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
779 	head_ref->ref_tree = RB_ROOT_CACHED;
780 	INIT_LIST_HEAD(&head_ref->ref_add_list);
781 	head_ref->tracked = false;
782 	head_ref->processing = false;
783 	head_ref->total_ref_mod = count_mod;
784 	spin_lock_init(&head_ref->lock);
785 	mutex_init(&head_ref->mutex);
786 
787 	/* If not metadata, set an impossible level to help debugging. */
788 	if (generic_ref->type == BTRFS_REF_METADATA)
789 		head_ref->level = generic_ref->tree_ref.level;
790 	else
791 		head_ref->level = U8_MAX;
792 
793 	if (qrecord) {
794 		if (generic_ref->ref_root && reserved) {
795 			qrecord->data_rsv = reserved;
796 			qrecord->data_rsv_refroot = generic_ref->ref_root;
797 		}
798 		qrecord->num_bytes = generic_ref->num_bytes;
799 		qrecord->old_roots = NULL;
800 	}
801 }
802 
803 /*
804  * Helper function to actually insert a head node into the xarray of delayed
805  * ref heads.  This does all the dirty work in terms of maintaining the
806  * correct overall modification count.
807  *
808  * Returns an error pointer in case of an error.
809  */
810 static noinline struct btrfs_delayed_ref_head *
811 add_delayed_ref_head(struct btrfs_trans_handle *trans,
812 		     struct btrfs_delayed_ref_head *head_ref,
813 		     struct btrfs_qgroup_extent_record *qrecord,
814 		     int action, bool *qrecord_inserted_ret)
815 {
816 	struct btrfs_fs_info *fs_info = trans->fs_info;
817 	struct btrfs_delayed_ref_head *existing;
818 	struct btrfs_delayed_ref_root *delayed_refs;
819 	const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
820 	bool qrecord_inserted = false;
821 
822 	delayed_refs = &trans->transaction->delayed_refs;
823 	lockdep_assert_held(&delayed_refs->lock);
824 
825 #if BITS_PER_LONG == 32
826 	if (head_ref->bytenr >= MAX_LFS_FILESIZE) {
827 		if (qrecord)
828 			xa_release(&delayed_refs->dirty_extents, index);
829 		btrfs_err_rl(fs_info,
830 "delayed ref head %llu is beyond 32bit page cache and xarray index limit",
831 			     head_ref->bytenr);
832 		btrfs_err_32bit_limit(fs_info);
833 		return ERR_PTR(-EOVERFLOW);
834 	}
835 #endif
836 
837 	/* Record qgroup extent info if provided */
838 	if (qrecord) {
839 		int ret;
840 
841 		ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
842 						       head_ref->bytenr);
843 		if (ret) {
844 			/* Clean up if insertion fails or item exists. */
845 			xa_release(&delayed_refs->dirty_extents, index);
846 			/* Caller responsible for freeing qrecord on error. */
847 			if (ret < 0)
848 				return ERR_PTR(ret);
849 			kfree(qrecord);
850 		} else {
851 			qrecord_inserted = true;
852 		}
853 	}
854 
855 	trace_add_delayed_ref_head(fs_info, head_ref, action);
856 
857 	existing = xa_load(&delayed_refs->head_refs, index);
858 	if (existing) {
859 		update_existing_head_ref(trans, existing, head_ref);
860 		/*
861 		 * we've updated the existing ref, free the newly
862 		 * allocated ref
863 		 */
864 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
865 		head_ref = existing;
866 	} else {
867 		existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
868 		if (xa_is_err(existing)) {
869 			/* Memory was preallocated by the caller. */
870 			ASSERT(xa_err(existing) != -ENOMEM);
871 			return ERR_PTR(xa_err(existing));
872 		} else if (WARN_ON(existing)) {
873 			/*
874 			 * Shouldn't happen, we just did a lookup before under
875 			 * delayed_refs->lock.
876 			 */
877 			return ERR_PTR(-EEXIST);
878 		}
879 		head_ref->tracked = true;
880 		/*
881 		 * We reserve the amount of bytes needed to delete csums when
882 		 * adding the ref head and not when adding individual drop refs
883 		 * since the csum items are deleted only after running the last
884 		 * delayed drop ref (the data extent's ref count drops to 0).
885 		 */
886 		if (head_ref->is_data && head_ref->ref_mod < 0) {
887 			delayed_refs->pending_csums += head_ref->num_bytes;
888 			trans->delayed_ref_csum_deletions +=
889 				btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes);
890 		}
891 		delayed_refs->num_heads++;
892 		delayed_refs->num_heads_ready++;
893 	}
894 	if (qrecord_inserted_ret)
895 		*qrecord_inserted_ret = qrecord_inserted;
896 
897 	return head_ref;
898 }
899 
900 /*
901  * Initialize the structure which represents a modification to an extent.
902  *
903  * @fs_info:    Internal to the mounted filesystem mount structure.
904  *
905  * @ref:	The structure which is going to be initialized.
906  *
907  * @bytenr:	The logical address of the extent for which a modification is
908  *		going to be recorded.
909  *
910  * @num_bytes:  Size of the extent whose modification is being recorded.
911  *
912  * @ref_root:	The id of the root where this modification has originated, this
913  *		can be either one of the well-known metadata trees or the
914  *		subvolume id which references this extent.
915  *
916  * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
917  *		BTRFS_ADD_DELAYED_EXTENT
918  *
919  * @ref_type:	Holds the type of the extent which is being recorded, can be
920  *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
921  *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
922  *		BTRFS_EXTENT_DATA_REF_KEY when recording data extent
923  */
924 static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
925 				    struct btrfs_delayed_ref_node *ref,
926 				    struct btrfs_ref *generic_ref)
927 {
928 	int action = generic_ref->action;
929 	u64 seq = 0;
930 
931 	if (action == BTRFS_ADD_DELAYED_EXTENT)
932 		action = BTRFS_ADD_DELAYED_REF;
933 
934 	if (is_fstree(generic_ref->ref_root))
935 		seq = atomic64_read(&fs_info->tree_mod_seq);
936 
937 	refcount_set(&ref->refs, 1);
938 	ref->bytenr = generic_ref->bytenr;
939 	ref->num_bytes = generic_ref->num_bytes;
940 	ref->ref_mod = 1;
941 	ref->action = action;
942 	ref->seq = seq;
943 	ref->type = btrfs_ref_type(generic_ref);
944 	ref->ref_root = generic_ref->ref_root;
945 	ref->parent = generic_ref->parent;
946 	RB_CLEAR_NODE(&ref->ref_node);
947 	INIT_LIST_HEAD(&ref->add_list);
948 
949 	if (generic_ref->type == BTRFS_REF_DATA)
950 		ref->data_ref = generic_ref->data_ref;
951 	else
952 		ref->tree_ref = generic_ref->tree_ref;
953 }
954 
955 void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
956 			 bool skip_qgroup)
957 {
958 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
959 	/* If @real_root not set, use @root as fallback */
960 	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
961 #endif
962 	generic_ref->tree_ref.level = level;
963 	generic_ref->type = BTRFS_REF_METADATA;
964 	if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
965 			     (!mod_root || is_fstree(mod_root))))
966 		generic_ref->skip_qgroup = true;
967 	else
968 		generic_ref->skip_qgroup = false;
969 
970 }
971 
972 void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
973 			 u64 mod_root, bool skip_qgroup)
974 {
975 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
976 	/* If @real_root not set, use @root as fallback */
977 	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
978 #endif
979 	generic_ref->data_ref.objectid = ino;
980 	generic_ref->data_ref.offset = offset;
981 	generic_ref->type = BTRFS_REF_DATA;
982 	if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
983 			     (!mod_root || is_fstree(mod_root))))
984 		generic_ref->skip_qgroup = true;
985 	else
986 		generic_ref->skip_qgroup = false;
987 }
988 
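/*
 * Common helper to queue a delayed ref (data or metadata): allocate the ref
 * node, the head and optionally a qgroup extent record, reserve the xarray
 * slots, insert the head (or merge into an existing one) and the ref node
 * under the delayed refs lock, and update the delayed refs block reserve.
 */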
989 static int add_delayed_ref(struct btrfs_trans_handle *trans,
990 			   struct btrfs_ref *generic_ref,
991 			   struct btrfs_delayed_extent_op *extent_op,
992 			   u64 reserved)
993 {
994 	struct btrfs_fs_info *fs_info = trans->fs_info;
995 	struct btrfs_delayed_ref_node *node;
996 	struct btrfs_delayed_ref_head *head_ref;
997 	struct btrfs_delayed_ref_head *new_head_ref;
998 	struct btrfs_delayed_ref_root *delayed_refs;
999 	struct btrfs_qgroup_extent_record *record = NULL;
1000 	const unsigned long index = (generic_ref->bytenr >> fs_info->sectorsize_bits);
1001 	bool qrecord_reserved = false;
1002 	bool qrecord_inserted;
1003 	int action = generic_ref->action;
1004 	bool merged;
1005 	int ret;
1006 
1007 	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
1008 	if (!node)
1009 		return -ENOMEM;
1010 
1011 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1012 	if (!head_ref) {
1013 		ret = -ENOMEM;
1014 		goto free_node;
1015 	}
1016 
1017 	delayed_refs = &trans->transaction->delayed_refs;
1018 
1019 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
1020 		record = kzalloc(sizeof(*record), GFP_NOFS);
1021 		if (!record) {
1022 			ret = -ENOMEM;
1023 			goto free_head_ref;
1024 		}
1025 		if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
1026 			ret = -ENOMEM;
1027 			goto free_record;
1028 		}
1029 		qrecord_reserved = true;
1030 	}
1031 
1032 	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1033 	if (ret) {
1034 		if (qrecord_reserved)
1035 			xa_release(&delayed_refs->dirty_extents, index);
1036 		goto free_record;
1037 	}
1038 
1039 	init_delayed_ref_common(fs_info, node, generic_ref);
1040 	init_delayed_ref_head(head_ref, generic_ref, record, reserved);
1041 	head_ref->extent_op = extent_op;
1042 
1043 	spin_lock(&delayed_refs->lock);
1044 
1045 	/*
1046 	 * insert both the head node and the new ref without dropping
1047 	 * the spin lock
1048 	 */
1049 	new_head_ref = add_delayed_ref_head(trans, head_ref, record,
1050 					    action, &qrecord_inserted);
1051 	if (IS_ERR(new_head_ref)) {
1052 		xa_release(&delayed_refs->head_refs, index);
1053 		spin_unlock(&delayed_refs->lock);
1054 		ret = PTR_ERR(new_head_ref);
1055 		goto free_record;
1056 	}
1057 	head_ref = new_head_ref;
1058 
1059 	merged = insert_delayed_ref(trans, head_ref, node);
1060 	spin_unlock(&delayed_refs->lock);
1061 
1062 	/*
1063 	 * Need to update the delayed_refs_rsv with any changes we may have
1064 	 * made.
1065 	 */
1066 	btrfs_update_delayed_refs_rsv(trans);
1067 
1068 	if (generic_ref->type == BTRFS_REF_DATA)
1069 		trace_add_delayed_data_ref(trans->fs_info, node);
1070 	else
1071 		trace_add_delayed_tree_ref(trans->fs_info, node);
1072 	if (merged)
1073 		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1074 
1075 	if (qrecord_inserted)
1076 		return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
1077 	return 0;
1078 
1079 free_record:
1080 	kfree(record);
1081 free_head_ref:
1082 	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1083 free_node:
1084 	kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1085 	return ret;
1086 }
1087 
1088 /*
1089  * Add a delayed tree ref. This does all of the accounting required to make sure
1090  * the delayed ref is eventually processed before this transaction commits.
1091  */
1092 int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
1093 			       struct btrfs_ref *generic_ref,
1094 			       struct btrfs_delayed_extent_op *extent_op)
1095 {
1096 	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
1097 	return add_delayed_ref(trans, generic_ref, extent_op, 0);
1098 }
1099 
1100 /*
1101  * Add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref.
1102  */
1103 int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
1104 			       struct btrfs_ref *generic_ref,
1105 			       u64 reserved)
1106 {
1107 	ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
1108 	return add_delayed_ref(trans, generic_ref, NULL, reserved);
1109 }
1110 
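/*
 * Record a pending extent op (key and/or flags update) for a metadata extent
 * by adding or updating its ref head with the BTRFS_UPDATE_DELAYED_HEAD
 * action, without queueing any ref nodes.
 */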
1111 int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1112 				u64 bytenr, u64 num_bytes, u8 level,
1113 				struct btrfs_delayed_extent_op *extent_op)
1114 {
1115 	const unsigned long index = (bytenr >> trans->fs_info->sectorsize_bits);
1116 	struct btrfs_delayed_ref_head *head_ref;
1117 	struct btrfs_delayed_ref_head *head_ref_ret;
1118 	struct btrfs_delayed_ref_root *delayed_refs;
1119 	struct btrfs_ref generic_ref = {
1120 		.type = BTRFS_REF_METADATA,
1121 		.action = BTRFS_UPDATE_DELAYED_HEAD,
1122 		.bytenr = bytenr,
1123 		.num_bytes = num_bytes,
1124 		.tree_ref.level = level,
1125 	};
1126 	int ret;
1127 
1128 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1129 	if (!head_ref)
1130 		return -ENOMEM;
1131 
1132 	init_delayed_ref_head(head_ref, &generic_ref, NULL, 0);
1133 	head_ref->extent_op = extent_op;
1134 
1135 	delayed_refs = &trans->transaction->delayed_refs;
1136 
1137 	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1138 	if (ret) {
1139 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1140 		return ret;
1141 	}
1142 
1143 	spin_lock(&delayed_refs->lock);
1144 	head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
1145 					    BTRFS_UPDATE_DELAYED_HEAD, NULL);
1146 	if (IS_ERR(head_ref_ret)) {
1147 		xa_release(&delayed_refs->head_refs, index);
1148 		spin_unlock(&delayed_refs->lock);
1149 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1150 		return PTR_ERR(head_ref_ret);
1151 	}
1152 	spin_unlock(&delayed_refs->lock);
1153 
1154 	/*
1155 	 * Need to update the delayed_refs_rsv with any changes we may have
1156 	 * made.
1157 	 */
1158 	btrfs_update_delayed_refs_rsv(trans);
1159 	return 0;
1160 }
1161 
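/*
 * Drop a reference on a delayed ref node and free it once the last reference
 * is gone.
 */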
1162 void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
1163 {
1164 	if (refcount_dec_and_test(&ref->refs)) {
1165 		WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
1166 		kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
1167 	}
1168 }
1169 
1170 /*
1171  * This does a simple search for the head node for a given extent.  Returns the
1172  * head node if found, or NULL if not.
1173  */
1174 struct btrfs_delayed_ref_head *
1175 btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
1176 			    struct btrfs_delayed_ref_root *delayed_refs,
1177 			    u64 bytenr)
1178 {
1179 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
1180 
1181 	lockdep_assert_held(&delayed_refs->lock);
1182 
1183 	return xa_load(&delayed_refs->head_refs, index);
1184 }
1185 
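/*
 * Comparator used by btrfs_find_delayed_tree_ref() to search a head's rbtree
 * for a tree block ref matching the given root (keyed ref) or parent (shared
 * ref).
 */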
1186 static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
1187 {
1188 	int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY;
1189 
1190 	if (type < entry->type)
1191 		return -1;
1192 	if (type > entry->type)
1193 		return 1;
1194 
1195 	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
1196 		if (root < entry->ref_root)
1197 			return -1;
1198 		if (root > entry->ref_root)
1199 			return 1;
1200 	} else {
1201 		if (parent < entry->parent)
1202 			return -1;
1203 		if (parent > entry->parent)
1204 			return 1;
1205 	}
1206 	return 0;
1207 }
1208 
1209 /*
1210  * Check to see if a given root/parent reference is attached to the head.  This
1211  * only checks for BTRFS_ADD_DELAYED_REF references that match, as that
1212  * indicates the reference exists for the given root or parent.  This is for
1213  * tree blocks only.
1214  *
1215  * @head: the head of the bytenr we're searching.
1216  * @root: the root objectid of the reference if it is a normal reference.
1217  * @parent: the parent if this is a shared backref.
1218  */
1219 bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
1220 				 u64 root, u64 parent)
1221 {
1222 	struct rb_node *node;
1223 	bool found = false;
1224 
1225 	lockdep_assert_held(&head->mutex);
1226 
1227 	spin_lock(&head->lock);
1228 	node = head->ref_tree.rb_root.rb_node;
1229 	while (node) {
1230 		struct btrfs_delayed_ref_node *entry;
1231 		int ret;
1232 
1233 		entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
1234 		ret = find_comp(entry, root, parent);
1235 		if (ret < 0) {
1236 			node = node->rb_left;
1237 		} else if (ret > 0) {
1238 			node = node->rb_right;
1239 		} else {
1240 			/*
1241 			 * We only want to count ADD actions, as drops mean the
1242 			 * ref doesn't exist.
1243 			 */
1244 			if (entry->action == BTRFS_ADD_DELAYED_REF)
1245 				found = true;
1246 			break;
1247 		}
1248 	}
1249 	spin_unlock(&head->lock);
1250 	return found;
1251 }
1252 
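/*
 * Throw away all delayed refs of a transaction, used when aborting or
 * cleaning up a failed transaction.  Heads that still had to insert a
 * reserved extent get their bytes accounted as pinned and released through
 * the error unpin path.
 */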
1253 void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
1254 {
1255 	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
1256 	struct btrfs_fs_info *fs_info = trans->fs_info;
1257 	bool testing = btrfs_is_testing(fs_info);
1258 
1259 	spin_lock(&delayed_refs->lock);
1260 	while (true) {
1261 		struct btrfs_delayed_ref_head *head;
1262 		struct rb_node *n;
1263 		bool pin_bytes = false;
1264 
1265 		head = find_first_ref_head(delayed_refs);
1266 		if (!head)
1267 			break;
1268 
1269 		if (!btrfs_delayed_ref_lock(delayed_refs, head))
1270 			continue;
1271 
1272 		spin_lock(&head->lock);
1273 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
1274 			struct btrfs_delayed_ref_node *ref;
1275 
1276 			ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
1277 			drop_delayed_ref(fs_info, delayed_refs, head, ref);
1278 		}
1279 		if (head->must_insert_reserved)
1280 			pin_bytes = true;
1281 		btrfs_free_delayed_extent_op(head->extent_op);
1282 		btrfs_delete_ref_head(fs_info, delayed_refs, head);
1283 		spin_unlock(&head->lock);
1284 		spin_unlock(&delayed_refs->lock);
1285 		mutex_unlock(&head->mutex);
1286 
1287 		if (!testing && pin_bytes) {
1288 			struct btrfs_block_group *bg;
1289 
1290 			bg = btrfs_lookup_block_group(fs_info, head->bytenr);
1291 			if (WARN_ON_ONCE(bg == NULL)) {
1292 				/*
1293 				 * Unexpected and there's nothing we can do here
1294 				 * because we are in a transaction abort path,
1295 				 * so any errors can only be ignored or reported
1296 				 * while attempting to cleanup all resources.
1297 				 */
1298 				btrfs_err(fs_info,
1299 "block group for delayed ref at %llu was not found while destroying ref head",
1300 					  head->bytenr);
1301 			} else {
1302 				spin_lock(&bg->space_info->lock);
1303 				spin_lock(&bg->lock);
1304 				bg->pinned += head->num_bytes;
1305 				btrfs_space_info_update_bytes_pinned(bg->space_info,
1306 								     head->num_bytes);
1307 				bg->reserved -= head->num_bytes;
1308 				bg->space_info->bytes_reserved -= head->num_bytes;
1309 				spin_unlock(&bg->lock);
1310 				spin_unlock(&bg->space_info->lock);
1311 
1312 				btrfs_put_block_group(bg);
1313 			}
1314 
1315 			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
1316 				head->bytenr + head->num_bytes - 1);
1317 		}
1318 		if (!testing)
1319 			btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1320 		btrfs_put_delayed_ref_head(head);
1321 		cond_resched();
1322 		spin_lock(&delayed_refs->lock);
1323 	}
1324 
1325 	if (!testing)
1326 		btrfs_qgroup_destroy_extent_records(trans);
1327 
1328 	spin_unlock(&delayed_refs->lock);
1329 }
1330 
1331 void __cold btrfs_delayed_ref_exit(void)
1332 {
1333 	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1334 	kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
1335 	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1336 }
1337 
1338 int __init btrfs_delayed_ref_init(void)
1339 {
1340 	btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0);
1341 	if (!btrfs_delayed_ref_head_cachep)
1342 		goto fail;
1343 
1344 	btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
1345 	if (!btrfs_delayed_ref_node_cachep)
1346 		goto fail;
1347 
1348 	btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
1349 	if (!btrfs_delayed_extent_op_cachep)
1350 		goto fail;
1351 
1352 	return 0;
1353 fail:
1354 	btrfs_delayed_ref_exit();
1355 	return -ENOMEM;
1356 }
1357