1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/sort.h>
9 #include "messages.h"
10 #include "ctree.h"
11 #include "delayed-ref.h"
12 #include "extent-tree.h"
13 #include "transaction.h"
14 #include "qgroup.h"
15 #include "space-info.h"
16 #include "tree-mod-log.h"
17 #include "fs.h"
18 
19 struct kmem_cache *btrfs_delayed_ref_head_cachep;
20 struct kmem_cache *btrfs_delayed_ref_node_cachep;
21 struct kmem_cache *btrfs_delayed_extent_op_cachep;
22 /*
23  * Delayed back reference update tracking.  For subvolume trees
24  * we queue up extent allocations and backref maintenance for
25  * delayed processing.  This avoids deep call chains where we
26  * add extents in the middle of btrfs_search_slot, and it allows
27  * us to buffer up frequently modified backrefs in an rb tree instead
28  * of hammering updates on the extent allocation tree.
29  */
30 
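/*
 * Check whether we are running low on space for delayed refs.
 *
 * Returns true when the delayed refs rsv's size exceeds the space reserved in
 * it plus the space reserved in the global rsv, i.e. when the existing
 * reservations can no longer be counted on to cover the pending delayed refs.
 */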
31 bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
32 {
33 	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
34 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
35 	bool ret = false;
36 	u64 reserved;
37 
38 	spin_lock(&global_rsv->lock);
39 	reserved = global_rsv->reserved;
40 	spin_unlock(&global_rsv->lock);
41 
42 	/*
43 	 * Since the global reserve is just kind of magic we don't really want
44 	 * to rely on it to save our bacon, so if the delayed refs rsv's size is
45 	 * more than the space reserved in it and in the global rsv combined,
46 	 * then it's time to think about bailing.
47 	 */
48 	spin_lock(&delayed_refs_rsv->lock);
49 	reserved += delayed_refs_rsv->reserved;
50 	if (delayed_refs_rsv->size >= reserved)
51 		ret = true;
52 	spin_unlock(&delayed_refs_rsv->lock);
53 	return ret;
54 }
55 
56 /*
57  * Release a ref head's reservation.
58  *
59  * @fs_info:  the filesystem
60  * @nr_refs:  number of delayed refs to drop
61  * @nr_csums: number of csum items to drop
62  *
63  * Drops the delayed ref head's count from the delayed refs rsv and frees any
64  * excess reservation we had.
65  */
66 void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
67 {
68 	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
69 	u64 num_bytes;
70 	u64 released;
71 
72 	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
73 	num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
74 
75 	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
76 	if (released)
77 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
78 					      0, released, 0);
79 }
80 
81 /*
82  * Adjust the size of the delayed refs rsv.
83  *
84  * This is to be called anytime we may have adjusted trans->delayed_ref_updates
85  * or trans->delayed_ref_csum_deletions, it'll calculate the additional size and
86  * add it to the delayed_refs_rsv.
87  */
88 void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
89 {
90 	struct btrfs_fs_info *fs_info = trans->fs_info;
91 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
92 	struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
93 	u64 num_bytes;
94 	u64 reserved_bytes;
95 
96 	if (btrfs_is_testing(fs_info))
97 		return;
98 
99 	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
100 	num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
101 						       trans->delayed_ref_csum_deletions);
102 
103 	if (num_bytes == 0)
104 		return;
105 
106 	/*
107 	 * Try to take num_bytes from the transaction's local delayed reserve.
108 	 * If not possible, try to take as much as it's available. If the local
109 	 * reserve doesn't have enough reserved space, the delayed refs reserve
110 	 * will be refilled next time btrfs_delayed_refs_rsv_refill() is called
111 	 * by someone or if a transaction commit is triggered before that, the
112 	 * global block reserve will be used. We want to minimize using the
113 	 * global block reserve for cases we can account for in advance, to
114 	 * avoid exhausting it and reach -ENOSPC during a transaction commit.
115 	 */
116 	spin_lock(&local_rsv->lock);
117 	reserved_bytes = min(num_bytes, local_rsv->reserved);
118 	local_rsv->reserved -= reserved_bytes;
119 	local_rsv->full = (local_rsv->reserved >= local_rsv->size);
120 	spin_unlock(&local_rsv->lock);
121 
122 	spin_lock(&delayed_rsv->lock);
123 	delayed_rsv->size += num_bytes;
124 	delayed_rsv->reserved += reserved_bytes;
125 	delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
126 	spin_unlock(&delayed_rsv->lock);
127 	trans->delayed_ref_updates = 0;
128 	trans->delayed_ref_csum_deletions = 0;
129 }
130 
131 /*
132  * Adjust the size of the delayed refs block reserve for 1 block group item
133  * insertion, used after allocating a block group.
134  */
135 void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
136 {
137 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
138 
139 	spin_lock(&delayed_rsv->lock);
140 	/*
141 	 * Inserting a block group item does not require changing the free space
142 	 * tree, only the extent tree or the block group tree, so this is all we
143 	 * need.
144 	 */
145 	delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
146 	delayed_rsv->full = false;
147 	spin_unlock(&delayed_rsv->lock);
148 }
149 
150 /*
151  * Adjust the size of the delayed refs block reserve to release space for 1
152  * block group item insertion.
153  */
154 void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
155 {
156 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
157 	const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
158 	u64 released;
159 
160 	released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
161 	if (released > 0)
162 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
163 					      0, released, 0);
164 }
165 
166 /*
167  * Adjust the size of the delayed refs block reserve for 1 block group item
168  * update.
169  */
170 void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
171 {
172 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
173 
174 	spin_lock(&delayed_rsv->lock);
175 	/*
176 	 * Updating a block group item does not result in new nodes/leaves and
177 	 * does not require changing the free space tree, only the extent tree
178 	 * or the block group tree, so this is all we need.
179 	 */
180 	delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
181 	delayed_rsv->full = false;
182 	spin_unlock(&delayed_rsv->lock);
183 }
184 
185 /*
186  * Adjust the size of the delayed refs block reserve to release space for 1
187  * block group item update.
188  */
189 void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
190 {
191 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
192 	const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
193 	u64 released;
194 
195 	released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
196 	if (released > 0)
197 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
198 					      0, released, 0);
199 }
200 
201 /*
202  * Refill based on our delayed refs usage.
203  *
204  * @fs_info: the filesystem
205  * @flush:   control how we can flush for this reservation.
206  *
207  * This will refill the delayed block_rsv up to one item's worth of space and
208  * will return -ENOSPC if we can't make the reservation.
209  */
210 int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
211 				  enum btrfs_reserve_flush_enum flush)
212 {
213 	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
214 	struct btrfs_space_info *space_info = block_rsv->space_info;
215 	u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
216 	u64 num_bytes = 0;
217 	u64 refilled_bytes;
218 	u64 to_free;
219 	int ret = -ENOSPC;
220 
221 	spin_lock(&block_rsv->lock);
222 	if (block_rsv->reserved < block_rsv->size) {
223 		num_bytes = block_rsv->size - block_rsv->reserved;
224 		num_bytes = min(num_bytes, limit);
225 	}
226 	spin_unlock(&block_rsv->lock);
227 
228 	if (!num_bytes)
229 		return 0;
230 
231 	ret = btrfs_reserve_metadata_bytes(space_info, num_bytes, flush);
232 	if (ret)
233 		return ret;
234 
235 	/*
236 	 * We may have raced with someone else, so check again whether the block
237 	 * reserve is still not full and release any excess space.
238 	 */
239 	spin_lock(&block_rsv->lock);
240 	if (block_rsv->reserved < block_rsv->size) {
241 		u64 needed = block_rsv->size - block_rsv->reserved;
242 
243 		if (num_bytes >= needed) {
244 			block_rsv->reserved += needed;
245 			block_rsv->full = true;
246 			to_free = num_bytes - needed;
247 			refilled_bytes = needed;
248 		} else {
249 			block_rsv->reserved += num_bytes;
250 			to_free = 0;
251 			refilled_bytes = num_bytes;
252 		}
253 	} else {
254 		to_free = num_bytes;
255 		refilled_bytes = 0;
256 	}
257 	spin_unlock(&block_rsv->lock);
258 
259 	if (to_free > 0)
260 		btrfs_space_info_free_bytes_may_use(space_info, to_free);
261 
262 	if (refilled_bytes > 0)
263 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
264 					      refilled_bytes, 1);
265 	return 0;
266 }
267 
268 /*
269  * Compare two delayed data backrefs with the same bytenr and type.
270  */
271 static int comp_data_refs(const struct btrfs_delayed_ref_node *ref1,
272 			  const struct btrfs_delayed_ref_node *ref2)
273 {
274 	if (ref1->data_ref.objectid < ref2->data_ref.objectid)
275 		return -1;
276 	if (ref1->data_ref.objectid > ref2->data_ref.objectid)
277 		return 1;
278 	if (ref1->data_ref.offset < ref2->data_ref.offset)
279 		return -1;
280 	if (ref1->data_ref.offset > ref2->data_ref.offset)
281 		return 1;
282 	return 0;
283 }
284 
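/*
 * Compare two delayed ref nodes for ordering in a head's ref tree: first by
 * type, then by parent (for shared refs) or root (plus inode and offset for
 * data refs) and, if @check_seq is set, finally by sequence number.
 */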
285 static int comp_refs(const struct btrfs_delayed_ref_node *ref1,
286 		     const struct btrfs_delayed_ref_node *ref2,
287 		     bool check_seq)
288 {
289 	int ret = 0;
290 
291 	if (ref1->type < ref2->type)
292 		return -1;
293 	if (ref1->type > ref2->type)
294 		return 1;
295 	if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
296 	    ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
297 		if (ref1->parent < ref2->parent)
298 			return -1;
299 		if (ref1->parent > ref2->parent)
300 			return 1;
301 	} else {
302 		if (ref1->ref_root < ref2->ref_root)
303 			return -1;
304 		if (ref1->ref_root > ref2->ref_root)
305 			return 1;
306 		if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
307 			ret = comp_data_refs(ref1, ref2);
308 	}
309 	if (ret)
310 		return ret;
311 	if (check_seq) {
312 		if (ref1->seq < ref2->seq)
313 			return -1;
314 		if (ref1->seq > ref2->seq)
315 			return 1;
316 	}
317 	return 0;
318 }
319 
320 static int cmp_refs_node(const struct rb_node *new, const struct rb_node *exist)
321 {
322 	const struct btrfs_delayed_ref_node *new_node =
323 		rb_entry(new, struct btrfs_delayed_ref_node, ref_node);
324 	const struct btrfs_delayed_ref_node *exist_node =
325 		rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
326 
327 	return comp_refs(new_node, exist_node, true);
328 }
329 
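/*
 * Insert a delayed ref node into the head's ref tree.  Returns NULL on
 * success, or the already existing node that compares equal to @ins.
 */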
330 static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
331 		struct btrfs_delayed_ref_node *ins)
332 {
333 	struct rb_node *node = &ins->ref_node;
334 	struct rb_node *exist = rb_find_add_cached(node, root, cmp_refs_node);
335 
336 	return rb_entry_safe(exist, struct btrfs_delayed_ref_node, ref_node);
337 }
338 
339 static struct btrfs_delayed_ref_head *find_first_ref_head(
340 		struct btrfs_delayed_ref_root *dr)
341 {
342 	unsigned long from = 0;
343 
344 	lockdep_assert_held(&dr->lock);
345 
346 	return xa_find(&dr->head_refs, &from, ULONG_MAX, XA_PRESENT);
347 }
348 
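/*
 * Try to acquire the mutex of a delayed ref head while holding
 * delayed_refs->lock.  If the mutex is contended, the spinlock is dropped and
 * reacquired around a blocking mutex_lock(), so the head may have been
 * processed and untracked in the meantime.
 *
 * Returns true if the head is locked and still tracked, false if it went away
 * while we were blocked (in which case the caller must not use it).
 */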
349 static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
350 				   struct btrfs_delayed_ref_head *head)
351 {
352 	lockdep_assert_held(&delayed_refs->lock);
353 	if (mutex_trylock(&head->mutex))
354 		return true;
355 
356 	refcount_inc(&head->refs);
357 	spin_unlock(&delayed_refs->lock);
358 
359 	mutex_lock(&head->mutex);
360 	spin_lock(&delayed_refs->lock);
361 	if (!head->tracked) {
362 		mutex_unlock(&head->mutex);
363 		btrfs_put_delayed_ref_head(head);
364 		return false;
365 	}
366 	btrfs_put_delayed_ref_head(head);
367 	return true;
368 }
369 
370 static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
371 				    struct btrfs_delayed_ref_root *delayed_refs,
372 				    struct btrfs_delayed_ref_head *head,
373 				    struct btrfs_delayed_ref_node *ref)
374 {
375 	lockdep_assert_held(&head->lock);
376 	rb_erase_cached(&ref->ref_node, &head->ref_tree);
377 	RB_CLEAR_NODE(&ref->ref_node);
378 	if (!list_empty(&ref->add_list))
379 		list_del(&ref->add_list);
380 	btrfs_put_delayed_ref(ref);
381 	btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
382 }
383 
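/*
 * Try to merge @ref with the following refs in the head's ref tree that refer
 * to the same root/parent (ignoring the sequence number) and have a seq below
 * @seq (when @seq is non-zero).
 *
 * Refs with the same action add their ref_mod together, refs with opposite
 * actions cancel out: e.g. an ADD with ref_mod 2 followed by a DROP with
 * ref_mod 1 collapses into a single ADD with ref_mod 1.  A ref whose ref_mod
 * reaches zero is dropped entirely.
 *
 * Returns true if @ref itself was dropped or swapped away, in which case the
 * caller must restart its iteration over the ref tree.
 */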
384 static bool merge_ref(struct btrfs_fs_info *fs_info,
385 		      struct btrfs_delayed_ref_root *delayed_refs,
386 		      struct btrfs_delayed_ref_head *head,
387 		      struct btrfs_delayed_ref_node *ref,
388 		      u64 seq)
389 {
390 	struct btrfs_delayed_ref_node *next;
391 	struct rb_node *node = rb_next(&ref->ref_node);
392 	bool done = false;
393 
394 	while (!done && node) {
395 		int mod;
396 
397 		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
398 		node = rb_next(node);
399 		if (seq && next->seq >= seq)
400 			break;
401 		if (comp_refs(ref, next, false))
402 			break;
403 
404 		if (ref->action == next->action) {
405 			mod = next->ref_mod;
406 		} else {
407 			if (ref->ref_mod < next->ref_mod) {
408 				swap(ref, next);
409 				done = true;
410 			}
411 			mod = -next->ref_mod;
412 		}
413 
414 		drop_delayed_ref(fs_info, delayed_refs, head, next);
415 		ref->ref_mod += mod;
416 		if (ref->ref_mod == 0) {
417 			drop_delayed_ref(fs_info, delayed_refs, head, ref);
418 			done = true;
419 		} else {
420 			/*
421 			 * Can't have multiples of the same ref on a tree block.
422 			 */
423 			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
424 				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
425 		}
426 	}
427 
428 	return done;
429 }
430 
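/*
 * Merge all mergeable refs attached to a delayed ref head, so that fewer
 * modifications need to be applied when the head is run.  Only metadata heads
 * are processed, and refs at or above the lowest tree mod log sequence number
 * are left untouched since tree mod log users may still need them.
 */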
431 void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
432 			      struct btrfs_delayed_ref_root *delayed_refs,
433 			      struct btrfs_delayed_ref_head *head)
434 {
435 	struct btrfs_delayed_ref_node *ref;
436 	struct rb_node *node;
437 	u64 seq = 0;
438 
439 	lockdep_assert_held(&head->lock);
440 
441 	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
442 		return;
443 
444 	/* We don't have too many refs to merge for data. */
445 	if (head->is_data)
446 		return;
447 
448 	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
449 again:
450 	for (node = rb_first_cached(&head->ref_tree); node;
451 	     node = rb_next(node)) {
452 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
453 		if (seq && ref->seq >= seq)
454 			continue;
455 		if (merge_ref(fs_info, delayed_refs, head, ref, seq))
456 			goto again;
457 	}
458 }
459 
460 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
461 {
462 	int ret = 0;
463 	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
464 
465 	if (min_seq != 0 && seq >= min_seq) {
466 		btrfs_debug(fs_info,
467 			    "holding back delayed_ref %llu, lowest is %llu",
468 			    seq, min_seq);
469 		ret = 1;
470 	}
471 
472 	return ret;
473 }
474 
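/*
 * Select the next delayed ref head to process, starting from the position
 * recorded in delayed_refs->run_delayed_start and wrapping around to the
 * beginning of the xarray once the end is reached.
 *
 * The returned head is marked as processing and has its mutex held.  Returns
 * NULL when there is no head left to process, and ERR_PTR(-EAGAIN) if the
 * chosen head went away while waiting for its mutex.
 */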
475 struct btrfs_delayed_ref_head *btrfs_select_ref_head(
476 		const struct btrfs_fs_info *fs_info,
477 		struct btrfs_delayed_ref_root *delayed_refs)
478 {
479 	struct btrfs_delayed_ref_head *head;
480 	unsigned long start_index;
481 	unsigned long found_index;
482 	bool found_head = false;
483 	bool locked;
484 
485 	spin_lock(&delayed_refs->lock);
486 again:
487 	start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
488 	xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
489 		if (!head->processing) {
490 			found_head = true;
491 			break;
492 		}
493 	}
494 	if (!found_head) {
495 		if (delayed_refs->run_delayed_start == 0) {
496 			spin_unlock(&delayed_refs->lock);
497 			return NULL;
498 		}
499 		delayed_refs->run_delayed_start = 0;
500 		goto again;
501 	}
502 
503 	head->processing = true;
504 	WARN_ON(delayed_refs->num_heads_ready == 0);
505 	delayed_refs->num_heads_ready--;
506 	delayed_refs->run_delayed_start = head->bytenr +
507 		head->num_bytes;
508 
509 	locked = btrfs_delayed_ref_lock(delayed_refs, head);
510 	spin_unlock(&delayed_refs->lock);
511 
512 	/*
513 	 * We may have dropped the spin lock to get the head mutex lock, and
514 	 * that might have given someone else time to free the head.  If that's
515 	 * true, it has been removed from our list and we can move on.
516 	 */
517 	if (!locked)
518 		return ERR_PTR(-EAGAIN);
519 
520 	return head;
521 }
522 
523 void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
524 			     struct btrfs_delayed_ref_head *head)
525 {
526 	spin_lock(&delayed_refs->lock);
527 	head->processing = false;
528 	delayed_refs->num_heads_ready++;
529 	spin_unlock(&delayed_refs->lock);
530 	btrfs_delayed_ref_unlock(head);
531 }
532 
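/*
 * Remove a delayed ref head from the xarray of the delayed refs root and
 * update the counters of tracked and ready heads.  Callers must hold both
 * delayed_refs->lock and the head's spinlock.
 */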
533 void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
534 			   struct btrfs_delayed_ref_root *delayed_refs,
535 			   struct btrfs_delayed_ref_head *head)
536 {
537 	const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);
538 
539 	lockdep_assert_held(&delayed_refs->lock);
540 	lockdep_assert_held(&head->lock);
541 
542 	xa_erase(&delayed_refs->head_refs, index);
543 	head->tracked = false;
544 	delayed_refs->num_heads--;
545 	if (!head->processing)
546 		delayed_refs->num_heads_ready--;
547 }
548 
549 struct btrfs_delayed_ref_node *btrfs_select_delayed_ref(struct btrfs_delayed_ref_head *head)
550 {
551 	struct btrfs_delayed_ref_node *ref;
552 
553 	lockdep_assert_held(&head->mutex);
554 	lockdep_assert_held(&head->lock);
555 
556 	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
557 		return NULL;
558 
559 	/*
560 	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
561 	 * This is to prevent a ref count from going down to zero, which deletes
562 	 * the extent item from the extent tree, when there still are references
563 	 * to add, which would fail because they would not find the extent item.
564 	 */
565 	if (!list_empty(&head->ref_add_list))
566 		return list_first_entry(&head->ref_add_list,
567 					struct btrfs_delayed_ref_node, add_list);
568 
569 	ref = rb_entry(rb_first_cached(&head->ref_tree),
570 		       struct btrfs_delayed_ref_node, ref_node);
571 	ASSERT(list_empty(&ref->add_list));
572 	return ref;
573 }
574 
575 /*
576  * Helper to insert the ref_node into the head's ref tree, or merge it with an existing ref.
577  *
578  * Return false if the ref was inserted.
579  * Return true if the ref was merged into an existing one (and therefore can be
580  * freed by the caller).
581  */
582 static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
583 			       struct btrfs_delayed_ref_head *href,
584 			       struct btrfs_delayed_ref_node *ref)
585 {
586 	struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
587 	struct btrfs_delayed_ref_node *exist;
588 	int mod;
589 
590 	spin_lock(&href->lock);
591 	exist = tree_insert(&href->ref_tree, ref);
592 	if (!exist) {
593 		if (ref->action == BTRFS_ADD_DELAYED_REF)
594 			list_add_tail(&ref->add_list, &href->ref_add_list);
595 		spin_unlock(&href->lock);
596 		trans->delayed_ref_updates++;
597 		return false;
598 	}
599 
600 	/* Now we are sure we can merge */
601 	if (exist->action == ref->action) {
602 		mod = ref->ref_mod;
603 	} else {
604 		/* Need to change action */
605 		if (exist->ref_mod < ref->ref_mod) {
606 			exist->action = ref->action;
607 			mod = -exist->ref_mod;
608 			exist->ref_mod = ref->ref_mod;
609 			if (ref->action == BTRFS_ADD_DELAYED_REF)
610 				list_add_tail(&exist->add_list,
611 					      &href->ref_add_list);
612 			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
613 				ASSERT(!list_empty(&exist->add_list));
614 				list_del_init(&exist->add_list);
615 			} else {
616 				ASSERT(0);
617 			}
618 		} else
619 			mod = -ref->ref_mod;
620 	}
621 	exist->ref_mod += mod;
622 
623 	/* Remove the existing ref if its ref_mod dropped to zero. */
624 	if (exist->ref_mod == 0)
625 		drop_delayed_ref(trans->fs_info, root, href, exist);
626 	spin_unlock(&href->lock);
627 	return true;
628 }
629 
630 /*
631  * Helper function to update the accounting in the head ref.
632  * @existing and @update must have the same bytenr.
633  */
634 static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
635 			 struct btrfs_delayed_ref_head *existing,
636 			 struct btrfs_delayed_ref_head *update)
637 {
638 	struct btrfs_delayed_ref_root *delayed_refs =
639 		&trans->transaction->delayed_refs;
640 	struct btrfs_fs_info *fs_info = trans->fs_info;
641 	int old_ref_mod;
642 
643 	BUG_ON(existing->is_data != update->is_data);
644 
645 	spin_lock(&existing->lock);
646 
647 	/*
648 	 * When freeing an extent, we may not know the owning root when we
649 	 * first create the head_ref. However, some deref before the last deref
650 	 * will know it, so we just need to update the head_ref accordingly.
651 	 */
652 	if (!existing->owning_root)
653 		existing->owning_root = update->owning_root;
654 
655 	if (update->must_insert_reserved) {
656 		/*
657 		 * If the extent was freed and then reallocated before the
658 		 * delayed ref entries were processed, we can end up with an
659 		 * existing head ref without the must_insert_reserved flag
660 		 * set.
661 		 * Set it again here.
662 		 */
663 		existing->must_insert_reserved = update->must_insert_reserved;
664 		existing->owning_root = update->owning_root;
665 
666 		/*
667 		 * Update the num_bytes so we make sure the accounting
668 		 * is done correctly.
669 		 */
670 		existing->num_bytes = update->num_bytes;
671 
672 	}
673 
674 	if (update->extent_op) {
675 		if (!existing->extent_op) {
676 			existing->extent_op = update->extent_op;
677 		} else {
678 			if (update->extent_op->update_key) {
679 				memcpy(&existing->extent_op->key,
680 				       &update->extent_op->key,
681 				       sizeof(update->extent_op->key));
682 				existing->extent_op->update_key = true;
683 			}
684 			if (update->extent_op->update_flags) {
685 				existing->extent_op->flags_to_set |=
686 					update->extent_op->flags_to_set;
687 				existing->extent_op->update_flags = true;
688 			}
689 			btrfs_free_delayed_extent_op(update->extent_op);
690 		}
691 	}
692 	/*
693 	 * Update the reference mod on the head to reflect this new operation.
694 	 * We only need the lock for this case because we could be processing it
695 	 * concurrently; for refs we just added we know we're a-ok.
696 	 */
697 	old_ref_mod = existing->total_ref_mod;
698 	existing->ref_mod += update->ref_mod;
699 	existing->total_ref_mod += update->ref_mod;
700 
701 	/*
702 	 * If we are going from a positive ref mod to a negative or vice
703 	 * versa we need to make sure to adjust pending_csums accordingly.
704 	 * We reserve bytes for csum deletion when adding or updating a ref head,
705 	 * see add_delayed_ref_head() for more details.
706 	 */
707 	if (existing->is_data) {
708 		u64 csum_leaves =
709 			btrfs_csum_bytes_to_leaves(fs_info,
710 						   existing->num_bytes);
711 
712 		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
713 			delayed_refs->pending_csums -= existing->num_bytes;
714 			btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
715 		}
716 		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
717 			delayed_refs->pending_csums += existing->num_bytes;
718 			trans->delayed_ref_csum_deletions += csum_leaves;
719 		}
720 	}
721 
722 	spin_unlock(&existing->lock);
723 }
724 
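/*
 * Initialize a delayed ref head from a generic ref and, if given, fill in the
 * qgroup extent record that will be associated with it.  @reserved must only
 * be non-zero for data extents; it is stored in the head's reserved_bytes and,
 * when a root is set, in the qgroup record's data_rsv.
 */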
725 static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
726 				  struct btrfs_ref *generic_ref,
727 				  struct btrfs_qgroup_extent_record *qrecord,
728 				  u64 reserved)
729 {
730 	int count_mod = 1;
731 	bool must_insert_reserved = false;
732 
733 	/* If reserved is provided, it must be a data extent. */
734 	BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved);
735 
736 	switch (generic_ref->action) {
737 	case BTRFS_ADD_DELAYED_REF:
738 		/* count_mod is already set to 1. */
739 		break;
740 	case BTRFS_UPDATE_DELAYED_HEAD:
741 		count_mod = 0;
742 		break;
743 	case BTRFS_DROP_DELAYED_REF:
744 		/*
745 		 * The head node stores the sum of all the mods, so dropping a ref
746 		 * should drop the sum in the head node by one.
747 		 */
748 		count_mod = -1;
749 		break;
750 	case BTRFS_ADD_DELAYED_EXTENT:
751 		/*
752 		 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
753 		 * reserved accounting when the extent is finally added, or if a
754 		 * later modification deletes the delayed ref without ever
755 		 * inserting the extent into the extent allocation tree.
756 		 * ref->must_insert_reserved is the flag used to record that
757 		 * accounting mods are required.
758 		 *
759 		 * Once we record must_insert_reserved, switch the action to
760 		 * BTRFS_ADD_DELAYED_REF because other special casing is not
761 		 * required.
762 		 */
763 		must_insert_reserved = true;
764 		break;
765 	}
766 
767 	refcount_set(&head_ref->refs, 1);
768 	head_ref->bytenr = generic_ref->bytenr;
769 	head_ref->num_bytes = generic_ref->num_bytes;
770 	head_ref->ref_mod = count_mod;
771 	head_ref->reserved_bytes = reserved;
772 	head_ref->must_insert_reserved = must_insert_reserved;
773 	head_ref->owning_root = generic_ref->owning_root;
774 	head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA);
775 	head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
776 	head_ref->ref_tree = RB_ROOT_CACHED;
777 	INIT_LIST_HEAD(&head_ref->ref_add_list);
778 	head_ref->tracked = false;
779 	head_ref->processing = false;
780 	head_ref->total_ref_mod = count_mod;
781 	spin_lock_init(&head_ref->lock);
782 	mutex_init(&head_ref->mutex);
783 
784 	/* If not metadata, set an impossible level to help debugging. */
785 	if (generic_ref->type == BTRFS_REF_METADATA)
786 		head_ref->level = generic_ref->tree_ref.level;
787 	else
788 		head_ref->level = U8_MAX;
789 
790 	if (qrecord) {
791 		if (generic_ref->ref_root && reserved) {
792 			qrecord->data_rsv = reserved;
793 			qrecord->data_rsv_refroot = generic_ref->ref_root;
794 		}
795 		qrecord->num_bytes = generic_ref->num_bytes;
796 		qrecord->old_roots = NULL;
797 	}
798 }
799 
800 /*
801  * Helper function to actually insert a head node into the xarray. This does all
802  * the dirty work in terms of maintaining the correct overall modification
803  * count.
804  *
805  * The caller is responsible for calling kfree() on @qrecord. More specifically,
806  * if this function reports that it did not insert it as noted in
807  * @qrecord_inserted_ret, then it's safe to call kfree() on it.
808  *
809  * Returns an error pointer in case of an error.
810  */
811 static noinline struct btrfs_delayed_ref_head *
812 add_delayed_ref_head(struct btrfs_trans_handle *trans,
813 		     struct btrfs_delayed_ref_head *head_ref,
814 		     struct btrfs_qgroup_extent_record *qrecord,
815 		     int action, bool *qrecord_inserted_ret)
816 {
817 	struct btrfs_fs_info *fs_info = trans->fs_info;
818 	struct btrfs_delayed_ref_head *existing;
819 	struct btrfs_delayed_ref_root *delayed_refs;
820 	const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
821 
822 	/*
823 	 * If 'qrecord_inserted_ret' is provided, then the first thing we need
824 	 * to do is to initialize it to false just in case we have an exit
825 	 * before trying to insert the record.
826 	 */
827 	if (qrecord_inserted_ret)
828 		*qrecord_inserted_ret = false;
829 
830 	delayed_refs = &trans->transaction->delayed_refs;
831 	lockdep_assert_held(&delayed_refs->lock);
832 
833 #if BITS_PER_LONG == 32
834 	if (head_ref->bytenr >= MAX_LFS_FILESIZE) {
835 		if (qrecord)
836 			xa_release(&delayed_refs->dirty_extents, index);
837 		btrfs_err_rl(fs_info,
838 "delayed ref head %llu is beyond 32bit page cache and xarray index limit",
839 			     head_ref->bytenr);
840 		btrfs_err_32bit_limit(fs_info);
841 		return ERR_PTR(-EOVERFLOW);
842 	}
843 #endif
844 
845 	/* Record qgroup extent info if provided */
846 	if (qrecord) {
847 		/*
848 		 * Setting 'qrecord' but not 'qrecord_inserted_ret' will likely
849 		 * result in a memory leak.
850 		 */
851 		ASSERT(qrecord_inserted_ret != NULL);
852 
853 		int ret;
854 
855 		ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
856 						       head_ref->bytenr);
857 		if (ret) {
858 			/* Clean up if insertion fails or item exists. */
859 			xa_release(&delayed_refs->dirty_extents, index);
860 			if (ret < 0)
861 				return ERR_PTR(ret);
862 		} else if (qrecord_inserted_ret) {
863 			*qrecord_inserted_ret = true;
864 		}
865 	}
866 
867 	trace_add_delayed_ref_head(fs_info, head_ref, action);
868 
869 	existing = xa_load(&delayed_refs->head_refs, index);
870 	if (existing) {
871 		update_existing_head_ref(trans, existing, head_ref);
872 		/*
873 		 * We've updated the existing ref, free the newly
874 		 * allocated ref.
875 		 */
876 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
877 		head_ref = existing;
878 	} else {
879 		existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
880 		if (xa_is_err(existing)) {
881 			/* Memory was preallocated by the caller. */
882 			ASSERT(xa_err(existing) != -ENOMEM);
883 			return ERR_PTR(xa_err(existing));
884 		} else if (WARN_ON(existing)) {
885 			/*
886 			 * Shouldn't happen, we just did a lookup before under
887 			 * delayed_refs->lock.
888 			 */
889 			return ERR_PTR(-EEXIST);
890 		}
891 		head_ref->tracked = true;
892 		/*
893 		 * We reserve the amount of bytes needed to delete csums when
894 		 * adding the ref head and not when adding individual drop refs
895 		 * since the csum items are deleted only after running the last
896 		 * delayed drop ref (the data extent's ref count drops to 0).
897 		 */
898 		if (head_ref->is_data && head_ref->ref_mod < 0) {
899 			delayed_refs->pending_csums += head_ref->num_bytes;
900 			trans->delayed_ref_csum_deletions +=
901 				btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes);
902 		}
903 		delayed_refs->num_heads++;
904 		delayed_refs->num_heads_ready++;
905 	}
906 
907 	return head_ref;
908 }
909 
910 /*
911  * Initialize the structure which represents a modification to an extent.
912  *
913  * @fs_info:	The mounted filesystem's information structure.
914  *
915  * @ref:	The delayed ref node which is going to be initialized.
916  *
917  * @generic_ref: Describes the modification being recorded. The fields used
918  *		here are:
919  *
920  *		bytenr:	   The logical address of the extent for which a
921  *			   modification is going to be recorded.
922  *
923  *		num_bytes: Size of the extent whose modification is being
924  *			   recorded.
925  *
926  *		ref_root:  The id of the root where this modification has
927  *			   originated, either one of the well-known metadata
928  *			   trees or the subvolume id which references this extent.
929  *
930  *		action:	   One of BTRFS_ADD_DELAYED_REF, BTRFS_DROP_DELAYED_REF
931  *			   or BTRFS_ADD_DELAYED_EXTENT.
932  *
933  */
934 static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
935 				    struct btrfs_delayed_ref_node *ref,
936 				    struct btrfs_ref *generic_ref)
937 {
938 	int action = generic_ref->action;
939 	u64 seq = 0;
940 
941 	if (action == BTRFS_ADD_DELAYED_EXTENT)
942 		action = BTRFS_ADD_DELAYED_REF;
943 
944 	if (btrfs_is_fstree(generic_ref->ref_root))
945 		seq = atomic64_read(&fs_info->tree_mod_seq);
946 
947 	refcount_set(&ref->refs, 1);
948 	ref->bytenr = generic_ref->bytenr;
949 	ref->num_bytes = generic_ref->num_bytes;
950 	ref->ref_mod = 1;
951 	ref->action = action;
952 	ref->seq = seq;
953 	ref->type = btrfs_ref_type(generic_ref);
954 	ref->ref_root = generic_ref->ref_root;
955 	ref->parent = generic_ref->parent;
956 	RB_CLEAR_NODE(&ref->ref_node);
957 	INIT_LIST_HEAD(&ref->add_list);
958 
959 	if (generic_ref->type == BTRFS_REF_DATA)
960 		ref->data_ref = generic_ref->data_ref;
961 	else
962 		ref->tree_ref = generic_ref->tree_ref;
963 }
964 
965 void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
966 			 bool skip_qgroup)
967 {
968 #ifdef CONFIG_BTRFS_DEBUG
969 	/* If @real_root not set, use @root as fallback */
970 	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
971 #endif
972 	generic_ref->tree_ref.level = level;
973 	generic_ref->type = BTRFS_REF_METADATA;
974 	if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) &&
975 			     (!mod_root || btrfs_is_fstree(mod_root))))
976 		generic_ref->skip_qgroup = true;
977 	else
978 		generic_ref->skip_qgroup = false;
979 
980 }
981 
982 void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
983 			 u64 mod_root, bool skip_qgroup)
984 {
985 #ifdef CONFIG_BTRFS_DEBUG
986 	/* If @real_root not set, use @root as fallback */
987 	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
988 #endif
989 	generic_ref->data_ref.objectid = ino;
990 	generic_ref->data_ref.offset = offset;
991 	generic_ref->type = BTRFS_REF_DATA;
992 	if (skip_qgroup || !(btrfs_is_fstree(generic_ref->ref_root) &&
993 			     (!mod_root || btrfs_is_fstree(mod_root))))
994 		generic_ref->skip_qgroup = true;
995 	else
996 		generic_ref->skip_qgroup = false;
997 }
998 
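/*
 * Common helper to queue a delayed ref: it allocates the ref node, the ref
 * head and (when qgroups are enabled and not skipped) the qgroup record,
 * reserves the xarray slots, and then inserts the head and the ref while
 * holding delayed_refs->lock.  The delayed refs rsv is updated afterwards to
 * account for the new work.
 */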
999 static int add_delayed_ref(struct btrfs_trans_handle *trans,
1000 			   struct btrfs_ref *generic_ref,
1001 			   struct btrfs_delayed_extent_op *extent_op,
1002 			   u64 reserved)
1003 {
1004 	struct btrfs_fs_info *fs_info = trans->fs_info;
1005 	struct btrfs_delayed_ref_node *node;
1006 	struct btrfs_delayed_ref_head *head_ref;
1007 	struct btrfs_delayed_ref_head *new_head_ref;
1008 	struct btrfs_delayed_ref_root *delayed_refs;
1009 	struct btrfs_qgroup_extent_record *record = NULL;
1010 	const unsigned long index = (generic_ref->bytenr >> fs_info->sectorsize_bits);
1011 	bool qrecord_reserved = false;
1012 	bool qrecord_inserted;
1013 	int action = generic_ref->action;
1014 	bool merged;
1015 	int ret;
1016 
1017 	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
1018 	if (!node)
1019 		return -ENOMEM;
1020 
1021 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1022 	if (!head_ref) {
1023 		ret = -ENOMEM;
1024 		goto free_node;
1025 	}
1026 
1027 	delayed_refs = &trans->transaction->delayed_refs;
1028 
1029 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
1030 		record = kzalloc(sizeof(*record), GFP_NOFS);
1031 		if (!record) {
1032 			ret = -ENOMEM;
1033 			goto free_head_ref;
1034 		}
1035 		if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
1036 			ret = -ENOMEM;
1037 			goto free_record;
1038 		}
1039 		qrecord_reserved = true;
1040 	}
1041 
1042 	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1043 	if (ret) {
1044 		if (qrecord_reserved)
1045 			xa_release(&delayed_refs->dirty_extents, index);
1046 		goto free_record;
1047 	}
1048 
1049 	init_delayed_ref_common(fs_info, node, generic_ref);
1050 	init_delayed_ref_head(head_ref, generic_ref, record, reserved);
1051 	head_ref->extent_op = extent_op;
1052 
1053 	spin_lock(&delayed_refs->lock);
1054 
1055 	/*
1056 	 * insert both the head node and the new ref without dropping
1057 	 * the spin lock
1058 	 */
1059 	new_head_ref = add_delayed_ref_head(trans, head_ref, record,
1060 					    action, &qrecord_inserted);
1061 	if (IS_ERR(new_head_ref)) {
1062 		xa_release(&delayed_refs->head_refs, index);
1063 		spin_unlock(&delayed_refs->lock);
1064 		ret = PTR_ERR(new_head_ref);
1065 
1066 		/*
1067 		 * It's only safe to call kfree() on 'qrecord' if
1068 		 * add_delayed_ref_head() has _not_ inserted it for
1069 		 * tracing. Otherwise we need to handle this here.
1070 		 */
1071 		if (!qrecord_reserved || qrecord_inserted)
1072 			goto free_head_ref;
1073 		goto free_record;
1074 	}
1075 	head_ref = new_head_ref;
1076 
1077 	merged = insert_delayed_ref(trans, head_ref, node);
1078 	spin_unlock(&delayed_refs->lock);
1079 
1080 	/*
1081 	 * Need to update the delayed_refs_rsv with any changes we may have
1082 	 * made.
1083 	 */
1084 	btrfs_update_delayed_refs_rsv(trans);
1085 
1086 	if (generic_ref->type == BTRFS_REF_DATA)
1087 		trace_add_delayed_data_ref(trans->fs_info, node);
1088 	else
1089 		trace_add_delayed_tree_ref(trans->fs_info, node);
1090 	if (merged)
1091 		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1092 
1093 	if (qrecord_inserted)
1094 		return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
1095 
1096 	kfree(record);
1097 	return 0;
1098 
1099 free_record:
1100 	kfree(record);
1101 free_head_ref:
1102 	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1103 free_node:
1104 	kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1105 	return ret;
1106 }
1107 
1108 /*
1109  * Add a delayed tree ref. This does all of the accounting required to make sure
1110  * the delayed ref is eventually processed before this transaction commits.
1111  */
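/*
 * Illustrative sketch of a caller, not taken from a real call site; the
 * variables bytenr, parent, root_objectid, level, mod_root and skip_qgroup
 * are placeholders:
 *
 *	struct btrfs_ref ref = {
 *		.action = BTRFS_ADD_DELAYED_REF,
 *		.bytenr = bytenr,
 *		.num_bytes = fs_info->nodesize,
 *		.parent = parent,
 *		.ref_root = root_objectid,
 *	};
 *
 *	btrfs_init_tree_ref(&ref, level, mod_root, skip_qgroup);
 *	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
 */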
1112 int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
1113 			       struct btrfs_ref *generic_ref,
1114 			       struct btrfs_delayed_extent_op *extent_op)
1115 {
1116 	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
1117 	return add_delayed_ref(trans, generic_ref, extent_op, 0);
1118 }
1119 
1120 /*
1121  * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
1122  */
1123 int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
1124 			       struct btrfs_ref *generic_ref,
1125 			       u64 reserved)
1126 {
1127 	ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
1128 	return add_delayed_ref(trans, generic_ref, NULL, reserved);
1129 }
1130 
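/*
 * Queue a deferred update (flags and/or key) for a metadata extent item by
 * attaching @extent_op to the extent's delayed ref head, creating the head
 * with a BTRFS_UPDATE_DELAYED_HEAD action if it does not exist yet.
 */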
1131 int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1132 				u64 bytenr, u64 num_bytes, u8 level,
1133 				struct btrfs_delayed_extent_op *extent_op)
1134 {
1135 	const unsigned long index = (bytenr >> trans->fs_info->sectorsize_bits);
1136 	struct btrfs_delayed_ref_head *head_ref;
1137 	struct btrfs_delayed_ref_head *head_ref_ret;
1138 	struct btrfs_delayed_ref_root *delayed_refs;
1139 	struct btrfs_ref generic_ref = {
1140 		.type = BTRFS_REF_METADATA,
1141 		.action = BTRFS_UPDATE_DELAYED_HEAD,
1142 		.bytenr = bytenr,
1143 		.num_bytes = num_bytes,
1144 		.tree_ref.level = level,
1145 	};
1146 	int ret;
1147 
1148 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1149 	if (!head_ref)
1150 		return -ENOMEM;
1151 
1152 	init_delayed_ref_head(head_ref, &generic_ref, NULL, 0);
1153 	head_ref->extent_op = extent_op;
1154 
1155 	delayed_refs = &trans->transaction->delayed_refs;
1156 
1157 	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1158 	if (ret) {
1159 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1160 		return ret;
1161 	}
1162 
1163 	spin_lock(&delayed_refs->lock);
1164 	head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
1165 					    BTRFS_UPDATE_DELAYED_HEAD, NULL);
1166 	if (IS_ERR(head_ref_ret)) {
1167 		xa_release(&delayed_refs->head_refs, index);
1168 		spin_unlock(&delayed_refs->lock);
1169 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1170 		return PTR_ERR(head_ref_ret);
1171 	}
1172 	spin_unlock(&delayed_refs->lock);
1173 
1174 	/*
1175 	 * Need to update the delayed_refs_rsv with any changes we may have
1176 	 * made.
1177 	 */
1178 	btrfs_update_delayed_refs_rsv(trans);
1179 	return 0;
1180 }
1181 
1182 void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
1183 {
1184 	if (refcount_dec_and_test(&ref->refs)) {
1185 		WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
1186 		kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
1187 	}
1188 }
1189 
1190 /*
1191  * This does a simple search for the head node for a given extent.  Returns the
1192  * head node if found, or NULL if not.
1193  */
1194 struct btrfs_delayed_ref_head *
1195 btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
1196 			    struct btrfs_delayed_ref_root *delayed_refs,
1197 			    u64 bytenr)
1198 {
1199 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
1200 
1201 	lockdep_assert_held(&delayed_refs->lock);
1202 
1203 	return xa_load(&delayed_refs->head_refs, index);
1204 }
1205 
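/*
 * Compare a (root, parent) pair against a delayed ref node for the rbtree
 * search in btrfs_find_delayed_tree_ref().  A non-zero @parent means we are
 * looking for a shared block ref, otherwise for a tree block ref keyed by the
 * root objectid.
 */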
1206 static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
1207 {
1208 	int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY;
1209 
1210 	if (type < entry->type)
1211 		return -1;
1212 	if (type > entry->type)
1213 		return 1;
1214 
1215 	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
1216 		if (root < entry->ref_root)
1217 			return -1;
1218 		if (root > entry->ref_root)
1219 			return 1;
1220 	} else {
1221 		if (parent < entry->parent)
1222 			return -1;
1223 		if (parent > entry->parent)
1224 			return 1;
1225 	}
1226 	return 0;
1227 }
1228 
1229 /*
1230  * Check to see if a given root/parent reference is attached to the head.  This
1231  * only checks for BTRFS_ADD_DELAYED_REF references that match, as that
1232  * indicates the reference exists for the given root or parent.  This is for
1233  * tree blocks only.
1234  *
1235  * @head: the head of the bytenr we're searching.
1236  * @root: the root objectid of the reference if it is a normal reference.
1237  * @parent: the parent if this is a shared backref.
1238  */
1239 bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
1240 				 u64 root, u64 parent)
1241 {
1242 	struct rb_node *node;
1243 	bool found = false;
1244 
1245 	lockdep_assert_held(&head->mutex);
1246 
1247 	spin_lock(&head->lock);
1248 	node = head->ref_tree.rb_root.rb_node;
1249 	while (node) {
1250 		struct btrfs_delayed_ref_node *entry;
1251 		int ret;
1252 
1253 		entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
1254 		ret = find_comp(entry, root, parent);
1255 		if (ret < 0) {
1256 			node = node->rb_left;
1257 		} else if (ret > 0) {
1258 			node = node->rb_right;
1259 		} else {
1260 			/*
1261 			 * We only want to count ADD actions, as drops mean the
1262 			 * ref doesn't exist.
1263 			 */
1264 			if (entry->action == BTRFS_ADD_DELAYED_REF)
1265 				found = true;
1266 			break;
1267 		}
1268 	}
1269 	spin_unlock(&head->lock);
1270 	return found;
1271 }
1272 
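/*
 * Throw away all pending delayed refs of a transaction, used in the
 * transaction abort and cleanup paths.  Extents that still had the
 * must_insert_reserved flag set are returned to the space accounting (pinned
 * and then unpinned), since they will never be inserted into the extent tree.
 */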
1273 void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
1274 {
1275 	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
1276 	struct btrfs_fs_info *fs_info = trans->fs_info;
1277 
1278 	spin_lock(&delayed_refs->lock);
1279 	while (true) {
1280 		struct btrfs_delayed_ref_head *head;
1281 		struct rb_node *n;
1282 		bool pin_bytes = false;
1283 
1284 		head = find_first_ref_head(delayed_refs);
1285 		if (!head)
1286 			break;
1287 
1288 		if (!btrfs_delayed_ref_lock(delayed_refs, head))
1289 			continue;
1290 
1291 		spin_lock(&head->lock);
1292 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
1293 			struct btrfs_delayed_ref_node *ref;
1294 
1295 			ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
1296 			drop_delayed_ref(fs_info, delayed_refs, head, ref);
1297 		}
1298 		if (head->must_insert_reserved)
1299 			pin_bytes = true;
1300 		btrfs_free_delayed_extent_op(head->extent_op);
1301 		btrfs_delete_ref_head(fs_info, delayed_refs, head);
1302 		spin_unlock(&head->lock);
1303 		spin_unlock(&delayed_refs->lock);
1304 		mutex_unlock(&head->mutex);
1305 
1306 		if (!btrfs_is_testing(fs_info) && pin_bytes) {
1307 			struct btrfs_block_group *bg;
1308 
1309 			bg = btrfs_lookup_block_group(fs_info, head->bytenr);
1310 			if (WARN_ON_ONCE(bg == NULL)) {
1311 				/*
1312 				 * Unexpected and there's nothing we can do here
1313 				 * because we are in a transaction abort path,
1314 				 * so any errors can only be ignored or reported
1315 				 * while attempting to cleanup all resources.
1316 				 */
1317 				btrfs_err(fs_info,
1318 "block group for delayed ref at %llu was not found while destroying ref head",
1319 					  head->bytenr);
1320 			} else {
1321 				spin_lock(&bg->space_info->lock);
1322 				spin_lock(&bg->lock);
1323 				bg->pinned += head->num_bytes;
1324 				btrfs_space_info_update_bytes_pinned(bg->space_info,
1325 								     head->num_bytes);
1326 				bg->reserved -= head->num_bytes;
1327 				bg->space_info->bytes_reserved -= head->num_bytes;
1328 				spin_unlock(&bg->lock);
1329 				spin_unlock(&bg->space_info->lock);
1330 
1331 				btrfs_put_block_group(bg);
1332 			}
1333 
1334 			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
1335 				head->bytenr + head->num_bytes - 1);
1336 		}
1337 		if (!btrfs_is_testing(fs_info))
1338 			btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1339 		btrfs_put_delayed_ref_head(head);
1340 		cond_resched();
1341 		spin_lock(&delayed_refs->lock);
1342 	}
1343 
1344 	if (!btrfs_is_testing(fs_info))
1345 		btrfs_qgroup_destroy_extent_records(trans);
1346 
1347 	spin_unlock(&delayed_refs->lock);
1348 }
1349 
1350 void __cold btrfs_delayed_ref_exit(void)
1351 {
1352 	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1353 	kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
1354 	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1355 }
1356 
1357 int __init btrfs_delayed_ref_init(void)
1358 {
1359 	btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0);
1360 	if (!btrfs_delayed_ref_head_cachep)
1361 		return -ENOMEM;
1362 
1363 	btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
1364 	if (!btrfs_delayed_ref_node_cachep)
1365 		goto fail;
1366 
1367 	btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
1368 	if (!btrfs_delayed_extent_op_cachep)
1369 		goto fail;
1370 
1371 	return 0;
1372 fail:
1373 	btrfs_delayed_ref_exit();
1374 	return -ENOMEM;
1375 }
1376