xref: /linux/fs/btrfs/delayed-ref.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
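/*
 * Illustrative example: if the same backref is added and dropped many
 * times within one transaction, the operations meet in the rb tree and
 * collapse into a single entry via its ref_mod count (see
 * update_existing_ref() below), so the extent allocation tree sees at
 * most one combined update instead of one per modification.
 */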

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->root < ref2->root)
		return -1;
	if (ref1->root > ref2->root)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
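/*
 * The effective sort key, in order of precedence, is:
 *   1) bytenr
 *   2) head status (the head node sorts after all refs for its bytenr)
 *   3) type
 *   4) seq, when compare_seq is set (merging of sequenced refs is not
 *      allowed, so merging compares without it)
 *   5) the type-specific fields compared above (root/parent for tree
 *      refs, plus objectid/offset for data refs)
 */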
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns the existing ref if
 * an entry with the same comparison key (see comp_entry()) is already
 * present, or NULL if the new node was properly inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr.  This returns the delayed ref head
 * if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no
 * exact match is found; the search wraps around to the start of the
 * tree when it runs off the end.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				  u64 bytenr,
				  struct btrfs_delayed_ref_node **last,
				  int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_node *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->rb_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_node,
					 rb_node);
			bytenr = entry->bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}

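/*
 * Try to take the per-head mutex without giving up delayed_refs->lock.
 * If the mutex is contended, the spinlock is dropped (with a reference
 * held so the head cannot be freed) while we block on the mutex, and
 * then retaken.  The head may have been run and removed from the rb
 * tree in the meantime; in that case -EAGAIN is returned so the caller
 * can restart its search.  Returns 0 with the mutex held on success.
 */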
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

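/*
 * Unlink a ref from the rb tree and drop the tree's reference on it,
 * updating the entry accounting as we go.  The delayed ref spinlock is
 * expected to be held.
 */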
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_node *ref)
{
	rb_erase(&ref->rb_node, &delayed_refs->root);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	delayed_refs->num_entries--;
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

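/*
 * Walk backwards from @ref and fold matching refs (same comparison key,
 * seq excluded) into it.  A match with the same action adds its ref_mod
 * to @ref's; a match with the opposite action subtracts, keeping
 * whichever node has the larger ref_mod.  Illustrative example: merging
 * an ADD with ref_mod 1 into a DROP with ref_mod 2 removes the ADD and
 * leaves the DROP with ref_mod 1; if the result reaches 0 the two
 * operations cancelled out completely and the survivor is dropped as
 * well.  Returns the number of refs merged away.
 */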
static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int merged = 0;
	int mod = 0;
	int done = 0;

	node = rb_prev(&ref->rb_node);
	while (node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_prev(node);
		if (next->bytenr != ref->bytenr)
			break;
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		merged++;
		drop_delayed_ref(trans, delayed_refs, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, ref);
			break;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (done)
			break;
		node = rb_prev(&ref->rb_node);
	}

	return merged;
}

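/*
 * Merge all mergeable refs hanging off @head.  Refs with a seq at or
 * above the lowest seq still in use on fs_info->tree_mod_seq_list must
 * be preserved for the tree mod log, so merging stops when it reaches
 * them.  The delayed ref spinlock is expected to be held.
 */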
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_prev(&head->node.rb_node);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;

		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, ref, seq))
			node = rb_prev(&head->node.rb_node);
		else
			node = rb_prev(node);
	}
}

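/*
 * Returns 1 if @seq is at or above the oldest seq still in use on the
 * tree mod log list, meaning the delayed refs tagged with it must be
 * held back until that seq_list element is released; returns 0 when it
 * is safe to process them.
 */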
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %llu, lowest is "
				 "%llu (%p)\n", seq, elem->seq, delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

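/*
 * Collect up to 32 unprocessed ref heads, starting with the first head
 * past @start (wrapping around to the beginning of the rb tree if
 * needed), and chain them onto @cluster.  Heads that are adjacent in
 * the tree are likely to land in the same extent tree leaf, which is
 * the point of clustering them.  Returns 0 if anything was added to
 * the cluster, 1 if no usable heads were found.
 */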
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
		if (ref)
			node = &ref->rb_node;
		else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
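/*
 * Illustrative example: an existing BTRFS_ADD_DELAYED_REF with ref_mod 1
 * combined with a BTRFS_DROP_DELAYED_REF update decrements ref_mod to 0,
 * so the entry is deleted without the extent allocation tree ever being
 * changed for this extent.
 */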
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					struct btrfs_trans_handle *trans,
					struct btrfs_delayed_ref_node *ref,
					u64 bytenr, u64 num_bytes,
					int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

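/*
 * Note on the head accounting above: a head's ref_mod is the running
 * sum of all mods against the extent.  For example, a
 * BTRFS_DROP_DELAYED_REF head (count_mod -1) followed by a
 * BTRFS_ADD_DELAYED_EXTENT for the same bytenr merges back to ref_mod 0,
 * while must_insert_reserved is set on the surviving head (see
 * update_existing_head_ref() above).
 */
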
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action,
					 int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

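/*
 * Note: the ref type chosen above encodes how the block is referenced.
 * A non-zero parent means the block is reached through that specific
 * parent block (BTRFS_SHARED_BLOCK_REF_KEY); parent == 0 means it is
 * referenced directly by ref_root (BTRFS_TREE_BLOCK_REF_KEY).  The same
 * split applies to the data ref helper below.
 */
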
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, level, action,
				   for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

/*
 * add a delayed data ref.  This is similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, owner, offset,
				   action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
				   extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

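/*
 * Note: btrfs_add_delayed_extent_op() above queues only a head node
 * with BTRFS_UPDATE_DELAYED_HEAD, which add_delayed_ref_head() maps to
 * count_mod 0, so the extent's reference count is left alone and only
 * the deferred key/flag update carried in extent_op is applied when the
 * head is run.
 */
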
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}
863