xref: /linux/fs/btrfs/delayed-ref.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
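
/*
 * Rough flow: callers such as btrfs_inc_extent_ref() and
 * btrfs_free_extent() (extent-tree.c) queue work here while modifying
 * trees; at transaction time btrfs_run_delayed_refs() drains the tree,
 * grabbing batches of heads with btrfs_find_ref_cluster() and applying
 * the accumulated ref_mod deltas to the extent allocation tree.
 */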

/*
 * compare two delayed tree backrefs with same bytenr and type:
 * returns <0, 0 or >0 as ref1 sorts before, equal to or after ref2
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type:
 * returns <0, 0 or >0 as ref1 sorts before, equal to or after ref2
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * then by whether the entry is the head node, then by the type of the
 * delayed backref, its sequence number and finally its content.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (ref1->seq < ref2->seq)
		return -1;
	if (ref1->seq > ref2->seq)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
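
/*
 * Note that for a given bytenr, comp_entry() sorts the head node after
 * all plain delayed refs with that bytenr; find_ref_head() below relies
 * on this when it walks right past non-head entries.
 */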

/*
 * insert a new ref into the rbtree.  This returns the existing ref
 * that compares equal to the new node (see comp_entry), or NULL if
 * the new node was inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
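/*
 * Example: with entries queued at bytenr 4096 and 8192,
 * find_ref_head(root, 5120, NULL, 1) hands back an entry at 8192,
 * while find_ref_head(root, 12288, NULL, 1) wraps around to 4096.
 * The only return_bigger caller, btrfs_find_ref_cluster(), skips
 * any non-head node it gets back while scanning forward.
 */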
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				  u64 bytenr,
				  struct btrfs_delayed_ref_node **last,
				  int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_node *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->rb_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_node,
					 rb_node);
			bytenr = entry->bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}

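/*
 * take the mutex on a delayed ref head.  Called with the delayed_refs
 * spinlock held; if the mutex is contended we pin the head, drop the
 * spinlock and sleep on the mutex, then retake the spinlock.  Returns
 * 0 with the mutex held, or -EAGAIN if the head was run and removed
 * from the tree while we slept.
 */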
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

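/*
 * decide whether a delayed ref with sequence number @seq may be run
 * yet: returns 1, meaning hold it back, while the lowest sequence
 * number registered on delayed_refs->seq_head is at or below @seq.
 */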
int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;

	assert_spin_locked(&delayed_refs->lock);
	if (list_empty(&delayed_refs->seq_head))
		return 0;

	elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
	if (seq >= elem->seq) {
		pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
			 seq, elem->seq, delayed_refs);
		return 1;
	}
	return 0;
}

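/*
 * gather a cluster of delayed ref heads for processing: starting just
 * past @start (wrapping to the beginning of the rbtree if needed),
 * collect up to 32 heads that are not already on someone's cluster
 * list.  Returns 0 if anything was added to @cluster, 1 if no usable
 * head was found.
 */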
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
		if (ref)
			node = &ref->rb_node;
		else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must compare equal as per
 * comp_entry(), i.e. describe the same extent and backref.
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
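/*
 * Note: non-head refs are always inserted with ref_mod == 1 (see
 * add_delayed_tree_ref() and add_delayed_data_ref()), which is why a
 * single decrement below is enough to cancel one opposing update.
 */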
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					struct btrfs_trans_handle *trans,
					struct btrfs_delayed_ref_node *ref,
					u64 bytenr, u64 num_bytes,
					int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

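/*
 * Both helpers below tag the new ref with a sequence number from
 * inc_delayed_seq() when need_ref_seq() says the ref matters for
 * backref walking, i.e. (roughly) it lives in a subvolume tree and was
 * not created for COW; see need_ref_seq() in delayed-ref.h for the
 * exact rule.
 */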
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action,
					 int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
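/*
 * The main callers live in extent-tree.c: btrfs_inc_extent_ref()
 * queues a BTRFS_ADD_DELAYED_REF when a block gains a reference,
 * btrfs_free_extent() queues a BTRFS_DROP_DELAYED_REF, and the block
 * allocation path queues a BTRFS_ADD_DELAYED_EXTENT for a freshly
 * allocated block.
 */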
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 0);
	BUG_ON(ret);

	ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, level, action,
				   for_cow);
	BUG_ON(ret);
	if (!need_ref_seq(for_cow, ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 1);
	BUG_ON(ret);

	ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, owner, offset,
				   action, for_cow);
	BUG_ON(ret);
	if (!need_ref_seq(for_cow, ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

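/*
 * record an extent_op (a pending key and/or flags update) against an
 * extent.  This queues a head-only modification, action
 * BTRFS_UPDATE_DELAYED_HEAD, which changes no reference counts and is
 * applied when the head is run.
 */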
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
				   extent_op->is_data);
	BUG_ON(ret);

	if (waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}