// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with the same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with the same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

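/*
 * Compare two delayed ref nodes of the same bytenr.  Ordering is by ref
 * type, then by the type specific fields, and optionally by sequence
 * number: callers that merge refs ignore the seq, while callers that
 * insert into a head's ref tree include it so that refs with different
 * seqs never collide.
 */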
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

/*
 * Insert a new delayed ref head into the head ref rbtree.  Returns the
 * existing entry if one is already queued for this bytenr, or NULL when
 * the new head was inserted.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

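/*
 * Insert a delayed ref node into a head's ref tree, ordered by
 * comp_refs() including the sequence number.  Returns the identical
 * existing node if one is found, so the caller can merge into it, or
 * NULL when the new node was linked in.
 */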
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found. But if no bigger one is found then the first node of the
 * ref head tree will be returned.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first_cached(&dr->href_root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

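/*
 * Try to take the mutex of a delayed ref head while holding
 * delayed_refs->lock.  If the trylock fails we grab a reference on the
 * head, drop the spinlock, sleep on the mutex and then retake the
 * spinlock.  Returns 0 with the mutex held, or -EAGAIN if the head was
 * removed from the href tree while we slept, in which case the caller
 * must look the head up again.  A sketch of the caller side retry loop
 * (illustrative only):
 *
 *	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 *	if (head && btrfs_delayed_ref_lock(delayed_refs, head) == -EAGAIN)
 *		goto search_again;
 */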
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

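/*
 * Remove a delayed ref node from its head: unlink it from the head's
 * ref tree and ref_add_list, drop the node's reference and update the
 * entry counters.  Caller must hold head->lock.
 */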
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

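/*
 * Merge @ref with the refs that follow it in the head's tree and
 * compare equal (same type, root/parent and, for data refs, objectid
 * and offset).  Refs with a sequence number >= @seq are still needed
 * by a tree mod log reader and are left alone.  Opposite actions
 * cancel each other's ref_mod.  Returns true if @ref itself was
 * dropped or swapped away, in which case the caller must restart its
 * iteration.
 */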
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}

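/*
 * Merge all mergeable refs of a head.  Only done for metadata heads,
 * since data heads do not accumulate enough refs to make merging
 * worthwhile.  The lowest sequence number on
 * fs_info->tree_mod_seq_list acts as a barrier: refs at or above it
 * must be preserved for tree mod log readers.
 */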
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

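/*
 * Return 1 if processing of a delayed ref with the given sequence
 * number must be postponed because a tree mod log user still holds a
 * lower (or equal) sequence number, 0 otherwise.
 */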
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

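/*
 * Pick the next delayed ref head to run.  The search starts at
 * run_delayed_start and wraps around once, skipping heads that another
 * task is already processing.  The returned head is marked as
 * processing and run_delayed_start is advanced past it; returns NULL
 * when every remaining head is busy or the tree is empty.
 */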
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(delayed_refs, start, true);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(delayed_refs, start, true);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr + head->num_bytes;
	return head;
}

/*
 * Helper to insert a ref_node into a head's ref tree, or merge it with
 * an existing ref that compares equal.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove the existing ref if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/*
		 * If the extent was freed and then reallocated before the
		 * delayed ref entries were processed, we can end up with an
		 * existing head ref without the must_insert_reserved flag
		 * set.  Set it again here.
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * we only need the lock for this case because we could be processing
	 * it currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}

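/*
 * Initialize the fields of a delayed ref head.  The head's ref_mod and
 * total_ref_mod start at +1 for an add, -1 for a drop and 0 for a mere
 * head update; BTRFS_ADD_DELAYED_EXTENT additionally sets
 * must_insert_reserved so the reserved accounting is resolved when the
 * ref is run.  If a qgroup record is passed in, it is primed with the
 * extent's bytenr and size.
 */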
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(qrecord && head_ref->qgroup_ref_root &&
			head_ref->qgroup_reserved &&
			existing->qgroup_ref_root &&
			existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (head_ref->is_data && head_ref->ref_mod < 0)
			delayed_refs->pending_csums += head_ref->num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 * @ref:	The structure which is going to be initialized.
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 * @num_bytes:  Size of the extent whose modification is being recorded.
 * @ref_root:	The id of the root where this modification has originated, this
 *		can be either one of the well-known metadata trees or the
 *		subvolume id which references this extent.
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
	int ret;
	u8 ref_type;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      ref_root, 0, action, false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;
}

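/*
 * Typical usage (sketch only, argument values are illustrative): when a
 * subvolume tree block @buf gains a reference from root @root_objectid,
 * the caller would queue
 *
 *	btrfs_add_delayed_tree_ref(trans, buf->start, buf->len, 0,
 *				   root_objectid, btrfs_header_level(buf),
 *				   BTRFS_ADD_DELAYED_REF, NULL,
 *				   &old_ref_mod, &new_ref_mod);
 *
 * and the backref update is applied when the delayed refs are run.
 */
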
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int ret;
	u8 ref_type;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}

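/*
 * Queue a head-only update for an extent: no reference is added or
 * dropped, only the flags (and possibly the key) recorded in
 * @extent_op are applied when the head is run.
 */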
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
			      false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}
955