xref: /linux/fs/btrfs/delayed-ref.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/sort.h>
9 #include "messages.h"
10 #include "ctree.h"
11 #include "delayed-ref.h"
12 #include "extent-tree.h"
13 #include "transaction.h"
14 #include "qgroup.h"
15 #include "space-info.h"
16 #include "tree-mod-log.h"
17 #include "fs.h"
18 
19 struct kmem_cache *btrfs_delayed_ref_head_cachep;
20 struct kmem_cache *btrfs_delayed_ref_node_cachep;
21 struct kmem_cache *btrfs_delayed_extent_op_cachep;
22 /*
23  * delayed back reference update tracking.  For subvolume trees
24  * we queue up extent allocations and backref maintenance for
25  * delayed processing.   This avoids deep call chains where we
26  * add extents in the middle of btrfs_search_slot, and it allows
27  * us to buffer up frequently modified backrefs in an rb tree instead
28  * of hammering updates on the extent allocation tree.
29  */
30 
31 bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
32 {
33 	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
34 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
35 	bool ret = false;
36 	u64 reserved;
37 
38 	spin_lock(&global_rsv->lock);
39 	reserved = global_rsv->reserved;
40 	spin_unlock(&global_rsv->lock);
41 
42 	/*
43 	 * Since the global reserve is just kind of magic we don't really want
44 	 * to rely on it to save our bacon, so if our size is more than the
45 	 * delayed_refs_rsv and the global rsv then it's time to think about
46 	 * bailing.
47 	 */
48 	spin_lock(&delayed_refs_rsv->lock);
49 	reserved += delayed_refs_rsv->reserved;
50 	if (delayed_refs_rsv->size >= reserved)
51 		ret = true;
52 	spin_unlock(&delayed_refs_rsv->lock);
53 	return ret;
54 }
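
/*
 * Editor's illustrative example (not part of the original file), using
 * hypothetical numbers: if the global reserve currently has 32M reserved and
 * the delayed refs reserve has 16M reserved but a size of 64M, then
 * 64M >= 32M + 16M and the function returns true, signalling that the queued
 * delayed ref work has outgrown the space we can comfortably cover.
 */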
55 
56 /*
57  * Release a ref head's reservation.
58  *
59  * @fs_info:  the filesystem
60  * @nr_refs:  number of delayed refs to drop
61  * @nr_csums: number of csum items to drop
62  *
63  * Drops the delayed ref head's count from the delayed refs rsv and frees any
64  * excess reservation we had.
65  */
66 void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
67 {
68 	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
69 	u64 num_bytes;
70 	u64 released;
71 
72 	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
73 	num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
74 
75 	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
76 	if (released)
77 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
78 					      0, released, 0);
79 }
80 
81 /*
82  * Adjust the size of the delayed refs rsv.
83  *
84  * This is to be called anytime we may have adjusted trans->delayed_ref_updates
85  * or trans->delayed_ref_csum_deletions. It calculates the additional size and
86  * adds it to the delayed_refs_rsv.
87  */
88 void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
89 {
90 	struct btrfs_fs_info *fs_info = trans->fs_info;
91 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
92 	struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
93 	u64 num_bytes;
94 	u64 reserved_bytes;
95 
96 	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
97 	num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
98 						       trans->delayed_ref_csum_deletions);
99 
100 	if (num_bytes == 0)
101 		return;
102 
103 	/*
104 	 * Try to take num_bytes from the transaction's local delayed reserve.
105 	 * If not possible, try to take as much as is available. If the local
106 	 * reserve doesn't have enough reserved space, the delayed refs reserve
107 	 * will be refilled next time btrfs_delayed_refs_rsv_refill() is called
108 	 * by someone or if a transaction commit is triggered before that, the
109 	 * global block reserve will be used. We want to minimize using the
110 	 * global block reserve for cases we can account for in advance, to
111 	 * avoid exhausting it and reaching -ENOSPC during a transaction commit.
112 	 */
113 	spin_lock(&local_rsv->lock);
114 	reserved_bytes = min(num_bytes, local_rsv->reserved);
115 	local_rsv->reserved -= reserved_bytes;
116 	local_rsv->full = (local_rsv->reserved >= local_rsv->size);
117 	spin_unlock(&local_rsv->lock);
118 
119 	spin_lock(&delayed_rsv->lock);
120 	delayed_rsv->size += num_bytes;
121 	delayed_rsv->reserved += reserved_bytes;
122 	delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
123 	spin_unlock(&delayed_rsv->lock);
124 	trans->delayed_ref_updates = 0;
125 	trans->delayed_ref_csum_deletions = 0;
126 }
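
/*
 * Editor's illustrative example (not part of the original file), using
 * hypothetical numbers: suppose the queued updates amount to num_bytes = 48K
 * while the transaction's local reserve only has 16K reserved.  Then
 * reserved_bytes = min(48K, 16K) = 16K, the local reserve drops to 0, and the
 * delayed refs reserve grows by 48K in size but only by 16K in reserved
 * space.  The remaining 32K is covered later by btrfs_delayed_refs_rsv_refill()
 * or, at transaction commit time, by the global block reserve.
 */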
127 
128 /*
129  * Adjust the size of the delayed refs block reserve for 1 block group item
130  * insertion, used after allocating a block group.
131  */
132 void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
133 {
134 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
135 
136 	spin_lock(&delayed_rsv->lock);
137 	/*
138 	 * Inserting a block group item does not require changing the free space
139 	 * tree, only the extent tree or the block group tree, so this is all we
140 	 * need.
141 	 */
142 	delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
143 	delayed_rsv->full = false;
144 	spin_unlock(&delayed_rsv->lock);
145 }
146 
147 /*
148  * Adjust the size of the delayed refs block reserve to release space for 1
149  * block group item insertion.
150  */
151 void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
152 {
153 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
154 	const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
155 	u64 released;
156 
157 	released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
158 	if (released > 0)
159 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
160 					      0, released, 0);
161 }
162 
163 /*
164  * Adjust the size of the delayed refs block reserve for 1 block group item
165  * update.
166  */
167 void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
168 {
169 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
170 
171 	spin_lock(&delayed_rsv->lock);
172 	/*
173 	 * Updating a block group item does not result in new nodes/leaves and
174 	 * does not require changing the free space tree, only the extent tree
175 	 * or the block group tree, so this is all we need.
176 	 */
177 	delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
178 	delayed_rsv->full = false;
179 	spin_unlock(&delayed_rsv->lock);
180 }
181 
182 /*
183  * Adjust the size of the delayed refs block reserve to release space for 1
184  * block group item update.
185  */
186 void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
187 {
188 	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
189 	const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
190 	u64 released;
191 
192 	released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
193 	if (released > 0)
194 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
195 					      0, released, 0);
196 }
197 
198 /*
199  * Refill based on our delayed refs usage.
200  *
201  * @fs_info: the filesystem
202  * @flush:   control how we can flush for this reservation.
203  *
204  * This will refill the delayed block_rsv with up to 1 item's worth of space and
205  * will return -ENOSPC if we can't make the reservation.
206  */
207 int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
208 				  enum btrfs_reserve_flush_enum flush)
209 {
210 	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
211 	struct btrfs_space_info *space_info = block_rsv->space_info;
212 	u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
213 	u64 num_bytes = 0;
214 	u64 refilled_bytes;
215 	u64 to_free;
216 	int ret = -ENOSPC;
217 
218 	spin_lock(&block_rsv->lock);
219 	if (block_rsv->reserved < block_rsv->size) {
220 		num_bytes = block_rsv->size - block_rsv->reserved;
221 		num_bytes = min(num_bytes, limit);
222 	}
223 	spin_unlock(&block_rsv->lock);
224 
225 	if (!num_bytes)
226 		return 0;
227 
228 	ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
229 	if (ret)
230 		return ret;
231 
232 	/*
233 	 * We may have raced with someone else, so check again whether the block
234 	 * reserve is still not full and release any excess space.
235 	 */
236 	spin_lock(&block_rsv->lock);
237 	if (block_rsv->reserved < block_rsv->size) {
238 		u64 needed = block_rsv->size - block_rsv->reserved;
239 
240 		if (num_bytes >= needed) {
241 			block_rsv->reserved += needed;
242 			block_rsv->full = true;
243 			to_free = num_bytes - needed;
244 			refilled_bytes = needed;
245 		} else {
246 			block_rsv->reserved += num_bytes;
247 			to_free = 0;
248 			refilled_bytes = num_bytes;
249 		}
250 	} else {
251 		to_free = num_bytes;
252 		refilled_bytes = 0;
253 	}
254 	spin_unlock(&block_rsv->lock);
255 
256 	if (to_free > 0)
257 		btrfs_space_info_free_bytes_may_use(fs_info, space_info, to_free);
258 
259 	if (refilled_bytes > 0)
260 		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
261 					      refilled_bytes, 1);
262 	return 0;
263 }
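
/*
 * Editor's illustrative example (not part of the original file): if the
 * reserve is short, at most one delayed ref item's worth of space is
 * requested.  Should another task refill the reserve while this one was
 * blocked in btrfs_reserve_metadata_bytes(), the second check under the lock
 * finds "needed" smaller than num_bytes (possibly zero) and the surplus is
 * returned via btrfs_space_info_free_bytes_may_use() rather than leaked.
 */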
264 
265 /*
266  * compare two delayed data backrefs with same bytenr and type
267  */
268 static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
269 			  struct btrfs_delayed_ref_node *ref2)
270 {
271 	if (ref1->data_ref.objectid < ref2->data_ref.objectid)
272 		return -1;
273 	if (ref1->data_ref.objectid > ref2->data_ref.objectid)
274 		return 1;
275 	if (ref1->data_ref.offset < ref2->data_ref.offset)
276 		return -1;
277 	if (ref1->data_ref.offset > ref2->data_ref.offset)
278 		return 1;
279 	return 0;
280 }
281 
282 static int comp_refs(struct btrfs_delayed_ref_node *ref1,
283 		     struct btrfs_delayed_ref_node *ref2,
284 		     bool check_seq)
285 {
286 	int ret = 0;
287 
288 	if (ref1->type < ref2->type)
289 		return -1;
290 	if (ref1->type > ref2->type)
291 		return 1;
292 	if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
293 	    ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
294 		if (ref1->parent < ref2->parent)
295 			return -1;
296 		if (ref1->parent > ref2->parent)
297 			return 1;
298 	} else {
299 		if (ref1->ref_root < ref2->ref_root)
300 			return -1;
301 		if (ref1->ref_root > ref2->ref_root)
302 			return 1;
303 		if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
304 			ret = comp_data_refs(ref1, ref2);
305 	}
306 	if (ret)
307 		return ret;
308 	if (check_seq) {
309 		if (ref1->seq < ref2->seq)
310 			return -1;
311 		if (ref1->seq > ref2->seq)
312 			return 1;
313 	}
314 	return 0;
315 }
316 
317 static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
318 		struct btrfs_delayed_ref_node *ins)
319 {
320 	struct rb_node **p = &root->rb_root.rb_node;
321 	struct rb_node *node = &ins->ref_node;
322 	struct rb_node *parent_node = NULL;
323 	struct btrfs_delayed_ref_node *entry;
324 	bool leftmost = true;
325 
326 	while (*p) {
327 		int comp;
328 
329 		parent_node = *p;
330 		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
331 				 ref_node);
332 		comp = comp_refs(ins, entry, true);
333 		if (comp < 0) {
334 			p = &(*p)->rb_left;
335 		} else if (comp > 0) {
336 			p = &(*p)->rb_right;
337 			leftmost = false;
338 		} else {
339 			return entry;
340 		}
341 	}
342 
343 	rb_link_node(node, parent_node, p);
344 	rb_insert_color_cached(node, root, leftmost);
345 	return NULL;
346 }
347 
348 static struct btrfs_delayed_ref_head *find_first_ref_head(
349 		struct btrfs_delayed_ref_root *dr)
350 {
351 	unsigned long from = 0;
352 
353 	lockdep_assert_held(&dr->lock);
354 
355 	return xa_find(&dr->head_refs, &from, ULONG_MAX, XA_PRESENT);
356 }
357 
358 static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
359 				   struct btrfs_delayed_ref_head *head)
360 {
361 	lockdep_assert_held(&delayed_refs->lock);
362 	if (mutex_trylock(&head->mutex))
363 		return true;
364 
365 	refcount_inc(&head->refs);
366 	spin_unlock(&delayed_refs->lock);
367 
368 	mutex_lock(&head->mutex);
369 	spin_lock(&delayed_refs->lock);
370 	if (!head->tracked) {
371 		mutex_unlock(&head->mutex);
372 		btrfs_put_delayed_ref_head(head);
373 		return false;
374 	}
375 	btrfs_put_delayed_ref_head(head);
376 	return true;
377 }
378 
379 static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
380 				    struct btrfs_delayed_ref_root *delayed_refs,
381 				    struct btrfs_delayed_ref_head *head,
382 				    struct btrfs_delayed_ref_node *ref)
383 {
384 	lockdep_assert_held(&head->lock);
385 	rb_erase_cached(&ref->ref_node, &head->ref_tree);
386 	RB_CLEAR_NODE(&ref->ref_node);
387 	if (!list_empty(&ref->add_list))
388 		list_del(&ref->add_list);
389 	btrfs_put_delayed_ref(ref);
390 	btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
391 }
392 
393 static bool merge_ref(struct btrfs_fs_info *fs_info,
394 		      struct btrfs_delayed_ref_root *delayed_refs,
395 		      struct btrfs_delayed_ref_head *head,
396 		      struct btrfs_delayed_ref_node *ref,
397 		      u64 seq)
398 {
399 	struct btrfs_delayed_ref_node *next;
400 	struct rb_node *node = rb_next(&ref->ref_node);
401 	bool done = false;
402 
403 	while (!done && node) {
404 		int mod;
405 
406 		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
407 		node = rb_next(node);
408 		if (seq && next->seq >= seq)
409 			break;
410 		if (comp_refs(ref, next, false))
411 			break;
412 
413 		if (ref->action == next->action) {
414 			mod = next->ref_mod;
415 		} else {
416 			if (ref->ref_mod < next->ref_mod) {
417 				swap(ref, next);
418 				done = true;
419 			}
420 			mod = -next->ref_mod;
421 		}
422 
423 		drop_delayed_ref(fs_info, delayed_refs, head, next);
424 		ref->ref_mod += mod;
425 		if (ref->ref_mod == 0) {
426 			drop_delayed_ref(fs_info, delayed_refs, head, ref);
427 			done = true;
428 		} else {
429 			/*
430 			 * Can't have multiples of the same ref on a tree block.
431 			 */
432 			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
433 				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
434 		}
435 	}
436 
437 	return done;
438 }
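
/*
 * Editor's illustrative example (not part of the original file): say a head
 * holds an ADD ref with ref_mod 2 and an otherwise identical DROP ref with
 * ref_mod 1.  The actions differ and the ADD has the larger ref_mod, so no
 * swap happens, mod = -1, the DROP ref is dropped and the ADD ref survives
 * with ref_mod 1.  Had both mods been equal, the surviving ref would have
 * reached ref_mod 0 and been dropped as well, leaving nothing to run.
 */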
439 
440 void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
441 			      struct btrfs_delayed_ref_root *delayed_refs,
442 			      struct btrfs_delayed_ref_head *head)
443 {
444 	struct btrfs_delayed_ref_node *ref;
445 	struct rb_node *node;
446 	u64 seq = 0;
447 
448 	lockdep_assert_held(&head->lock);
449 
450 	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
451 		return;
452 
453 	/* We don't have too many refs to merge for data. */
454 	if (head->is_data)
455 		return;
456 
457 	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
458 again:
459 	for (node = rb_first_cached(&head->ref_tree); node;
460 	     node = rb_next(node)) {
461 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
462 		if (seq && ref->seq >= seq)
463 			continue;
464 		if (merge_ref(fs_info, delayed_refs, head, ref, seq))
465 			goto again;
466 	}
467 }
468 
469 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
470 {
471 	int ret = 0;
472 	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
473 
474 	if (min_seq != 0 && seq >= min_seq) {
475 		btrfs_debug(fs_info,
476 			    "holding back delayed_ref %llu, lowest is %llu",
477 			    seq, min_seq);
478 		ret = 1;
479 	}
480 
481 	return ret;
482 }
483 
484 struct btrfs_delayed_ref_head *btrfs_select_ref_head(
485 		const struct btrfs_fs_info *fs_info,
486 		struct btrfs_delayed_ref_root *delayed_refs)
487 {
488 	struct btrfs_delayed_ref_head *head;
489 	unsigned long start_index;
490 	unsigned long found_index;
491 	bool found_head = false;
492 	bool locked;
493 
494 	spin_lock(&delayed_refs->lock);
495 again:
496 	start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
497 	xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
498 		if (!head->processing) {
499 			found_head = true;
500 			break;
501 		}
502 	}
503 	if (!found_head) {
504 		if (delayed_refs->run_delayed_start == 0) {
505 			spin_unlock(&delayed_refs->lock);
506 			return NULL;
507 		}
508 		delayed_refs->run_delayed_start = 0;
509 		goto again;
510 	}
511 
512 	head->processing = true;
513 	WARN_ON(delayed_refs->num_heads_ready == 0);
514 	delayed_refs->num_heads_ready--;
515 	delayed_refs->run_delayed_start = head->bytenr +
516 		head->num_bytes;
517 
518 	locked = btrfs_delayed_ref_lock(delayed_refs, head);
519 	spin_unlock(&delayed_refs->lock);
520 
521 	/*
522 	 * We may have dropped the spin lock to get the head mutex lock, and
523 	 * that might have given someone else time to free the head.  If that's
524 	 * true, it has been removed from our list and we can move on.
525 	 */
526 	if (!locked)
527 		return ERR_PTR(-EAGAIN);
528 
529 	return head;
530 }
531 
532 void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
533 			     struct btrfs_delayed_ref_head *head)
534 {
535 	spin_lock(&delayed_refs->lock);
536 	head->processing = false;
537 	delayed_refs->num_heads_ready++;
538 	spin_unlock(&delayed_refs->lock);
539 	btrfs_delayed_ref_unlock(head);
540 }
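
/*
 * Editor's illustrative sketch (not part of the original file): the rough
 * shape of a consumer of the two helpers above, loosely modelled on
 * btrfs_run_delayed_refs() in extent-tree.c.  The function name is
 * hypothetical and the "run the queued refs" step is elided.
 */
#if 0
static int example_run_one_head(struct btrfs_fs_info *fs_info,
				struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;
	int ret;

	head = btrfs_select_ref_head(fs_info, delayed_refs);
	if (!head)
		return 0;		/* Nothing is ready to run. */
	if (IS_ERR(head))
		return PTR_ERR(head);	/* -EAGAIN: the head went away, retry. */

	ret = 0;			/* ... run the queued refs here ... */
	if (ret) {
		/* Give the head back so a later attempt can pick it up. */
		btrfs_unselect_ref_head(delayed_refs, head);
		return ret;
	}
	return 0;
}
#endif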
541 
542 void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
543 			   struct btrfs_delayed_ref_root *delayed_refs,
544 			   struct btrfs_delayed_ref_head *head)
545 {
546 	const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);
547 
548 	lockdep_assert_held(&delayed_refs->lock);
549 	lockdep_assert_held(&head->lock);
550 
551 	xa_erase(&delayed_refs->head_refs, index);
552 	head->tracked = false;
553 	delayed_refs->num_heads--;
554 	if (!head->processing)
555 		delayed_refs->num_heads_ready--;
556 }
557 
558 /*
559  * Helper to insert the ref_node into the head's ref tree or merge it with an existing ref.
560  *
561  * Return false if the ref was inserted.
562  * Return true if the ref was merged into an existing one (and therefore can be
563  * freed by the caller).
564  */
565 static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
566 			       struct btrfs_delayed_ref_head *href,
567 			       struct btrfs_delayed_ref_node *ref)
568 {
569 	struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
570 	struct btrfs_delayed_ref_node *exist;
571 	int mod;
572 
573 	spin_lock(&href->lock);
574 	exist = tree_insert(&href->ref_tree, ref);
575 	if (!exist) {
576 		if (ref->action == BTRFS_ADD_DELAYED_REF)
577 			list_add_tail(&ref->add_list, &href->ref_add_list);
578 		spin_unlock(&href->lock);
579 		trans->delayed_ref_updates++;
580 		return false;
581 	}
582 
583 	/* Now we are sure we can merge */
584 	if (exist->action == ref->action) {
585 		mod = ref->ref_mod;
586 	} else {
587 		/* Need to change action */
588 		if (exist->ref_mod < ref->ref_mod) {
589 			exist->action = ref->action;
590 			mod = -exist->ref_mod;
591 			exist->ref_mod = ref->ref_mod;
592 			if (ref->action == BTRFS_ADD_DELAYED_REF)
593 				list_add_tail(&exist->add_list,
594 					      &href->ref_add_list);
595 			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
596 				ASSERT(!list_empty(&exist->add_list));
597 				list_del_init(&exist->add_list);
598 			} else {
599 				ASSERT(0);
600 			}
601 		} else
602 			mod = -ref->ref_mod;
603 	}
604 	exist->ref_mod += mod;
605 
606 	/* remove existing tail if its ref_mod is zero */
607 	if (exist->ref_mod == 0)
608 		drop_delayed_ref(trans->fs_info, root, href, exist);
609 	spin_unlock(&href->lock);
610 	return true;
611 }
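
/*
 * Editor's illustrative example (not part of the original file): if the tree
 * already holds a DROP ref with ref_mod 1 and an otherwise identical ADD ref
 * with ref_mod 2 arrives, the existing node has the smaller ref_mod, so it is
 * flipped to ADD, moved onto ref_add_list, its ref_mod is set to 2 and then
 * adjusted by mod = -1.  The result is a single ADD ref with ref_mod 1, which
 * is the correct net effect of the two operations.
 */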
612 
613 /*
614  * Helper function to update the accounting in the head ref.
615  * The existing and update heads must have the same bytenr.
616  */
617 static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
618 			 struct btrfs_delayed_ref_head *existing,
619 			 struct btrfs_delayed_ref_head *update)
620 {
621 	struct btrfs_delayed_ref_root *delayed_refs =
622 		&trans->transaction->delayed_refs;
623 	struct btrfs_fs_info *fs_info = trans->fs_info;
624 	int old_ref_mod;
625 
626 	BUG_ON(existing->is_data != update->is_data);
627 
628 	spin_lock(&existing->lock);
629 
630 	/*
631 	 * When freeing an extent, we may not know the owning root when we
632 	 * first create the head_ref. However, some deref before the last deref
633 	 * will know it, so we just need to update the head_ref accordingly.
634 	 */
635 	if (!existing->owning_root)
636 		existing->owning_root = update->owning_root;
637 
638 	if (update->must_insert_reserved) {
639 		/* if the extent was freed and then
640 		 * reallocated before the delayed ref
641 		 * entries were processed, we can end up
642 		 * with an existing head ref without
643 		 * the must_insert_reserved flag set.
644 		 * Set it again here
645 		 */
646 		existing->must_insert_reserved = update->must_insert_reserved;
647 		existing->owning_root = update->owning_root;
648 
649 		/*
650 		 * update the num_bytes so we make sure the accounting
651 		 * is done correctly
652 		 */
653 		existing->num_bytes = update->num_bytes;
654 
655 	}
656 
657 	if (update->extent_op) {
658 		if (!existing->extent_op) {
659 			existing->extent_op = update->extent_op;
660 		} else {
661 			if (update->extent_op->update_key) {
662 				memcpy(&existing->extent_op->key,
663 				       &update->extent_op->key,
664 				       sizeof(update->extent_op->key));
665 				existing->extent_op->update_key = true;
666 			}
667 			if (update->extent_op->update_flags) {
668 				existing->extent_op->flags_to_set |=
669 					update->extent_op->flags_to_set;
670 				existing->extent_op->update_flags = true;
671 			}
672 			btrfs_free_delayed_extent_op(update->extent_op);
673 		}
674 	}
675 	/*
676 	 * Update the reference mod on the head to reflect this new operation.
677 	 * We only need the lock in this case because the head could be getting
678 	 * processed right now; for refs we just added we know we're fine.
679 	 */
680 	old_ref_mod = existing->total_ref_mod;
681 	existing->ref_mod += update->ref_mod;
682 	existing->total_ref_mod += update->ref_mod;
683 
684 	/*
685 	 * If we are going from a positive ref mod to a negative one or vice
686 	 * versa, we need to make sure to adjust pending_csums accordingly.
687 	 * We reserve bytes for csum deletion when adding or updating a ref head
688 	 * see add_delayed_ref_head() for more details.
689 	 */
690 	if (existing->is_data) {
691 		u64 csum_leaves =
692 			btrfs_csum_bytes_to_leaves(fs_info,
693 						   existing->num_bytes);
694 
695 		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
696 			delayed_refs->pending_csums -= existing->num_bytes;
697 			btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
698 		}
699 		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
700 			delayed_refs->pending_csums += existing->num_bytes;
701 			trans->delayed_ref_csum_deletions += csum_leaves;
702 		}
703 	}
704 
705 	spin_unlock(&existing->lock);
706 }
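
/*
 * Editor's illustrative example (not part of the original file): for a data
 * extent whose head has total_ref_mod = 1, an update with ref_mod = -2 takes
 * total_ref_mod to -1.  The flip from non-negative to negative means the
 * extent may end up deleted, so its checksum items may need deleting too:
 * num_bytes is added to pending_csums and the matching csum leaves are added
 * to trans->delayed_ref_csum_deletions.  A later flip back to non-negative
 * releases that reservation again.
 */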
707 
708 static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
709 				  struct btrfs_ref *generic_ref,
710 				  struct btrfs_qgroup_extent_record *qrecord,
711 				  u64 reserved)
712 {
713 	int count_mod = 1;
714 	bool must_insert_reserved = false;
715 
716 	/* If reserved is provided, it must be a data extent. */
717 	BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved);
718 
719 	switch (generic_ref->action) {
720 	case BTRFS_ADD_DELAYED_REF:
721 		/* count_mod is already set to 1. */
722 		break;
723 	case BTRFS_UPDATE_DELAYED_HEAD:
724 		count_mod = 0;
725 		break;
726 	case BTRFS_DROP_DELAYED_REF:
727 		/*
728 		 * The head node stores the sum of all the mods, so dropping a ref
729 		 * should drop the sum in the head node by one.
730 		 */
731 		count_mod = -1;
732 		break;
733 	case BTRFS_ADD_DELAYED_EXTENT:
734 		/*
735 		 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
736 		 * reserved accounting when the extent is finally added, or if a
737 		 * later modification deletes the delayed ref without ever
738 		 * inserting the extent into the extent allocation tree.
739 		 * ref->must_insert_reserved is the flag used to record that
740 		 * accounting mods are required.
741 		 *
742 		 * Once we record must_insert_reserved, switch the action to
743 		 * BTRFS_ADD_DELAYED_REF because other special casing is not
744 		 * required.
745 		 */
746 		must_insert_reserved = true;
747 		break;
748 	}
749 
750 	refcount_set(&head_ref->refs, 1);
751 	head_ref->bytenr = generic_ref->bytenr;
752 	head_ref->num_bytes = generic_ref->num_bytes;
753 	head_ref->ref_mod = count_mod;
754 	head_ref->reserved_bytes = reserved;
755 	head_ref->must_insert_reserved = must_insert_reserved;
756 	head_ref->owning_root = generic_ref->owning_root;
757 	head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA);
758 	head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
759 	head_ref->ref_tree = RB_ROOT_CACHED;
760 	INIT_LIST_HEAD(&head_ref->ref_add_list);
761 	head_ref->tracked = false;
762 	head_ref->processing = false;
763 	head_ref->total_ref_mod = count_mod;
764 	spin_lock_init(&head_ref->lock);
765 	mutex_init(&head_ref->mutex);
766 
767 	/* If not metadata, set an impossible level to help debugging. */
768 	if (generic_ref->type == BTRFS_REF_METADATA)
769 		head_ref->level = generic_ref->tree_ref.level;
770 	else
771 		head_ref->level = U8_MAX;
772 
773 	if (qrecord) {
774 		if (generic_ref->ref_root && reserved) {
775 			qrecord->data_rsv = reserved;
776 			qrecord->data_rsv_refroot = generic_ref->ref_root;
777 		}
778 		qrecord->num_bytes = generic_ref->num_bytes;
779 		qrecord->old_roots = NULL;
780 	}
781 }
782 
783 /*
784  * Helper function to actually insert a head node into the head refs xarray.
785  * This does all the dirty work in terms of maintaining the correct
786  * overall modification count.
787  *
788  * Returns an error pointer in case of an error.
789  */
790 static noinline struct btrfs_delayed_ref_head *
791 add_delayed_ref_head(struct btrfs_trans_handle *trans,
792 		     struct btrfs_delayed_ref_head *head_ref,
793 		     struct btrfs_qgroup_extent_record *qrecord,
794 		     int action, bool *qrecord_inserted_ret)
795 {
796 	struct btrfs_fs_info *fs_info = trans->fs_info;
797 	struct btrfs_delayed_ref_head *existing;
798 	struct btrfs_delayed_ref_root *delayed_refs;
799 	const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
800 	bool qrecord_inserted = false;
801 
802 	delayed_refs = &trans->transaction->delayed_refs;
803 	lockdep_assert_held(&delayed_refs->lock);
804 
805 #if BITS_PER_LONG == 32
806 	if (head_ref->bytenr >= MAX_LFS_FILESIZE) {
807 		if (qrecord)
808 			xa_release(&delayed_refs->dirty_extents, index);
809 		btrfs_err_rl(fs_info,
810 "delayed ref head %llu is beyond 32bit page cache and xarray index limit",
811 			     head_ref->bytenr);
812 		btrfs_err_32bit_limit(fs_info);
813 		return ERR_PTR(-EOVERFLOW);
814 	}
815 #endif
816 
817 	/* Record qgroup extent info if provided */
818 	if (qrecord) {
819 		int ret;
820 
821 		ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
822 						       head_ref->bytenr);
823 		if (ret) {
824 			/* Clean up if insertion fails or item exists. */
825 			xa_release(&delayed_refs->dirty_extents, index);
826 			/* Caller responsible for freeing qrecord on error. */
827 			if (ret < 0)
828 				return ERR_PTR(ret);
829 			kfree(qrecord);
830 		} else {
831 			qrecord_inserted = true;
832 		}
833 	}
834 
835 	trace_add_delayed_ref_head(fs_info, head_ref, action);
836 
837 	existing = xa_load(&delayed_refs->head_refs, index);
838 	if (existing) {
839 		update_existing_head_ref(trans, existing, head_ref);
840 		/*
841 		 * we've updated the existing ref, free the newly
842 		 * allocated ref
843 		 */
844 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
845 		head_ref = existing;
846 	} else {
847 		existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
848 		if (xa_is_err(existing)) {
849 			/* Memory was preallocated by the caller. */
850 			ASSERT(xa_err(existing) != -ENOMEM);
851 			return ERR_PTR(xa_err(existing));
852 		} else if (WARN_ON(existing)) {
853 			/*
854 			 * Shouldn't happen we just did a lookup before under
855 			 * delayed_refs->lock.
856 			 */
857 			return ERR_PTR(-EEXIST);
858 		}
859 		head_ref->tracked = true;
860 		/*
861 		 * We reserve the amount of bytes needed to delete csums when
862 		 * adding the ref head and not when adding individual drop refs
863 		 * since the csum items are deleted only after running the last
864 		 * delayed drop ref (the data extent's ref count drops to 0).
865 		 */
866 		if (head_ref->is_data && head_ref->ref_mod < 0) {
867 			delayed_refs->pending_csums += head_ref->num_bytes;
868 			trans->delayed_ref_csum_deletions +=
869 				btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes);
870 		}
871 		delayed_refs->num_heads++;
872 		delayed_refs->num_heads_ready++;
873 	}
874 	if (qrecord_inserted_ret)
875 		*qrecord_inserted_ret = qrecord_inserted;
876 
877 	return head_ref;
878 }
879 
880 /*
881  * Initialize the structure which represents a modification to an extent.
882  *
883  * @fs_info:    The internal fs_info structure of the mounted filesystem.
884  *
885  * @ref:	The structure which is going to be initialized.
886  *
887  * @bytenr:	The logical address of the extent for which a modification is
888  *		going to be recorded.
889  *
890  * @num_bytes:  Size of the extent whose modification is being recorded.
891  *
892  * @ref_root:	The id of the root where this modification has originated, this
893  *		can be either one of the well-known metadata trees or the
894  *		subvolume id which references this extent.
895  *
896  * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
897  *		BTRFS_ADD_DELAYED_EXTENT
898  *
899  * @ref_type:	Holds the type of the extent which is being recorded, can be
900  *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
901  *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
902  *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent.
903  */
904 static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
905 				    struct btrfs_delayed_ref_node *ref,
906 				    struct btrfs_ref *generic_ref)
907 {
908 	int action = generic_ref->action;
909 	u64 seq = 0;
910 
911 	if (action == BTRFS_ADD_DELAYED_EXTENT)
912 		action = BTRFS_ADD_DELAYED_REF;
913 
914 	if (is_fstree(generic_ref->ref_root))
915 		seq = atomic64_read(&fs_info->tree_mod_seq);
916 
917 	refcount_set(&ref->refs, 1);
918 	ref->bytenr = generic_ref->bytenr;
919 	ref->num_bytes = generic_ref->num_bytes;
920 	ref->ref_mod = 1;
921 	ref->action = action;
922 	ref->seq = seq;
923 	ref->type = btrfs_ref_type(generic_ref);
924 	ref->ref_root = generic_ref->ref_root;
925 	ref->parent = generic_ref->parent;
926 	RB_CLEAR_NODE(&ref->ref_node);
927 	INIT_LIST_HEAD(&ref->add_list);
928 
929 	if (generic_ref->type == BTRFS_REF_DATA)
930 		ref->data_ref = generic_ref->data_ref;
931 	else
932 		ref->tree_ref = generic_ref->tree_ref;
933 }
934 
935 void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
936 			 bool skip_qgroup)
937 {
938 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
939 	/* If @real_root not set, use @root as fallback */
940 	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
941 #endif
942 	generic_ref->tree_ref.level = level;
943 	generic_ref->type = BTRFS_REF_METADATA;
944 	if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
945 			     (!mod_root || is_fstree(mod_root))))
946 		generic_ref->skip_qgroup = true;
947 	else
948 		generic_ref->skip_qgroup = false;
949 
950 }
951 
952 void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
953 			 u64 mod_root, bool skip_qgroup)
954 {
955 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
956 	/* If @real_root not set, use @root as fallback */
957 	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
958 #endif
959 	generic_ref->data_ref.objectid = ino;
960 	generic_ref->data_ref.offset = offset;
961 	generic_ref->type = BTRFS_REF_DATA;
962 	if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
963 			     (!mod_root || is_fstree(mod_root))))
964 		generic_ref->skip_qgroup = true;
965 	else
966 		generic_ref->skip_qgroup = false;
967 }
968 
969 static int add_delayed_ref(struct btrfs_trans_handle *trans,
970 			   struct btrfs_ref *generic_ref,
971 			   struct btrfs_delayed_extent_op *extent_op,
972 			   u64 reserved)
973 {
974 	struct btrfs_fs_info *fs_info = trans->fs_info;
975 	struct btrfs_delayed_ref_node *node;
976 	struct btrfs_delayed_ref_head *head_ref;
977 	struct btrfs_delayed_ref_head *new_head_ref;
978 	struct btrfs_delayed_ref_root *delayed_refs;
979 	struct btrfs_qgroup_extent_record *record = NULL;
980 	const unsigned long index = (generic_ref->bytenr >> fs_info->sectorsize_bits);
981 	bool qrecord_reserved = false;
982 	bool qrecord_inserted;
983 	int action = generic_ref->action;
984 	bool merged;
985 	int ret;
986 
987 	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
988 	if (!node)
989 		return -ENOMEM;
990 
991 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
992 	if (!head_ref) {
993 		ret = -ENOMEM;
994 		goto free_node;
995 	}
996 
997 	delayed_refs = &trans->transaction->delayed_refs;
998 
999 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
1000 		record = kzalloc(sizeof(*record), GFP_NOFS);
1001 		if (!record) {
1002 			ret = -ENOMEM;
1003 			goto free_head_ref;
1004 		}
1005 		if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
1006 			ret = -ENOMEM;
1007 			goto free_record;
1008 		}
1009 		qrecord_reserved = true;
1010 	}
1011 
1012 	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1013 	if (ret) {
1014 		if (qrecord_reserved)
1015 			xa_release(&delayed_refs->dirty_extents, index);
1016 		goto free_record;
1017 	}
1018 
1019 	init_delayed_ref_common(fs_info, node, generic_ref);
1020 	init_delayed_ref_head(head_ref, generic_ref, record, reserved);
1021 	head_ref->extent_op = extent_op;
1022 
1023 	spin_lock(&delayed_refs->lock);
1024 
1025 	/*
1026 	 * insert both the head node and the new ref without dropping
1027 	 * the spin lock
1028 	 */
1029 	new_head_ref = add_delayed_ref_head(trans, head_ref, record,
1030 					    action, &qrecord_inserted);
1031 	if (IS_ERR(new_head_ref)) {
1032 		xa_release(&delayed_refs->head_refs, index);
1033 		spin_unlock(&delayed_refs->lock);
1034 		ret = PTR_ERR(new_head_ref);
1035 		goto free_record;
1036 	}
1037 	head_ref = new_head_ref;
1038 
1039 	merged = insert_delayed_ref(trans, head_ref, node);
1040 	spin_unlock(&delayed_refs->lock);
1041 
1042 	/*
1043 	 * Need to update the delayed_refs_rsv with any changes we may have
1044 	 * made.
1045 	 */
1046 	btrfs_update_delayed_refs_rsv(trans);
1047 
1048 	if (generic_ref->type == BTRFS_REF_DATA)
1049 		trace_add_delayed_data_ref(trans->fs_info, node);
1050 	else
1051 		trace_add_delayed_tree_ref(trans->fs_info, node);
1052 	if (merged)
1053 		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1054 
1055 	if (qrecord_inserted)
1056 		return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
1057 	return 0;
1058 
1059 free_record:
1060 	kfree(record);
1061 free_head_ref:
1062 	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1063 free_node:
1064 	kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
1065 	return ret;
1066 }
1067 
1068 /*
1069  * Add a delayed tree ref. This does all of the accounting required to make sure
1070  * the delayed ref is eventually processed before this transaction commits.
1071  */
1072 int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
1073 			       struct btrfs_ref *generic_ref,
1074 			       struct btrfs_delayed_extent_op *extent_op)
1075 {
1076 	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
1077 	return add_delayed_ref(trans, generic_ref, extent_op, 0);
1078 }
1079 
1080 /*
1081  * Add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref().
1082  */
1083 int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
1084 			       struct btrfs_ref *generic_ref,
1085 			       u64 reserved)
1086 {
1087 	ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
1088 	return add_delayed_ref(trans, generic_ref, NULL, reserved);
1089 }
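
/*
 * Editor's illustrative sketch (not part of the original file): queueing a
 * delayed data ref for an extent that the file at (ino, file_offset) starts
 * referencing.  The function name and concrete values are hypothetical; real
 * callers live in extent-tree.c and friends.  The reserved argument is 0 here
 * as a placeholder, i.e. no reserved data space is handed over for the
 * qgroup record in this sketch.
 */
#if 0
static int example_queue_data_ref(struct btrfs_trans_handle *trans,
				  u64 root_id, u64 bytenr, u64 num_bytes,
				  u64 ino, u64 file_offset)
{
	struct btrfs_ref ref = {
		.action = BTRFS_ADD_DELAYED_REF,
		.bytenr = bytenr,
		.num_bytes = num_bytes,
		.ref_root = root_id,
		.owning_root = root_id,
	};

	btrfs_init_data_ref(&ref, ino, file_offset, 0, false);
	return btrfs_add_delayed_data_ref(trans, &ref, 0);
}
#endif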
1090 
1091 int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1092 				u64 bytenr, u64 num_bytes, u8 level,
1093 				struct btrfs_delayed_extent_op *extent_op)
1094 {
1095 	const unsigned long index = (bytenr >> trans->fs_info->sectorsize_bits);
1096 	struct btrfs_delayed_ref_head *head_ref;
1097 	struct btrfs_delayed_ref_head *head_ref_ret;
1098 	struct btrfs_delayed_ref_root *delayed_refs;
1099 	struct btrfs_ref generic_ref = {
1100 		.type = BTRFS_REF_METADATA,
1101 		.action = BTRFS_UPDATE_DELAYED_HEAD,
1102 		.bytenr = bytenr,
1103 		.num_bytes = num_bytes,
1104 		.tree_ref.level = level,
1105 	};
1106 	int ret;
1107 
1108 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1109 	if (!head_ref)
1110 		return -ENOMEM;
1111 
1112 	init_delayed_ref_head(head_ref, &generic_ref, NULL, 0);
1113 	head_ref->extent_op = extent_op;
1114 
1115 	delayed_refs = &trans->transaction->delayed_refs;
1116 
1117 	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
1118 	if (ret) {
1119 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1120 		return ret;
1121 	}
1122 
1123 	spin_lock(&delayed_refs->lock);
1124 	head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
1125 					    BTRFS_UPDATE_DELAYED_HEAD, NULL);
1126 	if (IS_ERR(head_ref_ret)) {
1127 		xa_release(&delayed_refs->head_refs, index);
1128 		spin_unlock(&delayed_refs->lock);
1129 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1130 		return PTR_ERR(head_ref_ret);
1131 	}
1132 	spin_unlock(&delayed_refs->lock);
1133 
1134 	/*
1135 	 * Need to update the delayed_refs_rsv with any changes we may have
1136 	 * made.
1137 	 */
1138 	btrfs_update_delayed_refs_rsv(trans);
1139 	return 0;
1140 }
1141 
1142 void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
1143 {
1144 	if (refcount_dec_and_test(&ref->refs)) {
1145 		WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
1146 		kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
1147 	}
1148 }
1149 
1150 /*
1151  * This does a simple search for the head node for a given extent.  Returns the
1152  * head node if found, or NULL if not.
1153  */
1154 struct btrfs_delayed_ref_head *
1155 btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
1156 			    struct btrfs_delayed_ref_root *delayed_refs,
1157 			    u64 bytenr)
1158 {
1159 	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);
1160 
1161 	lockdep_assert_held(&delayed_refs->lock);
1162 
1163 	return xa_load(&delayed_refs->head_refs, index);
1164 }
1165 
1166 static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
1167 {
1168 	int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY;
1169 
1170 	if (type < entry->type)
1171 		return -1;
1172 	if (type > entry->type)
1173 		return 1;
1174 
1175 	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
1176 		if (root < entry->ref_root)
1177 			return -1;
1178 		if (root > entry->ref_root)
1179 			return 1;
1180 	} else {
1181 		if (parent < entry->parent)
1182 			return -1;
1183 		if (parent > entry->parent)
1184 			return 1;
1185 	}
1186 	return 0;
1187 }
1188 
1189 /*
1190  * Check to see if a given root/parent reference is attached to the head.  This
1191  * only checks for BTRFS_ADD_DELAYED_REF references that match, as that
1192  * indicates the reference exists for the given root or parent.  This is for
1193  * tree blocks only.
1194  *
1195  * @head: the head of the bytenr we're searching.
1196  * @root: the root objectid of the reference if it is a normal reference.
1197  * @parent: the parent if this is a shared backref.
1198  */
1199 bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
1200 				 u64 root, u64 parent)
1201 {
1202 	struct rb_node *node;
1203 	bool found = false;
1204 
1205 	lockdep_assert_held(&head->mutex);
1206 
1207 	spin_lock(&head->lock);
1208 	node = head->ref_tree.rb_root.rb_node;
1209 	while (node) {
1210 		struct btrfs_delayed_ref_node *entry;
1211 		int ret;
1212 
1213 		entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
1214 		ret = find_comp(entry, root, parent);
1215 		if (ret < 0) {
1216 			node = node->rb_left;
1217 		} else if (ret > 0) {
1218 			node = node->rb_right;
1219 		} else {
1220 			/*
1221 			 * We only want to count ADD actions, as drops mean the
1222 			 * ref doesn't exist.
1223 			 */
1224 			if (entry->action == BTRFS_ADD_DELAYED_REF)
1225 				found = true;
1226 			break;
1227 		}
1228 	}
1229 	spin_unlock(&head->lock);
1230 	return found;
1231 }
1232 
1233 void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
1234 {
1235 	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
1236 	struct btrfs_fs_info *fs_info = trans->fs_info;
1237 
1238 	spin_lock(&delayed_refs->lock);
1239 	while (true) {
1240 		struct btrfs_delayed_ref_head *head;
1241 		struct rb_node *n;
1242 		bool pin_bytes = false;
1243 
1244 		head = find_first_ref_head(delayed_refs);
1245 		if (!head)
1246 			break;
1247 
1248 		if (!btrfs_delayed_ref_lock(delayed_refs, head))
1249 			continue;
1250 
1251 		spin_lock(&head->lock);
1252 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
1253 			struct btrfs_delayed_ref_node *ref;
1254 
1255 			ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
1256 			drop_delayed_ref(fs_info, delayed_refs, head, ref);
1257 		}
1258 		if (head->must_insert_reserved)
1259 			pin_bytes = true;
1260 		btrfs_free_delayed_extent_op(head->extent_op);
1261 		btrfs_delete_ref_head(fs_info, delayed_refs, head);
1262 		spin_unlock(&head->lock);
1263 		spin_unlock(&delayed_refs->lock);
1264 		mutex_unlock(&head->mutex);
1265 
1266 		if (pin_bytes) {
1267 			struct btrfs_block_group *bg;
1268 
1269 			bg = btrfs_lookup_block_group(fs_info, head->bytenr);
1270 			if (WARN_ON_ONCE(bg == NULL)) {
1271 				/*
1272 				 * Unexpected and there's nothing we can do here
1273 				 * because we are in a transaction abort path,
1274 				 * so any errors can only be ignored or reported
1275 				 * while attempting to cleanup all resources.
1276 				 */
1277 				btrfs_err(fs_info,
1278 "block group for delayed ref at %llu was not found while destroying ref head",
1279 					  head->bytenr);
1280 			} else {
1281 				spin_lock(&bg->space_info->lock);
1282 				spin_lock(&bg->lock);
1283 				bg->pinned += head->num_bytes;
1284 				btrfs_space_info_update_bytes_pinned(fs_info,
1285 								     bg->space_info,
1286 								     head->num_bytes);
1287 				bg->reserved -= head->num_bytes;
1288 				bg->space_info->bytes_reserved -= head->num_bytes;
1289 				spin_unlock(&bg->lock);
1290 				spin_unlock(&bg->space_info->lock);
1291 
1292 				btrfs_put_block_group(bg);
1293 			}
1294 
1295 			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
1296 				head->bytenr + head->num_bytes - 1);
1297 		}
1298 		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1299 		btrfs_put_delayed_ref_head(head);
1300 		cond_resched();
1301 		spin_lock(&delayed_refs->lock);
1302 	}
1303 	btrfs_qgroup_destroy_extent_records(trans);
1304 
1305 	spin_unlock(&delayed_refs->lock);
1306 }
1307 
1308 void __cold btrfs_delayed_ref_exit(void)
1309 {
1310 	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1311 	kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
1312 	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1313 }
1314 
1315 int __init btrfs_delayed_ref_init(void)
1316 {
1317 	btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0);
1318 	if (!btrfs_delayed_ref_head_cachep)
1319 		goto fail;
1320 
1321 	btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
1322 	if (!btrfs_delayed_ref_node_cachep)
1323 		goto fail;
1324 
1325 	btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
1326 	if (!btrfs_delayed_extent_op_cachep)
1327 		goto fail;
1328 
1329 	return 0;
1330 fail:
1331 	btrfs_delayed_ref_exit();
1332 	return -ENOMEM;
1333 }
1334