// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "tree-checker.h"

/* Just arbitrary numbers so we can be sure one of these happened. */
#define BACKREF_FOUND_SHARED     6
#define BACKREF_FOUND_NOT_SHARED 7

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	u64 num_bytes;
	struct extent_inode_elem *next;
};
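
/*
 * Illustrative sketch (the inum/offset values are made up, not taken from
 * the code): results for one parent leaf are chained through ->next, with
 * the most recently added entry first:
 *
 *	*eie -> { inum=258, offset=4096 } -> { inum=257, offset=0 } -> NULL
 */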

static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			      const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      struct extent_inode_elem **eie)
{
	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
	u64 offset = key->offset;
	struct extent_inode_elem *e;
	const u64 *root_ids;
	int root_count;
	bool cached;

	if (!ctx->ignore_extent_item_pos &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;

		data_offset = btrfs_file_extent_offset(eb, fi);

		if (ctx->extent_item_pos < data_offset ||
		    ctx->extent_item_pos >= data_offset + data_len)
			return 1;
		offset += ctx->extent_item_pos - data_offset;
	}

	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
		goto add_inode_elem;

	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
				   &root_count);
	if (!cached)
		goto add_inode_elem;

	for (int i = 0; i < root_count; i++) {
		int ret;

		ret = ctx->indirect_ref_iterator(key->objectid, offset,
						 data_len, root_ids[i],
						 ctx->user_ctx);
		if (ret)
			return ret;
	}

add_inode_elem:
	e = kmalloc_obj(*e, GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = offset;
	e->num_bytes = data_len;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			     const struct extent_buffer *eb,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;
	/*
	 * From the shared data ref, we only have the leaf but we need
	 * the key. Thus, we must look into all items and see whether we
	 * find one (or some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != ctx->bytenr)
			continue;

		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
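
/*
 * A hedged sketch of how backref kinds land in these trees (the parent and
 * root numbers below are made up):
 *
 *	SHARED_DATA_REF  parent=12582912     -> preftrees.direct
 *	EXTENT_DATA_REF  root=5, key known   -> preftrees.indirect
 *	TREE_BLOCK_REF   key not yet known   -> preftrees.indirect_missing_keys
 */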

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	struct btrfs_backref_share_check_ctx *ctx;
	struct btrfs_root *root;
	u64 inum;
	u64 data_bytenr;
	u64 data_extent_gen;
	/*
	 * Counts number of inodes that refer to an extent (different inodes in
	 * the same root or different roots) that we could find. The sharedness
	 * check typically stops once this counter gets greater than 1, so it
	 * may not reflect the total number of inodes.
	 */
	int share_count;
	/*
	 * The number of times we found our inode refers to the data extent we
	 * are determining the sharedness of. In other words, how many file
	 * extent items we could find for our inode that point to our target
	 * data extent. The value we get here after finishing the extent
	 * sharedness check may be smaller than reality, but if it ends up
	 * being greater than 1, then we know for sure the inode has multiple
	 * file extent items that point to our data extent, and we can safely
	 * assume it's useful to cache the sharedness check result.
	 */
	int self_ref_count;
	bool have_delayed_delete_refs;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref), 0, 0, NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(const struct prelim_ref *ref1,
			      const struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

static int prelim_ref_rb_add_cmp(const struct rb_node *new,
				 const struct rb_node *exist)
{
	const struct prelim_ref *ref_new =
		rb_entry(new, struct prelim_ref, rbnode);
	const struct prelim_ref *ref_exist =
		rb_entry(exist, struct prelim_ref, rbnode);

	/*
	 * prelim_ref_compare() expects the first parameter as the existing one,
	 * different from the rb_find_add_cached() order.
	 */
	return prelim_ref_compare(ref_exist, ref_new);
}

static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount, const struct prelim_ref *newref)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;

	if (newref->root_id == btrfs_root_id(sc->root) &&
	    newref->wanted_disk_byte == sc->data_bytenr &&
	    newref->key_for_search.objectid == sc->inum)
		sc->self_ref_count += newref->count;
}
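
/*
 * Minimal sketch of the transitions update_share_count() reacts to
 * (illustrative calls only, as they would occur when refs are merged):
 *
 *	update_share_count(sc, 1, 0, ref);	// live -> dead: share_count--
 *	update_share_count(sc, 0, 1, ref);	// dead -> live: share_count++
 *	update_share_count(sc, 1, 2, ref);	// stays live: no change
 */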

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node *exist;

	root = &preftree->root;
	exist = rb_find_add_cached(&newref->rbnode, root, prelim_ref_rb_add_cmp);
	if (exist) {
		struct prelim_ref *ref = rb_entry(exist, struct prelim_ref, rbnode);
		/* Identical refs, merge them and free @newref */
		struct extent_inode_elem *eie = ref->inode_list;

		while (eie && eie->next)
			eie = eie->next;

		if (!eie)
			ref->inode_list = newref->inode_list;
		else
			eie->next = newref->inode_list;
		trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
							preftree->count);
		/*
		 * A delayed ref can have newref->count < 0.
		 * The ref->count is updated to follow any
		 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
		 */
		update_share_count(sc, ref->count,
					ref->count + newref->count, newref);
		ref->count += newref->count;
		free_pref(newref);
		return;
	}

	update_share_count(sc, 0, newref->count, newref);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}
/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}
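
/*
 * Hedged examples of how the table further above maps onto these two helpers
 * (all values are made up, error handling omitted):
 *
 *	// shared tree block ref: the parent logical is known, we are done
 *	ret = add_direct_ref(fs_info, preftrees, level + 1, parent_logical,
 *			     bytenr, 1, NULL, GFP_NOFS);
 *
 *	// indirect data ref: resolved later from (root_id, key)
 *	ret = add_indirect_ref(fs_info, preftrees, root_id, &key, 0, bytenr,
 *			       count, sc, GFP_NOFS);
 */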

static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}
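
/*
 * Note (a hedged reading of the lookup above): the zero-initialized @target
 * matches direct data refs exactly because those are added with level 0,
 * root_id 0 and a zeroed key, so prelim_ref_compare() falls through to the
 * ->parent comparison. Typical use while walking a leaf:
 *
 *	if (is_shared_data_backref(preftrees, eb->start))
 *		// the leaf at eb->start is also a shared data backref parent
 */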

static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;
	u8 type;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for a normal backref but the bytenr of this leaf
	 *    matches a shared data backref.
	 * 3. The leaf owner is not equal to the root we are searching for.
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for a normal backref but the bytenr of this
		 * leaf matches a shared data backref, OR the leaf owner is not
		 * equal to the root we are searching for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (ctx->time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(eb, fi);
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (!ctx->skip_inode_ref_list) {
				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && !ctx->skip_inode_ref_list) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, ctx->time_seq);
	}

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	else if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
				struct btrfs_path *path,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;
	/*
	 * If we're using search_commit_root we could possibly be holding locks
	 * on other tree nodes. This happens when qgroups do backref walks when
	 * adding new delayed refs. To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root
	 * usage here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(ctx->fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (ctx->time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, ctx->time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such a case we set the search key's offset to zero
	 * to make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (ctx->time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);

	btrfs_debug(ctx->fs_info,
"search slot in root %llu (level %d, ref count %d) returned %d for key " BTRFS_KEY_FMT,
		    ref->root_id, level, ref->count, ret,
		    BTRFS_KEY_FMT_VALUE(&ref->key_for_search));
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
				 struct btrfs_path *path,
				 struct preftrees *preftrees,
				 struct share_check *sc)
{
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;
		int ret2;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && ref->root_id != btrfs_root_id(sc->root)) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		ret2 = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
		/*
		 * We can only tolerate -ENOENT; otherwise we should catch the
		 * error and return directly.
		 */
		if (ret2 == -ENOENT) {
			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (ret2) {
			free_pref(ref);
			ret = ret2;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}
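
/*
 * Minimal sketch of the overall resolution pipeline described above, as
 * driven by find_parent_nodes() (simplified, error handling and the delayed
 * ref step omitted):
 *
 *	add_inline_refs(...);		// fill direct + indirect trees
 *	add_keyed_refs(...);		// same, from keyed extent tree items
 *	add_missing_keys(...);		// indirect_missing_keys -> indirect
 *	resolve_indirect_refs(...);	// indirect -> direct
 *	// finally walk preftrees.direct and emit parents/roots
 */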

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		struct btrfs_tree_parent_check check = { 0 };

		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		check.level = ref->level - 1;
		check.owner_root = ref->root_id;

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * Add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the preftrees.
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_key *key_ptr = NULL;
			/* The owner of a tree block ref is the level. */
			int level = btrfs_delayed_ref_owner(node);

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
					       key_ptr, level + 1, node->bytenr,
					       count, sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/*
			 * SHARED DIRECT METADATA backref
			 *
			 * The owner of a tree block ref is the level.
			 */
			int level = btrfs_delayed_ref_owner(node);

			ret = add_direct_ref(fs_info, preftrees, level + 1,
					     node->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			key.objectid = btrfs_delayed_ref_owner(node);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_delayed_ref_offset(node);

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			ret = add_direct_ref(fs_info, preftrees, 0, node->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}
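
/*
 * Worked example (illustrative): a delayed ADD ref (ref_mod 1) and a delayed
 * DROP ref (ref_mod 1) queued for the same (root, objectid, offset) merge in
 * the preftree to a ref with count 1 + (-1) = 0, which resolve_indirect_refs()
 * then skips, so the cancelled reference never contributes to the result.
 */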

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_path *path,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);

	if (ctx->check_extent_item) {
		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
		if (ret)
			return ret;
	}

	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (unlikely(type == BTRFS_REF_TYPE_INVALID))
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(ctx->fs_info, preftrees,
					     *info_level + 1, offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
					     ctx->bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(ctx->fs_info, preftrees,
						       root, &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_EXTENT_OWNER_REF_KEY:
			ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
			break;
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
			  struct btrfs_root *extent_root,
			  struct btrfs_path *path,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != ctx->bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, ctx->bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, ctx->bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(fs_info, preftrees, root,
						       &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
					struct btrfs_root *root,
					u64 bytenr, int level, bool *is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return false;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return false;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	entry = &ctx->path_cache_entries[level];

	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)
		return false;

	/*
	 * We cached a false result, but the last snapshot generation of the
	 * root changed, so we now have a snapshot. Don't trust the result.
	 */
	if (!entry->is_shared &&
	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
		return false;

	/*
	 * If we cached a true result and the last generation used for dropping
	 * a root changed, we can not trust the result, because the dropped root
	 * could be a snapshot sharing this extent buffer.
	 */
	if (entry->is_shared &&
	    entry->gen != btrfs_get_last_root_drop_gen(fs_info))
		return false;

	*is_shared = entry->is_shared;
	/*
	 * If the node at this level is shared, then all nodes below are also
	 * shared. Currently some of the nodes below may be marked as not shared
	 * because we have just switched from one leaf to another, and switched
	 * also other nodes above the leaf and below the current level, so mark
	 * them as shared.
	 */
	if (*is_shared) {
		for (int i = 0; i < level; i++) {
			ctx->path_cache_entries[i].is_shared = true;
			ctx->path_cache_entries[i].gen = entry->gen;
		}
	}

	return true;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
				       struct btrfs_root *root,
				       u64 bytenr, int level, bool is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;
	u64 gen;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	if (is_shared)
		gen = btrfs_get_last_root_drop_gen(fs_info);
	else
		gen = btrfs_root_last_snapshot(&root->root_item);

	entry = &ctx->path_cache_entries[level];
	entry->bytenr = bytenr;
	entry->is_shared = is_shared;
	entry->gen = gen;

	/*
	 * If we found an extent buffer is shared, set the cache result for all
	 * extent buffers below it to true. As nodes in the path are COWed,
	 * their sharedness is moved to their children, and if a leaf is COWed,
	 * then the sharedness of a data extent becomes direct: the refcount of
	 * the data extent is increased in the extent item in the extent tree.
	 */
	if (is_shared) {
		for (int i = 0; i < level; i++) {
			entry = &ctx->path_cache_entries[i];
			entry->is_shared = is_shared;
			entry->gen = gen;
		}
	}
}
1357 
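/*
 * Sketch of how this path cache is typically consulted and then filled
 * during a sharedness check (simplified, not a verbatim call sequence from
 * this file):
 *
 *	bool cached, shared;
 *
 *	cached = lookup_backref_shared_cache(ctx, root, eb->start, level,
 *					     &shared);
 *	if (!cached) {
 *		shared = ...;	// walk backrefs for eb->start
 *		store_backref_shared_cache(ctx, root, eb->start, level,
 *					   shared);
 *	}
 */
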
/*
 * This adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates and
 * resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * @ctx:     Backref walking context object, must not be NULL.
 * @sc:      If !NULL, then immediately return BACKREF_FOUND_SHARED when a
 *           shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
			     struct share_check *sc)
{
	struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	if (unlikely(!root)) {
		btrfs_err(ctx->fs_info,
			  "missing extent root for extent at bytenr %llu",
			  ctx->bytenr);
		return -EUCLEAN;
	}

	/* Roots ulist is not needed when using a sharedness check context. */
	if (sc)
		ASSERT(ctx->roots == NULL);

	key.objectid = ctx->bytenr;
	if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!ctx->trans) {
		path->search_commit_root = true;
		path->skip_locking = true;
	}

	if (ctx->time_seq == BTRFS_SEQ_LAST)
		path->skip_locking = true;

again:
	head = NULL;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (unlikely(ret == 0)) {
		/*
		 * Key with offset -1 found, there would have to exist an extent
		 * item with such offset, but this is out of the valid range.
		 */
		ret = -EUCLEAN;
		goto out;
	}

	if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
	    ctx->time_seq != BTRFS_SEQ_LAST) {
		/*
		 * We have a specific time_seq we care about and trans which
		 * means we have the path lock, we need to grab the ref head and
		 * lock it so we have a consistent view of the refs at the given
		 * time.
		 */
		delayed_refs = &ctx->trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs,
						   ctx->bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == ctx->bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(ctx, path, &info_level,
					      &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(ctx, root, path, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	/*
	 * If we have a share context and we reached here, it means the extent
	 * is not directly shared (no multiple reference items for it),
	 * otherwise we would have exited earlier with a return value of
	 * BACKREF_FOUND_SHARED after processing delayed references or while
	 * processing inline or keyed references from the extent tree.
	 * The extent may however be indirectly shared through shared subtrees
	 * as a result from creating snapshots, so we determine below what is
	 * its parent node, in case we are dealing with a metadata extent, or
	 * what's the leaf (or leaves), from a fs tree, that has a file extent
	 * item pointing to it in case we are dealing with a data extent.
	 */
	ASSERT(extent_is_shared(sc) == 0);

	/*
	 * If we are here for a data extent and we have a share_check structure
	 * it means the data extent is not directly shared (does not have
	 * multiple reference items), so we have to check if a path in the fs
	 * tree (going from the root node down to the leaf that has the file
	 * extent item pointing to the data extent) is shared, that is, if any
	 * of the extent buffers in the path is referenced by other trees.
	 */
	if (sc && ctx->bytenr == sc->data_bytenr) {
		/*
		 * If our data extent is from a generation more recent than the
		 * last generation used to snapshot the root, then we know that
		 * it can not be shared through subtrees, so we can skip
		 * resolving indirect references, there's no point in
		 * determining the extent buffers for the path from the fs tree
		 * root node down to the leaf that has the file extent item that
		 * points to the data extent.
		 */
		if (sc->data_extent_gen >
		    btrfs_root_last_snapshot(&sc->root->root_item)) {
			ret = BACKREF_FOUND_NOT_SHARED;
			goto out;
		}

		/*
		 * If we are only determining if a data extent is shared or not
		 * and the corresponding file extent item is located in the same
		 * leaf as the previous file extent item, we can skip resolving
		 * indirect references for a data extent, since the fs tree path
		 * is the same (same leaf, so same path). We skip as long as the
		 * cached result for the leaf is valid and only if there's only
		 * one file extent item pointing to the data extent, because in
		 * the case of multiple file extent items, they may be located
		 * in different leaves and therefore we have multiple paths.
		 */
		if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
		    sc->self_ref_count == 1) {
			bool cached;
			bool is_shared;

			cached = lookup_backref_shared_cache(sc->ctx, sc->root,
						     sc->ctx->curr_leaf_bytenr,
						     0, &is_shared);
			if (cached) {
				if (is_shared)
					ret = BACKREF_FOUND_SHARED;
				else
					ret = BACKREF_FOUND_NOT_SHARED;
				goto out;
			}
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(ctx->fs_info, &preftrees, !path->skip_locking);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (!ctx->skip_inode_ref_list && !ref->inode_list &&
			    ref->level == 0) {
				struct btrfs_tree_parent_check check = { 0 };
				struct extent_buffer *eb;

				check.level = ref->level;

				eb = read_tree_block(ctx->fs_info, ref->parent,
						     &check);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				}

				if (!path->skip_locking)
					btrfs_tree_read_lock(eb);
				ret = find_extent_in_eb(ctx, eb, &eie);
				if (!path->skip_locking)
					btrfs_tree_read_unlock(eb);
				free_extent_buffer(eb);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					goto out;
				ref->inode_list = eie;
				/*
				 * We transferred the list ownership to the ref,
				 * so set to NULL to avoid a double free in case
				 * an error happens after this.
				 */
				eie = NULL;
			}
			ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && !ctx->skip_inode_ref_list) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (unlikely(!eie)) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
			/*
			 * We have transferred the inode list ownership from
			 * this ref to the ref we added to the 'refs' ulist.
			 * So set this ref's inode list to NULL to avoid
			 * use-after-free when our caller uses it or double
			 * frees in case an error happens before we return.
			 */
			ref->inode_list = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * Finds all leaves with a reference to the specified combination of
 * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves are
 * added to the ulist at @ctx->refs, and that ulist is allocated by this
 * function. The caller should free the ulist with free_leaf_list() if
 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
 * enough.
 *
 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
 */
int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
{
	int ret;

	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	ret = find_parent_nodes(ctx, NULL);
	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
	    (ret < 0 && ret != -ENOENT)) {
		free_leaf_list(ctx->refs);
		ctx->refs = NULL;
		return ret;
	}

	return 0;
}
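
/*
 * Hedged usage sketch for btrfs_find_all_leafs() (the bytenr below is made
 * up and error handling is abbreviated):
 *
 *	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
 *
 *	walk_ctx.fs_info = fs_info;
 *	walk_ctx.bytenr = 136708096;
 *	ret = btrfs_find_all_leafs(&walk_ctx);
 *	if (!ret) {
 *		// iterate walk_ctx.refs with ULIST_ITER_INIT()/ulist_next(),
 *		// then release it with free_leaf_list() as documented above
 *	}
 */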
1714 
1715 /*
1716  * Walk all backrefs for a given extent to find all roots that reference this
1717  * extent. Walking a backref means finding all extents that reference this
1718  * extent and in turn walk the backrefs of those, too. Naturally this is a
1719  * recursive process, but here it is implemented in an iterative fashion: We
1720  * find all referencing extents for the extent in question and put them on a
1721  * list. In turn, we find all referencing extents for those, further appending
1722  * to the list. The way we iterate the list allows adding more elements after
1723  * the current one while iterating. The process stops when we reach the end
1724  * of the list.
1725  *
1726  * Found roots are added to @ctx->roots, which is allocated by this function if
1727  * it points to NULL, in which case the caller is responsible for freeing it
1728  * once it's no longer needed.
1729  * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1730  * ulist to do temporary work, and frees it before returning.
1731  *
1732  * Returns 0 on success, < 0 on error.
1733  */
1734 static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
1735 {
1736 	const u64 orig_bytenr = ctx->bytenr;
1737 	const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
1738 	bool roots_ulist_allocated = false;
1739 	struct ulist_iterator uiter;
1740 	int ret = 0;
1741 
1742 	ASSERT(ctx->refs == NULL);
1743 
1744 	ctx->refs = ulist_alloc(GFP_NOFS);
1745 	if (!ctx->refs)
1746 		return -ENOMEM;
1747 
1748 	if (!ctx->roots) {
1749 		ctx->roots = ulist_alloc(GFP_NOFS);
1750 		if (!ctx->roots) {
1751 			ulist_free(ctx->refs);
1752 			ctx->refs = NULL;
1753 			return -ENOMEM;
1754 		}
1755 		roots_ulist_allocated = true;
1756 	}
1757 
1758 	ctx->skip_inode_ref_list = true;
1759 
1760 	ULIST_ITER_INIT(&uiter);
1761 	while (1) {
1762 		struct ulist_node *node;
1763 
1764 		ret = find_parent_nodes(ctx, NULL);
1765 		if (ret < 0 && ret != -ENOENT) {
1766 			if (roots_ulist_allocated) {
1767 				ulist_free(ctx->roots);
1768 				ctx->roots = NULL;
1769 			}
1770 			break;
1771 		}
1772 		ret = 0;
1773 		node = ulist_next(ctx->refs, &uiter);
1774 		if (!node)
1775 			break;
1776 		ctx->bytenr = node->val;
1777 		cond_resched();
1778 	}
1779 
1780 	ulist_free(ctx->refs);
1781 	ctx->refs = NULL;
1782 	ctx->bytenr = orig_bytenr;
1783 	ctx->skip_inode_ref_list = orig_skip_inode_ref_list;
1784 
1785 	return ret;
1786 }
1787 
1788 int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
1789 			 bool skip_commit_root_sem)
1790 {
1791 	int ret;
1792 
1793 	if (!ctx->trans && !skip_commit_root_sem)
1794 		down_read(&ctx->fs_info->commit_root_sem);
1795 	ret = btrfs_find_all_roots_safe(ctx);
1796 	if (!ctx->trans && !skip_commit_root_sem)
1797 		up_read(&ctx->fs_info->commit_root_sem);
1798 	return ret;
1799 }
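
/*
 * Editor's sketch, not part of the original file: consuming the roots found
 * by btrfs_find_all_roots(). @ctx->roots is a ulist of root ids, iterated
 * with the usual ulist API. Names are illustrative only.
 */
static int __maybe_unused demo_find_all_roots(struct btrfs_fs_info *fs_info,
					      u64 bytenr)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	walk_ctx.fs_info = fs_info;
	walk_ctx.bytenr = bytenr;

	/* No transaction held here, so let the helper take commit_root_sem. */
	ret = btrfs_find_all_roots(&walk_ctx, false);
	if (ret)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(walk_ctx.roots, &uiter)))
		btrfs_debug(fs_info, "extent %llu referenced from root %llu",
			    bytenr, node->val);

	/* We let btrfs_find_all_roots_safe() allocate @roots, so free it. */
	ulist_free(walk_ctx.roots);
	return 0;
}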
1800 
1801 struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
1802 {
1803 	struct btrfs_backref_share_check_ctx *ctx;
1804 
1805 	ctx = kzalloc_obj(*ctx);
1806 	if (!ctx)
1807 		return NULL;
1808 
1809 	ulist_init(&ctx->refs);
1810 
1811 	return ctx;
1812 }
1813 
1814 void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
1815 {
1816 	if (!ctx)
1817 		return;
1818 
1819 	ulist_release(&ctx->refs);
1820 	kfree(ctx);
1821 }
1822 
1823 /*
1824  * Check if a data extent is shared or not.
1825  *
1826  * @inode:       The inode whose extent we are checking.
1827  * @bytenr:      Logical bytenr of the extent we are checking.
1828  * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
1829  *               not known.
1830  * @ctx:         A backref sharedness check context.
1831  *
1832  * btrfs_is_data_extent_shared uses the backref walking code but will short
1833  * circuit as soon as it finds a root or inode that doesn't match the
1834  * one passed in. This provides a significant performance benefit for
1835  * callers (such as fiemap) which want to know whether the extent is
1836  * shared but do not need a ref count.
1837  *
1838  * This attempts to attach to the running transaction in order to account for
1839  * delayed refs, but continues on even when no running transaction exists.
1840  *
1841  * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1842  */
1843 int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
1844 				u64 extent_gen,
1845 				struct btrfs_backref_share_check_ctx *ctx)
1846 {
1847 	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
1848 	struct btrfs_root *root = inode->root;
1849 	struct btrfs_fs_info *fs_info = root->fs_info;
1850 	struct btrfs_trans_handle *trans;
1851 	struct ulist_iterator uiter;
1852 	struct ulist_node *node;
1853 	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1854 	int ret = 0;
1855 	struct share_check shared = {
1856 		.ctx = ctx,
1857 		.root = root,
1858 		.inum = btrfs_ino(inode),
1859 		.data_bytenr = bytenr,
1860 		.data_extent_gen = extent_gen,
1861 		.share_count = 0,
1862 		.self_ref_count = 0,
1863 		.have_delayed_delete_refs = false,
1864 	};
1865 	int level;
1866 	bool leaf_cached;
1867 	bool leaf_is_shared;
1868 
1869 	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
1870 		if (ctx->prev_extents_cache[i].bytenr == bytenr)
1871 			return ctx->prev_extents_cache[i].is_shared;
1872 	}
1873 
1874 	ulist_init(&ctx->refs);
1875 
1876 	trans = btrfs_join_transaction_nostart(root);
1877 	if (IS_ERR(trans)) {
1878 		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1879 			ret = PTR_ERR(trans);
1880 			goto out;
1881 		}
1882 		trans = NULL;
1883 		down_read(&fs_info->commit_root_sem);
1884 	} else {
1885 		btrfs_get_tree_mod_seq(fs_info, &elem);
1886 		walk_ctx.time_seq = elem.seq;
1887 	}
1888 
1889 	ctx->use_path_cache = true;
1890 
1891 	/*
1892 	 * We may have previously determined that the current leaf is shared.
1893 	 * If it is, then we have a data extent that is shared due to a shared
1894 	 * subtree (caused by snapshotting) and we don't need to check for data
1895 	 * backrefs. If the leaf is not shared, then we must do backref walking
1896 	 * to determine if the data extent is shared through reflinks.
1897 	 */
1898 	leaf_cached = lookup_backref_shared_cache(ctx, root,
1899 						  ctx->curr_leaf_bytenr, 0,
1900 						  &leaf_is_shared);
1901 	if (leaf_cached && leaf_is_shared) {
1902 		ret = 1;
1903 		goto out_trans;
1904 	}
1905 
1906 	walk_ctx.skip_inode_ref_list = true;
1907 	walk_ctx.trans = trans;
1908 	walk_ctx.fs_info = fs_info;
1909 	walk_ctx.refs = &ctx->refs;
1910 
1911 	/* -1 means we are at the bytenr of the data extent. */
1912 	level = -1;
1913 	ULIST_ITER_INIT(&uiter);
1914 	while (1) {
1915 		const unsigned long prev_ref_count = ctx->refs.nnodes;
1916 
1917 		walk_ctx.bytenr = bytenr;
1918 		ret = find_parent_nodes(&walk_ctx, &shared);
1919 		if (ret == BACKREF_FOUND_SHARED ||
1920 		    ret == BACKREF_FOUND_NOT_SHARED) {
1921 			/* If shared must return 1, otherwise return 0. */
1922 			ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
1923 			if (level >= 0)
1924 				store_backref_shared_cache(ctx, root, bytenr,
1925 							   level, ret == 1);
1926 			break;
1927 		}
1928 		if (ret < 0 && ret != -ENOENT)
1929 			break;
1930 		ret = 0;
1931 
1932 		/*
1933 		 * More than one extent buffer (bytenr) may have been added to
1934 		 * the ctx->refs ulist, in which case we have to check multiple
1935 		 * tree paths in case the first one is not shared, so we cannot
1936 		 * use the path cache which is made for a single path. Multiple
1937 		 * extent buffers at the current level happen when:
1938 		 *
1939 		 * 1) level -1, the data extent: If our data extent was not
1940 		 *    directly shared (without multiple reference items), then
1941 		 *    it might have a single reference item with a count > 1 for
1942 		 *    the same offset, which means there are 2 (or more) file
1943 		 *    extent items that point to the data extent - this happens
1944 		 *    when a file extent item needs to be split and then one
1945 		 *    item gets moved to another leaf due to a b+tree leaf split
1946 		 *    when inserting some item. In this case the file extent
1947 		 *    items may be located in different leaves and therefore
1948 		 *    some of the leaves may be referenced through shared
1949 		 *    subtrees while others are not. Since our extent buffer
1950 		 *    cache only works for a single path (by far the most common
1951 		 *    case and simpler to deal with), we cannot use it if we
1952 		 *    have multiple leaves (which implies multiple paths).
1953 		 *
1954 		 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
1955 		 *    and indirect references on a b+tree node/leaf, so we have
1956 		 *    to check multiple paths, and the extent buffer (the
1957 		 *    current bytenr) may be shared or not. One example is
1958 		 *    during relocation as we may get a shared tree block ref
1959 		 *    (direct ref) and a non-shared tree block ref (indirect
1960 		 *    ref) for the same node/leaf.
1961 		 */
1962 		if ((ctx->refs.nnodes - prev_ref_count) > 1)
1963 			ctx->use_path_cache = false;
1964 
1965 		if (level >= 0)
1966 			store_backref_shared_cache(ctx, root, bytenr,
1967 						   level, false);
1968 		node = ulist_next(&ctx->refs, &uiter);
1969 		if (!node)
1970 			break;
1971 		bytenr = node->val;
1972 		if (ctx->use_path_cache) {
1973 			bool is_shared;
1974 			bool cached;
1975 
1976 			level++;
1977 			cached = lookup_backref_shared_cache(ctx, root, bytenr,
1978 							     level, &is_shared);
1979 			if (cached) {
1980 				ret = (is_shared ? 1 : 0);
1981 				break;
1982 			}
1983 		}
1984 		shared.share_count = 0;
1985 		shared.have_delayed_delete_refs = false;
1986 		cond_resched();
1987 	}
1988 
1989 	/*
1990 	 * If the path cache is disabled, then it means at some tree level we
1991 	 * got multiple parents due to a mix of direct and indirect backrefs or
1992 	 * multiple leaves with file extent items pointing to the same data
1993 	 * extent. We have to invalidate the cache and cache only the sharedness
1994 	 * result for the levels where we got only one node/reference.
1995 	 */
1996 	if (!ctx->use_path_cache) {
1997 		int i = 0;
1998 
1999 		level--;
2000 		if (ret >= 0 && level >= 0) {
2001 			bytenr = ctx->path_cache_entries[level].bytenr;
2002 			ctx->use_path_cache = true;
2003 			store_backref_shared_cache(ctx, root, bytenr, level, ret);
2004 			i = level + 1;
2005 		}
2006 
2007 		for ( ; i < BTRFS_MAX_LEVEL; i++)
2008 			ctx->path_cache_entries[i].bytenr = 0;
2009 	}
2010 
2011 	/*
2012 	 * Cache the sharedness result for the data extent if we know our inode
2013 	 * has more than 1 file extent item that refers to the data extent.
2014 	 */
2015 	if (ret >= 0 && shared.self_ref_count > 1) {
2016 		int slot = ctx->prev_extents_cache_slot;
2017 
2018 		ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
2019 		ctx->prev_extents_cache[slot].is_shared = (ret == 1);
2020 
2021 		slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
2022 		ctx->prev_extents_cache_slot = slot;
2023 	}
2024 
2025 out_trans:
2026 	if (trans) {
2027 		btrfs_put_tree_mod_seq(fs_info, &elem);
2028 		btrfs_end_transaction(trans);
2029 	} else {
2030 		up_read(&fs_info->commit_root_sem);
2031 	}
2032 out:
2033 	ulist_release(&ctx->refs);
2034 	ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
2035 
2036 	return ret;
2037 }
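
/*
 * Editor's sketch, not part of the original file: how a fiemap-like caller
 * might use the sharedness check, reusing one context across many extents
 * of the same inode so that the caches above can take effect. Real callers
 * also keep ctx->curr_leaf_bytenr up to date while walking file extent
 * items; the function name is illustrative only.
 */
static int __maybe_unused demo_extent_shared(struct btrfs_inode *inode,
					     u64 disk_bytenr, u64 extent_gen)
{
	struct btrfs_backref_share_check_ctx *ctx;
	int ret;

	ctx = btrfs_alloc_backref_share_check_ctx();
	if (!ctx)
		return -ENOMEM;

	ret = btrfs_is_data_extent_shared(inode, disk_bytenr, extent_gen, ctx);
	if (ret == 1)
		btrfs_debug(inode->root->fs_info,
			    "data extent %llu is shared", disk_bytenr);

	btrfs_free_backref_share_ctx(ctx);
	return ret < 0 ? ret : 0;
}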
2038 
2039 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
2040 			  u64 start_off, struct btrfs_path *path,
2041 			  struct btrfs_inode_extref **ret_extref,
2042 			  u64 *found_off)
2043 {
2044 	int ret, slot;
2045 	struct btrfs_key key;
2046 	struct btrfs_key found_key;
2047 	struct btrfs_inode_extref *extref;
2048 	const struct extent_buffer *leaf;
2049 	unsigned long ptr;
2050 
2051 	key.objectid = inode_objectid;
2052 	key.type = BTRFS_INODE_EXTREF_KEY;
2053 	key.offset = start_off;
2054 
2055 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2056 	if (ret < 0)
2057 		return ret;
2058 
2059 	while (1) {
2060 		leaf = path->nodes[0];
2061 		slot = path->slots[0];
2062 		if (slot >= btrfs_header_nritems(leaf)) {
2063 			/*
2064 			 * If the item at offset is not found,
2065 			 * btrfs_search_slot will point us to the slot
2066 			 * where it should be inserted. In our case
2067 			 * that will be the slot directly before the
2068 			 * next BTRFS_INODE_EXTREF_KEY item. In the
2069 			 * case that we're pointing to the last slot
2070 			 * in a leaf, we must move one leaf over.
2071 			 */
2072 			ret = btrfs_next_leaf(root, path);
2073 			if (ret) {
2074 				if (ret >= 1)
2075 					ret = -ENOENT;
2076 				break;
2077 			}
2078 			continue;
2079 		}
2080 
2081 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2082 
2083 		/*
2084 		 * Check that we're still looking at an extended ref key for
2085 		 * this particular objectid. If we have a different
2086 		 * objectid or type then there are no more to be
2087 		 * found in the tree and we can exit.
2088 		 */
2089 		ret = -ENOENT;
2090 		if (found_key.objectid != inode_objectid)
2091 			break;
2092 		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
2093 			break;
2094 
2095 		ret = 0;
2096 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2097 		extref = (struct btrfs_inode_extref *)ptr;
2098 		*ret_extref = extref;
2099 		if (found_off)
2100 			*found_off = found_key.offset;
2101 		break;
2102 	}
2103 
2104 	return ret;
2105 }
2106 
2107 /*
2108  * This iterates to turn a name (from an iref/extref) into a full filesystem
2109  * path. Elements of the path are separated by '/' and the path is guaranteed
2110  * to be 0-terminated. The path is only given within the current file system,
2111  * therefore it never starts with a '/'. The caller is responsible for
2112  * providing "size" bytes in "dest". The dest buffer will be filled backwards
2113  * and finally the start point of the resulting string is returned. Normally
2114  * this pointer is within dest.
2115  * In case the path buffer would overflow, the pointer is decremented further
2116  * as if output was written to the buffer, though no more output is actually
2117  * generated. That way, the caller can determine how much space would be
2118  * required for the path to fit into the buffer. In that case, the returned
2119  * value will be smaller than dest. Callers must check this!
2120  */
2121 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2122 			u32 name_len, unsigned long name_off,
2123 			struct extent_buffer *eb_in, u64 parent,
2124 			char *dest, u32 size)
2125 {
2126 	int slot;
2127 	u64 next_inum;
2128 	int ret;
2129 	s64 bytes_left = ((s64)size) - 1;
2130 	struct extent_buffer *eb = eb_in;
2131 	struct btrfs_key found_key;
2132 	struct btrfs_inode_ref *iref;
2133 
2134 	if (bytes_left >= 0)
2135 		dest[bytes_left] = '\0';
2136 
2137 	while (1) {
2138 		bytes_left -= name_len;
2139 		if (bytes_left >= 0)
2140 			read_extent_buffer(eb, dest + bytes_left,
2141 					   name_off, name_len);
2142 		if (eb != eb_in) {
2143 			if (!path->skip_locking)
2144 				btrfs_tree_read_unlock(eb);
2145 			free_extent_buffer(eb);
2146 		}
2147 		ret = btrfs_find_item(fs_root, path, parent, 0,
2148 				BTRFS_INODE_REF_KEY, &found_key);
2149 		if (ret > 0)
2150 			ret = -ENOENT;
2151 		if (ret)
2152 			break;
2153 
2154 		next_inum = found_key.offset;
2155 
2156 		/* regular exit ahead */
2157 		if (parent == next_inum)
2158 			break;
2159 
2160 		slot = path->slots[0];
2161 		eb = path->nodes[0];
2162 		/* make sure we can use eb after releasing the path */
2163 		if (eb != eb_in) {
2164 			path->nodes[0] = NULL;
2165 			path->locks[0] = 0;
2166 		}
2167 		btrfs_release_path(path);
2168 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2169 
2170 		name_len = btrfs_inode_ref_name_len(eb, iref);
2171 		name_off = (unsigned long)(iref + 1);
2172 
2173 		parent = next_inum;
2174 		--bytes_left;
2175 		if (bytes_left >= 0)
2176 			dest[bytes_left] = '/';
2177 	}
2178 
2179 	btrfs_release_path(path);
2180 
2181 	if (ret)
2182 		return ERR_PTR(ret);
2183 
2184 	return dest + bytes_left;
2185 }
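
/*
 * Editor's sketch, not part of the original file: checking the return value
 * of btrfs_ref_to_path() for truncation, as the comment above requires.
 * Buffer size and names are illustrative only.
 */
static int __maybe_unused demo_ref_to_path(struct btrfs_root *fs_root,
					   struct btrfs_path *path,
					   u32 name_len, unsigned long name_off,
					   struct extent_buffer *eb, u64 parent)
{
	const u32 size = 4096;
	char *buf;
	char *start;
	int ret = 0;

	buf = kmalloc(size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	start = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb,
				  parent, buf, size);
	if (IS_ERR(start)) {
		ret = PTR_ERR(start);
	} else if (start < buf) {
		/* (buf - start) more bytes were needed for the full path. */
		ret = -ENAMETOOLONG;
	} else {
		/* @start points at a 0-terminated path inside @buf. */
		btrfs_debug(fs_root->fs_info, "resolved path: %s", start);
	}

	kfree(buf);
	return ret;
}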
2186 
2187 /*
2188  * This makes the path point to the extent item for the given logical address.
2189  * Returns 0 on success, with *flags_ret set to BTRFS_EXTENT_FLAG_DATA for data
2190  * extents or BTRFS_EXTENT_FLAG_TREE_BLOCK for tree blocks, and <0 on error.
2191  */
2192 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2193 			struct btrfs_path *path, struct btrfs_key *found_key,
2194 			u64 *flags_ret)
2195 {
2196 	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
2197 	int ret;
2198 	u64 flags;
2199 	u64 size = 0;
2200 	const struct extent_buffer *eb;
2201 	struct btrfs_extent_item *ei;
2202 	struct btrfs_key key;
2203 
2204 	if (unlikely(!extent_root)) {
2205 		btrfs_err(fs_info,
2206 			  "missing extent root for extent at bytenr %llu",
2207 			  logical);
2208 		return -EUCLEAN;
2209 	}
2210 
2211 	key.objectid = logical;
2212 	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2213 		key.type = BTRFS_METADATA_ITEM_KEY;
2214 	else
2215 		key.type = BTRFS_EXTENT_ITEM_KEY;
2216 	key.offset = (u64)-1;
2217 
2218 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2219 	if (ret < 0)
2220 		return ret;
2221 	if (unlikely(ret == 0)) {
2222 		/*
2223 		 * Key with offset -1 found, there would have to exist an extent
2224 		 * item with such offset, but this is out of the valid range.
2225 		 */
2226 		return -EUCLEAN;
2227 	}
2228 
2229 	ret = btrfs_previous_extent_item(extent_root, path, 0);
2230 	if (ret) {
2231 		if (ret > 0)
2232 			ret = -ENOENT;
2233 		return ret;
2234 	}
2235 	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
2236 	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
2237 		size = fs_info->nodesize;
2238 	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
2239 		size = found_key->offset;
2240 
2241 	if (found_key->objectid > logical ||
2242 	    found_key->objectid + size <= logical) {
2243 		btrfs_debug(fs_info,
2244 			"logical %llu is not within any extent", logical);
2245 		return -ENOENT;
2246 	}
2247 
2248 	eb = path->nodes[0];
2249 
2250 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2251 	flags = btrfs_extent_flags(eb, ei);
2252 
2253 	btrfs_debug(fs_info,
2254 		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2255 		 logical, logical - found_key->objectid, found_key->objectid,
2256 		 found_key->offset, flags, btrfs_item_size(eb, path->slots[0]));
2257 
2258 	WARN_ON(!flags_ret);
2259 	if (flags_ret) {
2260 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2261 			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2262 		else if (flags & BTRFS_EXTENT_FLAG_DATA)
2263 			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
2264 		else
2265 			BUG();
2266 		return 0;
2267 	}
2268 
2269 	return -EIO;
2270 }
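
/*
 * Editor's sketch, not part of the original file: resolving a logical
 * address to its extent item and classifying it via the returned flags.
 * The function name is illustrative only.
 */
static int __maybe_unused demo_extent_from_logical(struct btrfs_fs_info *fs_info,
						   u64 logical)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_free_path(path);
	if (ret < 0)
		return ret;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		btrfs_debug(fs_info, "%llu is inside a tree block", logical);
	else
		btrfs_debug(fs_info, "%llu is inside a data extent at %llu",
			    logical, found_key.objectid);
	return 0;
}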
2271 
2272 /*
2273  * Helper function to iterate extent inline refs. *ptr must point to a 0 value
2274  * for the first call and may be modified; it is used to track state.
2275  * If more refs exist, 0 is returned and the next call to
2276  * get_extent_inline_ref() must pass the modified ptr parameter to get the
2277  * next ref. After the last ref was processed, 1 is returned.
2278  * Returns <0 on error.
2279  */
2280 static int get_extent_inline_ref(unsigned long *ptr,
2281 				 const struct extent_buffer *eb,
2282 				 const struct btrfs_key *key,
2283 				 const struct btrfs_extent_item *ei,
2284 				 u32 item_size,
2285 				 struct btrfs_extent_inline_ref **out_eiref,
2286 				 int *out_type)
2287 {
2288 	unsigned long end;
2289 	u64 flags;
2290 	struct btrfs_tree_block_info *info;
2291 
2292 	if (!*ptr) {
2293 		/* first call */
2294 		flags = btrfs_extent_flags(eb, ei);
2295 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2296 			if (key->type == BTRFS_METADATA_ITEM_KEY) {
2297 				/* a skinny metadata extent */
2298 				*out_eiref =
2299 				     (struct btrfs_extent_inline_ref *)(ei + 1);
2300 			} else {
2301 				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2302 				info = (struct btrfs_tree_block_info *)(ei + 1);
2303 				*out_eiref =
2304 				   (struct btrfs_extent_inline_ref *)(info + 1);
2305 			}
2306 		} else {
2307 			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2308 		}
2309 		*ptr = (unsigned long)*out_eiref;
2310 		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2311 			return -ENOENT;
2312 	}
2313 
2314 	end = (unsigned long)ei + item_size;
2315 	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2316 	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2317 						     BTRFS_REF_TYPE_ANY);
2318 	if (unlikely(*out_type == BTRFS_REF_TYPE_INVALID))
2319 		return -EUCLEAN;
2320 
2321 	*ptr += btrfs_extent_inline_ref_size(*out_type);
2322 	WARN_ON(*ptr > end);
2323 	if (*ptr == end)
2324 		return 1; /* last */
2325 
2326 	return 0;
2327 }
2328 
2329 /*
2330  * Reads the tree block backref for an extent. Tree level and root are returned
2331  * through out_level and out_root. *ptr must point to a 0 value for the first
2332  * call and may be modified (see the get_extent_inline_ref() comment).
2333  * Returns 0 if data was provided, 1 if there was no more data to provide or
2334  * <0 on error.
2335  */
2336 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2337 			    struct btrfs_key *key, struct btrfs_extent_item *ei,
2338 			    u32 item_size, u64 *out_root, u8 *out_level)
2339 {
2340 	int ret;
2341 	int type;
2342 	struct btrfs_extent_inline_ref *eiref;
2343 
2344 	if (*ptr == (unsigned long)-1)
2345 		return 1;
2346 
2347 	while (1) {
2348 		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2349 					      &eiref, &type);
2350 		if (ret < 0)
2351 			return ret;
2352 
2353 		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2354 		    type == BTRFS_SHARED_BLOCK_REF_KEY)
2355 			break;
2356 
2357 		if (ret == 1)
2358 			return 1;
2359 	}
2360 
2361 	/* we can treat both ref types equally here */
2362 	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2363 
2364 	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2365 		struct btrfs_tree_block_info *info;
2366 
2367 		info = (struct btrfs_tree_block_info *)(ei + 1);
2368 		*out_level = btrfs_tree_block_level(eb, info);
2369 	} else {
2370 		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2371 		*out_level = (u8)key->offset;
2372 	}
2373 
2374 	if (ret == 1)
2375 		*ptr = (unsigned long)-1;
2376 
2377 	return 0;
2378 }
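
/*
 * Editor's sketch, not part of the original file: the calling pattern for
 * tree_backref_for_extent(). @ptr starts at 0 and is advanced internally;
 * @eb, @key, @ei and @item_size describe the extent item, e.g. as located
 * by extent_from_logical(). Names are illustrative only.
 */
static int __maybe_unused demo_tree_backrefs(struct btrfs_fs_info *fs_info,
					     struct extent_buffer *eb,
					     struct btrfs_key *key,
					     struct btrfs_extent_item *ei,
					     u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	while ((ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level)) == 0)
		btrfs_debug(fs_info,
			    "tree block %llu is referenced at level %d in root %llu",
			    key->objectid, level, root);

	/* ret == 1 means all backrefs were consumed, ret < 0 is an error. */
	return ret < 0 ? ret : 0;
}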
2379 
2380 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2381 			     struct extent_inode_elem *inode_list,
2382 			     u64 root, u64 extent_item_objectid,
2383 			     iterate_extent_inodes_t *iterate, void *ctx)
2384 {
2385 	struct extent_inode_elem *eie;
2386 	int ret = 0;
2387 
2388 	for (eie = inode_list; eie; eie = eie->next) {
2389 		btrfs_debug(fs_info,
2390 			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
2391 			    extent_item_objectid, eie->inum,
2392 			    eie->offset, root);
2393 		ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
2394 		if (ret) {
2395 			btrfs_debug(fs_info,
2396 				    "stopping iteration for %llu due to ret=%d",
2397 				    extent_item_objectid, ret);
2398 			break;
2399 		}
2400 	}
2401 
2402 	return ret;
2403 }
2404 
2405 /*
2406  * Calls iterate() for every inode that references the extent identified by
2407  * the given parameters.
2408  * When the iterator function returns a non-zero value, iteration stops.
2409  */
2410 int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
2411 			  bool search_commit_root,
2412 			  iterate_extent_inodes_t *iterate, void *user_ctx)
2413 {
2414 	int ret;
2415 	struct ulist *refs;
2416 	struct ulist_node *ref_node;
2417 	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2418 	struct ulist_iterator ref_uiter;
2419 
2420 	btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
2421 		    ctx->bytenr);
2422 
2423 	ASSERT(ctx->trans == NULL);
2424 	ASSERT(ctx->roots == NULL);
2425 
2426 	if (!search_commit_root) {
2427 		struct btrfs_trans_handle *trans;
2428 
2429 		trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
2430 		if (IS_ERR(trans)) {
2431 			if (PTR_ERR(trans) != -ENOENT &&
2432 			    PTR_ERR(trans) != -EROFS)
2433 				return PTR_ERR(trans);
2434 			trans = NULL;
2435 		}
2436 		ctx->trans = trans;
2437 	}
2438 
2439 	if (ctx->trans) {
2440 		btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
2441 		ctx->time_seq = seq_elem.seq;
2442 	} else {
2443 		down_read(&ctx->fs_info->commit_root_sem);
2444 	}
2445 
2446 	ret = btrfs_find_all_leafs(ctx);
2447 	if (ret)
2448 		goto out;
2449 	refs = ctx->refs;
2450 	ctx->refs = NULL;
2451 
2452 	ULIST_ITER_INIT(&ref_uiter);
2453 	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2454 		const u64 leaf_bytenr = ref_node->val;
2455 		struct ulist_node *root_node;
2456 		struct ulist_iterator root_uiter;
2457 		struct extent_inode_elem *inode_list;
2458 
2459 		inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
2460 
2461 		if (ctx->cache_lookup) {
2462 			const u64 *root_ids;
2463 			int root_count;
2464 			bool cached;
2465 
2466 			cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
2467 						   &root_ids, &root_count);
2468 			if (cached) {
2469 				for (int i = 0; i < root_count; i++) {
2470 					ret = iterate_leaf_refs(ctx->fs_info,
2471 								inode_list,
2472 								root_ids[i],
2473 								leaf_bytenr,
2474 								iterate,
2475 								user_ctx);
2476 					if (ret)
2477 						break;
2478 				}
2479 				continue;
2480 			}
2481 		}
2482 
2483 		if (!ctx->roots) {
2484 			ctx->roots = ulist_alloc(GFP_NOFS);
2485 			if (!ctx->roots) {
2486 				ret = -ENOMEM;
2487 				break;
2488 			}
2489 		}
2490 
2491 		ctx->bytenr = leaf_bytenr;
2492 		ret = btrfs_find_all_roots_safe(ctx);
2493 		if (ret)
2494 			break;
2495 
2496 		if (ctx->cache_store)
2497 			ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
2498 
2499 		ULIST_ITER_INIT(&root_uiter);
2500 		while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
2501 			btrfs_debug(ctx->fs_info,
2502 				    "root %llu references leaf %llu, data list %#llx",
2503 				    root_node->val, ref_node->val,
2504 				    ref_node->aux);
2505 			ret = iterate_leaf_refs(ctx->fs_info, inode_list,
2506 						root_node->val, ctx->bytenr,
2507 						iterate, user_ctx);
2508 		}
2509 		ulist_reinit(ctx->roots);
2510 	}
2511 
2512 	free_leaf_list(refs);
2513 out:
2514 	if (ctx->trans) {
2515 		btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
2516 		btrfs_end_transaction(ctx->trans);
2517 		ctx->trans = NULL;
2518 	} else {
2519 		up_read(&ctx->fs_info->commit_root_sem);
2520 	}
2521 
2522 	ulist_free(ctx->roots);
2523 	ctx->roots = NULL;
2524 
2525 	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
2526 		ret = 0;
2527 
2528 	return ret;
2529 }
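
/*
 * Editor's sketch, not part of the original file: a minimal
 * iterate_extent_inodes_t callback plus a walk context set up the way the
 * function above expects (no transaction attached, no preallocated roots
 * ulist). Names are illustrative only.
 */
static int __maybe_unused demo_inode_cb(u64 inum, u64 offset, u64 num_bytes,
					u64 root, void *user_ctx)
{
	struct btrfs_fs_info *fs_info = user_ctx;

	btrfs_debug(fs_info, "inode %llu offset %llu root %llu",
		    inum, offset, root);
	/* Returning non-zero here would stop the iteration. */
	return 0;
}

static int __maybe_unused demo_iterate_extent_inodes(struct btrfs_fs_info *fs_info,
						     u64 extent_bytenr)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };

	walk_ctx.fs_info = fs_info;
	walk_ctx.bytenr = extent_bytenr;
	/* Report every inode using the extent, regardless of file offset. */
	walk_ctx.ignore_extent_item_pos = true;

	return iterate_extent_inodes(&walk_ctx, false, demo_inode_cb, fs_info);
}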
2530 
2531 static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
2532 {
2533 	struct btrfs_data_container *inodes = ctx;
2534 	const size_t c = 3 * sizeof(u64);
2535 
2536 	if (inodes->bytes_left >= c) {
2537 		inodes->bytes_left -= c;
2538 		inodes->val[inodes->elem_cnt] = inum;
2539 		inodes->val[inodes->elem_cnt + 1] = offset;
2540 		inodes->val[inodes->elem_cnt + 2] = root;
2541 		inodes->elem_cnt += 3;
2542 	} else {
2543 		inodes->bytes_missing += c - inodes->bytes_left;
2544 		inodes->bytes_left = 0;
2545 		inodes->elem_missed += 3;
2546 	}
2547 
2548 	return 0;
2549 }
2550 
2551 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2552 				void *ctx, bool ignore_offset)
2553 {
2554 	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
2555 	int ret;
2556 	u64 flags = 0;
2557 	struct btrfs_key found_key;
2558 	struct btrfs_path *path;
2559 
2560 	path = btrfs_alloc_path();
2561 	if (!path)
2562 		return -ENOMEM;
2563 
2564 	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2565 	btrfs_free_path(path);
2566 	if (ret < 0)
2567 		return ret;
2568 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2569 		return -EINVAL;
2570 
2571 	walk_ctx.bytenr = found_key.objectid;
2572 	if (ignore_offset)
2573 		walk_ctx.ignore_extent_item_pos = true;
2574 	else
2575 		walk_ctx.extent_item_pos = logical - found_key.objectid;
2576 	walk_ctx.fs_info = fs_info;
2577 
2578 	return iterate_extent_inodes(&walk_ctx, false, build_ino_list, ctx);
2579 }
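
/*
 * Editor's sketch, not part of the original file: pairing
 * init_data_container() with iterate_inodes_from_logical(), similar to the
 * LOGICAL_INO ioctl. Each result occupies three u64 slots in ->val
 * (inum, offset, root), as laid out by build_ino_list() above. The
 * container size is illustrative.
 */
static int __maybe_unused demo_inodes_from_logical(struct btrfs_fs_info *fs_info,
						   u64 logical)
{
	struct btrfs_data_container *inodes;
	u32 i;
	int ret;

	inodes = init_data_container(4096);
	if (IS_ERR(inodes))
		return PTR_ERR(inodes);

	ret = iterate_inodes_from_logical(logical, fs_info, inodes, false);
	if (ret >= 0)
		for (i = 0; i < inodes->elem_cnt; i += 3)
			btrfs_debug(fs_info, "inum %llu offset %llu root %llu",
				    inodes->val[i], inodes->val[i + 1],
				    inodes->val[i + 2]);

	kvfree(inodes);
	return ret < 0 ? ret : 0;
}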
2580 
2581 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2582 			 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2583 
2584 static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2585 {
2586 	int ret = 0;
2587 	int slot;
2588 	u32 cur;
2589 	u32 len;
2590 	u32 name_len;
2591 	u64 parent = 0;
2592 	int found = 0;
2593 	struct btrfs_root *fs_root = ipath->fs_root;
2594 	struct btrfs_path *path = ipath->btrfs_path;
2595 	struct extent_buffer *eb;
2596 	struct btrfs_inode_ref *iref;
2597 	struct btrfs_key found_key;
2598 
2599 	while (!ret) {
2600 		ret = btrfs_find_item(fs_root, path, inum,
2601 				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2602 				&found_key);
2603 
2604 		if (ret < 0)
2605 			break;
2606 		if (ret) {
2607 			ret = found ? 0 : -ENOENT;
2608 			break;
2609 		}
2610 		++found;
2611 
2612 		parent = found_key.offset;
2613 		slot = path->slots[0];
2614 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2615 		if (!eb) {
2616 			ret = -ENOMEM;
2617 			break;
2618 		}
2619 		btrfs_release_path(path);
2620 
2621 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2622 
2623 		for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2624 			name_len = btrfs_inode_ref_name_len(eb, iref);
2625 			/* path must be released before calling iterate()! */
2626 			btrfs_debug(fs_root->fs_info,
2627 				"following ref at offset %u for inode %llu in tree %llu",
2628 				cur, found_key.objectid,
2629 				btrfs_root_id(fs_root));
2630 			ret = inode_to_path(parent, name_len,
2631 				      (unsigned long)(iref + 1), eb, ipath);
2632 			if (ret)
2633 				break;
2634 			len = sizeof(*iref) + name_len;
2635 			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2636 		}
2637 		free_extent_buffer(eb);
2638 	}
2639 
2640 	btrfs_release_path(path);
2641 
2642 	return ret;
2643 }
2644 
2645 static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2646 {
2647 	int ret;
2648 	int slot;
2649 	u64 offset = 0;
2650 	u64 parent;
2651 	int found = 0;
2652 	struct btrfs_root *fs_root = ipath->fs_root;
2653 	struct btrfs_path *path = ipath->btrfs_path;
2654 	struct extent_buffer *eb;
2655 	struct btrfs_inode_extref *extref;
2656 	u32 item_size;
2657 	u32 cur_offset;
2658 	unsigned long ptr;
2659 
2660 	while (1) {
2661 		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2662 					    &offset);
2663 		if (ret < 0)
2664 			break;
2665 		if (ret) {
2666 			ret = found ? 0 : -ENOENT;
2667 			break;
2668 		}
2669 		++found;
2670 
2671 		slot = path->slots[0];
2672 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2673 		if (!eb) {
2674 			ret = -ENOMEM;
2675 			break;
2676 		}
2677 		btrfs_release_path(path);
2678 
2679 		item_size = btrfs_item_size(eb, slot);
2680 		ptr = btrfs_item_ptr_offset(eb, slot);
2681 		cur_offset = 0;
2682 
2683 		while (cur_offset < item_size) {
2684 			u32 name_len;
2685 
2686 			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2687 			parent = btrfs_inode_extref_parent(eb, extref);
2688 			name_len = btrfs_inode_extref_name_len(eb, extref);
2689 			ret = inode_to_path(parent, name_len,
2690 				      (unsigned long)&extref->name, eb, ipath);
2691 			if (ret)
2692 				break;
2693 
2694 			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2695 			cur_offset += sizeof(*extref);
2696 		}
2697 		free_extent_buffer(eb);
2698 
2699 		offset++;
2700 	}
2701 
2702 	btrfs_release_path(path);
2703 
2704 	return ret;
2705 }
2706 
2707 /*
2708  * Returns 0 if the path could be dumped (possibly truncated).
2709  * Returns <0 in case of an error.
2710  */
2711 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2712 			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2713 {
2714 	char *fspath;
2715 	char *fspath_min;
2716 	int i = ipath->fspath->elem_cnt;
2717 	const int s_ptr = sizeof(char *);
2718 	u32 bytes_left;
2719 
2720 	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2721 					ipath->fspath->bytes_left - s_ptr : 0;
2722 
2723 	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2724 	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2725 				   name_off, eb, inum, fspath_min, bytes_left);
2726 	if (IS_ERR(fspath))
2727 		return PTR_ERR(fspath);
2728 
2729 	if (fspath > fspath_min) {
2730 		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2731 		++ipath->fspath->elem_cnt;
2732 		ipath->fspath->bytes_left = fspath - fspath_min;
2733 	} else {
2734 		++ipath->fspath->elem_missed;
2735 		ipath->fspath->bytes_missing += fspath_min - fspath;
2736 		ipath->fspath->bytes_left = 0;
2737 	}
2738 
2739 	return 0;
2740 }
2741 
2742 /*
2743  * This dumps all file system paths to the inode into the ipath struct,
2744  * provided it has been created large enough. Each path is zero-terminated
2745  * and accessed from ipath->fspath->val[i].
2746  * When it returns, there are ipath->fspath->elem_cnt paths available in
2747  * ipath->fspath->val[]. When the allocated space wasn't sufficient, the
2748  * number of missed paths is recorded in ipath->fspath->elem_missed;
2749  * otherwise it's zero. ipath->fspath->bytes_missing holds the number of
2750  * bytes that would have been needed to return all paths.
2751  */
2752 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2753 {
2754 	int ret;
2755 	int found_refs = 0;
2756 
2757 	ret = iterate_inode_refs(inum, ipath);
2758 	if (!ret)
2759 		++found_refs;
2760 	else if (ret != -ENOENT)
2761 		return ret;
2762 
2763 	ret = iterate_inode_extrefs(inum, ipath);
2764 	if (ret == -ENOENT && found_refs)
2765 		return 0;
2766 
2767 	return ret;
2768 }
2769 
2770 struct btrfs_data_container *init_data_container(u32 total_bytes)
2771 {
2772 	struct btrfs_data_container *data;
2773 	size_t alloc_bytes;
2774 
2775 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2776 	data = kvzalloc(alloc_bytes, GFP_KERNEL);
2777 	if (!data)
2778 		return ERR_PTR(-ENOMEM);
2779 
2780 	if (total_bytes >= sizeof(*data))
2781 		data->bytes_left = total_bytes - sizeof(*data);
2782 	else
2783 		data->bytes_missing = sizeof(*data) - total_bytes;
2784 
2785 	return data;
2786 }
2787 
2788 /*
2789  * Allocates space to return multiple file system paths for an inode.
2790  * total_bytes to allocate are passed; note that space usable for actual path
2791  * information will be total_bytes - sizeof(struct inode_fs_paths).
2792  * The returned pointer must be freed with __free_inode_fs_paths() in the end.
2793  */
2794 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2795 					struct btrfs_path *path)
2796 {
2797 	struct inode_fs_paths *ifp;
2798 	struct btrfs_data_container *fspath;
2799 
2800 	fspath = init_data_container(total_bytes);
2801 	if (IS_ERR(fspath))
2802 		return ERR_CAST(fspath);
2803 
2804 	ifp = kmalloc_obj(*ifp);
2805 	if (!ifp) {
2806 		kvfree(fspath);
2807 		return ERR_PTR(-ENOMEM);
2808 	}
2809 
2810 	ifp->btrfs_path = path;
2811 	ifp->fspath = fspath;
2812 	ifp->fs_root = fs_root;
2813 
2814 	return ifp;
2815 }
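
/*
 * Editor's sketch, not part of the original file: resolving all file system
 * paths of an inode with init_ipath() + paths_from_inode(). The manual
 * cleanup simply mirrors the allocations made above (kvzalloc for the
 * container, kmalloc_obj for the ipath struct). Sizes and names are
 * illustrative only.
 */
static int __maybe_unused demo_paths_from_inode(struct btrfs_root *fs_root,
						u64 inum)
{
	struct inode_fs_paths *ipath;
	struct btrfs_path *path;
	u32 i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret)
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			btrfs_debug(fs_root->fs_info, "path %u: %s", i,
				    (char *)(unsigned long)ipath->fspath->val[i]);

	kvfree(ipath->fspath);
	kfree(ipath);
	btrfs_free_path(path);
	return ret;
}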
2816 
2817 struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
2818 {
2819 	struct btrfs_backref_iter *ret;
2820 
2821 	ret = kzalloc_obj(*ret, GFP_NOFS);
2822 	if (!ret)
2823 		return NULL;
2824 
2825 	ret->path = btrfs_alloc_path();
2826 	if (!ret->path) {
2827 		kfree(ret);
2828 		return NULL;
2829 	}
2830 
2831 	/* The current backref iterator only supports iterating the commit root */
2832 	ret->path->search_commit_root = true;
2833 	ret->path->skip_locking = true;
2834 	ret->fs_info = fs_info;
2835 
2836 	return ret;
2837 }
2838 
2839 static void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
2840 {
2841 	iter->bytenr = 0;
2842 	iter->item_ptr = 0;
2843 	iter->cur_ptr = 0;
2844 	iter->end_ptr = 0;
2845 	btrfs_release_path(iter->path);
2846 	memset(&iter->cur_key, 0, sizeof(iter->cur_key));
2847 }
2848 
2849 int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2850 {
2851 	struct btrfs_fs_info *fs_info = iter->fs_info;
2852 	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2853 	struct btrfs_path *path = iter->path;
2854 	struct btrfs_extent_item *ei;
2855 	struct btrfs_key key;
2856 	int ret;
2857 
2858 	if (unlikely(!extent_root)) {
2859 		btrfs_err(fs_info,
2860 			  "missing extent root for extent at bytenr %llu",
2861 			  bytenr);
2862 		return -EUCLEAN;
2863 	}
2864 
2865 	key.objectid = bytenr;
2866 	key.type = BTRFS_METADATA_ITEM_KEY;
2867 	key.offset = (u64)-1;
2868 	iter->bytenr = bytenr;
2869 
2870 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2871 	if (ret < 0)
2872 		return ret;
2873 	if (unlikely(ret == 0)) {
2874 		/*
2875 		 * Key with offset -1 found, there would have to exist an extent
2876 		 * item with such offset, but this is out of the valid range.
2877 		 */
2878 		ret = -EUCLEAN;
2879 		goto release;
2880 	}
2881 	if (unlikely(path->slots[0] == 0)) {
2882 		DEBUG_WARN();
2883 		ret = -EUCLEAN;
2884 		goto release;
2885 	}
2886 	path->slots[0]--;
2887 
2888 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2889 	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2890 	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2891 		ret = -ENOENT;
2892 		goto release;
2893 	}
2894 	memcpy(&iter->cur_key, &key, sizeof(key));
2895 	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2896 						    path->slots[0]);
2897 	iter->end_ptr = (u32)(iter->item_ptr +
2898 			btrfs_item_size(path->nodes[0], path->slots[0]));
2899 	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2900 			    struct btrfs_extent_item);
2901 
2902 	/*
2903 	 * We only support iterating tree backrefs for now.
2904 	 *
2905 	 * This is an extra precaution for non skinny-metadata, where
2906 	 * EXTENT_ITEM is also used for tree blocks and we can only rely
2907 	 * on the extent flags to determine if it's a tree block.
2908 	 */
2909 	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2910 		ret = -ENOTSUPP;
2911 		goto release;
2912 	}
2913 	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2914 
2915 	/* If there is no inline backref, go search for keyed backref */
2916 	if (iter->cur_ptr >= iter->end_ptr) {
2917 		ret = btrfs_next_item(extent_root, path);
2918 
2919 		/* No inline nor keyed ref */
2920 		if (ret > 0) {
2921 			ret = -ENOENT;
2922 			goto release;
2923 		}
2924 		if (ret < 0)
2925 			goto release;
2926 
2927 		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2928 				path->slots[0]);
2929 		if (iter->cur_key.objectid != bytenr ||
2930 		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2931 		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2932 			ret = -ENOENT;
2933 			goto release;
2934 		}
2935 		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2936 							   path->slots[0]);
2937 		iter->item_ptr = iter->cur_ptr;
2938 		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2939 				      path->nodes[0], path->slots[0]));
2940 	}
2941 
2942 	return 0;
2943 release:
2944 	btrfs_backref_iter_release(iter);
2945 	return ret;
2946 }
2947 
2948 static bool btrfs_backref_iter_is_inline_ref(struct btrfs_backref_iter *iter)
2949 {
2950 	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
2951 	    iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
2952 		return true;
2953 	return false;
2954 }
2955 
2956 /*
2957  * Go to the next backref item of the current bytenr, which can be either
2958  * inlined or keyed.
2959  *
2960  * The caller needs to check whether it's an inline ref or not via iter->cur_key.
2961  *
2962  * Return 0 if we got the next backref without problem.
2963  * Return >0 if there is no extra backref for this bytenr.
2964  * Return <0 if something went wrong.
2965  */
2966 int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2967 {
2968 	struct extent_buffer *eb = iter->path->nodes[0];
2969 	struct btrfs_root *extent_root;
2970 	struct btrfs_path *path = iter->path;
2971 	struct btrfs_extent_inline_ref *iref;
2972 	int ret;
2973 	u32 size;
2974 
2975 	if (btrfs_backref_iter_is_inline_ref(iter)) {
2976 		/* We're still inside the inline refs */
2977 		ASSERT(iter->cur_ptr < iter->end_ptr);
2978 
2979 		if (btrfs_backref_has_tree_block_info(iter)) {
2980 			/* First tree block info */
2981 			size = sizeof(struct btrfs_tree_block_info);
2982 		} else {
2983 			/* Use inline ref type to determine the size */
2984 			int type;
2985 
2986 			iref = (struct btrfs_extent_inline_ref *)
2987 				((unsigned long)iter->cur_ptr);
2988 			type = btrfs_extent_inline_ref_type(eb, iref);
2989 
2990 			size = btrfs_extent_inline_ref_size(type);
2991 		}
2992 		iter->cur_ptr += size;
2993 		if (iter->cur_ptr < iter->end_ptr)
2994 			return 0;
2995 
2996 		/* All inline items iterated, fall through */
2997 	}
2998 
2999 	/* We're at keyed items, there is no inline item, go to the next one */
3000 	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
3001 	if (unlikely(!extent_root)) {
3002 		btrfs_err(iter->fs_info,
3003 			  "missing extent root for extent at bytenr %llu",
3004 			  iter->bytenr);
3005 		return -EUCLEAN;
3006 	}
3007 
3008 	ret = btrfs_next_item(extent_root, iter->path);
3009 	if (ret)
3010 		return ret;
3011 
3012 	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
3013 	if (iter->cur_key.objectid != iter->bytenr ||
3014 	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
3015 	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
3016 		return 1;
3017 	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
3018 					path->slots[0]);
3019 	iter->cur_ptr = iter->item_ptr;
3020 	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
3021 						path->slots[0]);
3022 	return 0;
3023 }
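
/*
 * Editor's sketch, not part of the original file: the loop structure for
 * walking all tree backrefs of one metadata extent with the iterator above,
 * similar to what relocation does when building its backref cache. The
 * manual cleanup mirrors btrfs_backref_iter_alloc(); names are illustrative.
 */
static int __maybe_unused demo_backref_iter(struct btrfs_fs_info *fs_info,
					    u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0)
		goto out;

	do {
		/*
		 * iter->cur_key tells inline and keyed refs apart; for
		 * inline refs the ref body sits at iter->cur_ptr.
		 */
		btrfs_debug(fs_info, "backref key " BTRFS_KEY_FMT " for %llu",
			    BTRFS_KEY_FMT_VALUE(&iter->cur_key), bytenr);
		ret = btrfs_backref_iter_next(iter);
	} while (ret == 0);

	/* ret > 0 only means there are no more backrefs for this bytenr. */
	if (ret > 0)
		ret = 0;
	btrfs_backref_iter_release(iter);
out:
	btrfs_free_path(iter->path);
	kfree(iter);
	return ret;
}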
3024 
3025 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
3026 			      struct btrfs_backref_cache *cache, bool is_reloc)
3027 {
3028 	int i;
3029 
3030 	cache->rb_root = RB_ROOT;
3031 	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3032 		INIT_LIST_HEAD(&cache->pending[i]);
3033 	INIT_LIST_HEAD(&cache->pending_edge);
3034 	INIT_LIST_HEAD(&cache->useless_node);
3035 	cache->fs_info = fs_info;
3036 	cache->is_reloc = is_reloc;
3037 }
3038 
3039 struct btrfs_backref_node *btrfs_backref_alloc_node(
3040 		struct btrfs_backref_cache *cache, u64 bytenr, int level)
3041 {
3042 	struct btrfs_backref_node *node;
3043 
3044 	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
3045 	node = kzalloc_obj(*node, GFP_NOFS);
3046 	if (!node)
3047 		return node;
3048 
3049 	INIT_LIST_HEAD(&node->list);
3050 	INIT_LIST_HEAD(&node->upper);
3051 	INIT_LIST_HEAD(&node->lower);
3052 	RB_CLEAR_NODE(&node->rb_node);
3053 	cache->nr_nodes++;
3054 	node->level = level;
3055 	node->bytenr = bytenr;
3056 
3057 	return node;
3058 }
3059 
3060 void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
3061 			     struct btrfs_backref_node *node)
3062 {
3063 	if (node) {
3064 		ASSERT(list_empty(&node->list));
3065 		ASSERT(list_empty(&node->lower));
3066 		ASSERT(node->eb == NULL);
3067 		cache->nr_nodes--;
3068 		btrfs_put_root(node->root);
3069 		kfree(node);
3070 	}
3071 }
3072 
3073 struct btrfs_backref_edge *btrfs_backref_alloc_edge(
3074 		struct btrfs_backref_cache *cache)
3075 {
3076 	struct btrfs_backref_edge *edge;
3077 
3078 	edge = kzalloc_obj(*edge, GFP_NOFS);
3079 	if (edge)
3080 		cache->nr_edges++;
3081 	return edge;
3082 }
3083 
3084 void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
3085 			     struct btrfs_backref_edge *edge)
3086 {
3087 	if (edge) {
3088 		cache->nr_edges--;
3089 		kfree(edge);
3090 	}
3091 }
3092 
3093 void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node)
3094 {
3095 	if (node->locked) {
3096 		btrfs_tree_unlock(node->eb);
3097 		node->locked = 0;
3098 	}
3099 }
3100 
3101 void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node)
3102 {
3103 	if (node->eb) {
3104 		btrfs_backref_unlock_node_buffer(node);
3105 		free_extent_buffer(node->eb);
3106 		node->eb = NULL;
3107 	}
3108 }
3109 
3110 /*
3111  * Drop the backref node from the cache without cleaning up its child
3112  * edges.
3113  *
3114  * This can only be called on a node without parent edges.
3115  * The child edges are still kept as is.
3116  */
3117 void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
3118 			     struct btrfs_backref_node *node)
3119 {
3120 	ASSERT(list_empty(&node->upper));
3121 
3122 	btrfs_backref_drop_node_buffer(node);
3123 	list_del_init(&node->list);
3124 	list_del_init(&node->lower);
3125 	if (!RB_EMPTY_NODE(&node->rb_node))
3126 		rb_erase(&node->rb_node, &tree->rb_root);
3127 	btrfs_backref_free_node(tree, node);
3128 }
3129 
3130 /*
3131  * Drop the backref node from the cache, also cleaning up all its
3132  * upper edges and any uncached nodes in the path.
3133  *
3134  * This cleanup happens bottom up, thus the node should either
3135  * be the lowest node in the cache or a detached node.
3136  */
3137 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
3138 				struct btrfs_backref_node *node)
3139 {
3140 	struct btrfs_backref_edge *edge;
3141 
3142 	if (!node)
3143 		return;
3144 
3145 	while (!list_empty(&node->upper)) {
3146 		edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
3147 					list[LOWER]);
3148 		list_del(&edge->list[LOWER]);
3149 		list_del(&edge->list[UPPER]);
3150 		btrfs_backref_free_edge(cache, edge);
3151 	}
3152 
3153 	btrfs_backref_drop_node(cache, node);
3154 }
3155 
3156 /*
3157  * Release all nodes/edges from current cache
3158  * Release all nodes/edges from the current cache.
3159 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3160 {
3161 	struct btrfs_backref_node *node;
3162 
3163 	while ((node = rb_entry_safe(rb_first(&cache->rb_root),
3164 				     struct btrfs_backref_node, rb_node)))
3165 		btrfs_backref_cleanup_node(cache, node);
3166 
3167 	ASSERT(list_empty(&cache->pending_edge));
3168 	ASSERT(list_empty(&cache->useless_node));
3169 	ASSERT(!cache->nr_nodes);
3170 	ASSERT(!cache->nr_edges);
3171 }
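
/*
 * Editor's sketch, not part of the original file: minimal lifecycle of a
 * backref cache - init it, add a single node to the rb-tree and tear it all
 * down again with btrfs_backref_release_cache(). @level must be within
 * [0, BTRFS_MAX_LEVEL). Names are illustrative only.
 */
static int __maybe_unused demo_backref_cache(struct btrfs_fs_info *fs_info,
					     u64 bytenr, int level)
{
	struct btrfs_backref_cache cache;
	struct btrfs_backref_node *node;

	btrfs_backref_init_cache(fs_info, &cache, false);

	node = btrfs_backref_alloc_node(&cache, bytenr, level);
	if (!node)
		return -ENOMEM;

	/* Only nodes linked into rb_root are freed by the release below. */
	if (rb_simple_insert(&cache.rb_root, &node->simple_node))
		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);

	btrfs_backref_release_cache(&cache);
	return 0;
}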
3172 
3173 static void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
3174 				    struct btrfs_backref_node *lower,
3175 				    struct btrfs_backref_node *upper)
3176 {
3177 	ASSERT(upper && lower && upper->level == lower->level + 1);
3178 	edge->node[LOWER] = lower;
3179 	edge->node[UPPER] = upper;
3180 	list_add_tail(&edge->list[LOWER], &lower->upper);
3181 }
3182 /*
3183  * Handle direct tree backref
3184  *
3185  * A direct tree backref means the backref item shows its parent bytenr
3186  * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
3187  *
3188  * @ref_key:	The converted backref key.
3189  *		For keyed backref, it's the item key.
3190  *		For inlined backref, objectid is the bytenr,
3191  *		type is btrfs_inline_ref_type, offset is
3192  *		btrfs_inline_ref_offset.
3193  */
3194 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3195 				      struct btrfs_key *ref_key,
3196 				      struct btrfs_backref_node *cur)
3197 {
3198 	struct btrfs_backref_edge *edge;
3199 	struct btrfs_backref_node *upper;
3200 	struct rb_node *rb_node;
3201 
3202 	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
3203 
3204 	/* Only reloc root uses backref pointing to itself */
3205 	if (ref_key->objectid == ref_key->offset) {
3206 		struct btrfs_root *root;
3207 
3208 		cur->is_reloc_root = 1;
3209 		/* Only reloc backref cache cares about a specific root */
3210 		if (cache->is_reloc) {
3211 			root = find_reloc_root(cache->fs_info, cur->bytenr);
3212 			if (!root)
3213 				return -ENOENT;
3214 			cur->root = root;
3215 		} else {
3216 			/*
3217 			 * For generic purpose backref cache, reloc root node
3218 			 * is useless.
3219 			 */
3220 			list_add(&cur->list, &cache->useless_node);
3221 		}
3222 		return 0;
3223 	}
3224 
3225 	edge = btrfs_backref_alloc_edge(cache);
3226 	if (!edge)
3227 		return -ENOMEM;
3228 
3229 	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3230 	if (!rb_node) {
3231 		/* Parent node not yet cached */
3232 		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3233 					   cur->level + 1);
3234 		if (!upper) {
3235 			btrfs_backref_free_edge(cache, edge);
3236 			return -ENOMEM;
3237 		}
3238 
3239 		/*
3240 		 * Backrefs for the upper level block aren't cached, add the
3241 		 * block to the pending list.
3242 		 */
3243 		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3244 	} else {
3245 		/* Parent node already cached */
3246 		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
3247 		ASSERT(upper->checked);
3248 		INIT_LIST_HEAD(&edge->list[UPPER]);
3249 	}
3250 	btrfs_backref_link_edge(edge, cur, upper);
3251 	return 0;
3252 }
3253 
3254 /*
3255  * Handle indirect tree backref
3256  *
3257  * An indirect tree backref means we only know which tree the node belongs to.
3258  * We still need to do a tree search to find out the parents. This is for
3259  * TREE_BLOCK_REF backrefs (keyed or inlined).
3260  *
3261  * @trans:	Transaction handle.
3262  * @ref_key:	The same as @ref_key in handle_direct_tree_backref().
3263  * @tree_key:	The first key of this tree block.
3264  * @path:	A clean (released) path, to avoid allocating a path every time
3265  *		the function gets called.
3266  */
3267 static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
3268 					struct btrfs_backref_cache *cache,
3269 					struct btrfs_path *path,
3270 					struct btrfs_key *ref_key,
3271 					struct btrfs_key *tree_key,
3272 					struct btrfs_backref_node *cur)
3273 {
3274 	struct btrfs_fs_info *fs_info = cache->fs_info;
3275 	struct btrfs_backref_node *upper;
3276 	struct btrfs_backref_node *lower;
3277 	struct btrfs_backref_edge *edge;
3278 	struct extent_buffer *eb;
3279 	struct btrfs_root *root;
3280 	struct rb_node *rb_node;
3281 	int level;
3282 	bool need_check = true;
3283 	int ret;
3284 
3285 	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
3286 	if (IS_ERR(root))
3287 		return PTR_ERR(root);
3288 
3289 	/* We shouldn't be using backref cache for non-shareable roots. */
3290 	if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
3291 		btrfs_put_root(root);
3292 		return -EUCLEAN;
3293 	}
3294 
3295 	if (btrfs_root_level(&root->root_item) == cur->level) {
3296 		/* Tree root */
3297 		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
3298 		/*
3299 		 * For reloc backref cache, we may ignore reloc root.  But for
3300 		 * general purpose backref cache, we can't rely on
3301 		 * btrfs_should_ignore_reloc_root() as it may conflict with
3302 		 * current running relocation and lead to missing root.
3303 		 *
3304 		 * For general purpose backref cache, reloc root detection is
3305 		 * completely relying on direct backref (key->offset is parent
3306 		 * bytenr), thus only do such check for reloc cache.
3307 		 */
3308 		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3309 			btrfs_put_root(root);
3310 			list_add(&cur->list, &cache->useless_node);
3311 		} else {
3312 			cur->root = root;
3313 		}
3314 		return 0;
3315 	}
3316 
3317 	level = cur->level + 1;
3318 
3319 	/* Search the tree to find parent blocks referring to the block */
3320 	path->search_commit_root = true;
3321 	path->skip_locking = true;
3322 	path->lowest_level = level;
3323 	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
3324 	path->lowest_level = 0;
3325 	if (ret < 0) {
3326 		btrfs_put_root(root);
3327 		return ret;
3328 	}
3329 	if (ret > 0 && path->slots[level] > 0)
3330 		path->slots[level]--;
3331 
3332 	eb = path->nodes[level];
3333 	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
3334 		btrfs_err(fs_info,
3335 "couldn't find block (%llu) (level %d) in tree (%llu) with key " BTRFS_KEY_FMT,
3336 			  cur->bytenr, level - 1, btrfs_root_id(root),
3337 			  BTRFS_KEY_FMT_VALUE(tree_key));
3338 		btrfs_put_root(root);
3339 		ret = -ENOENT;
3340 		goto out;
3341 	}
3342 	lower = cur;
3343 
3344 	/* Add all nodes and edges in the path */
3345 	for (; level < BTRFS_MAX_LEVEL; level++) {
3346 		if (!path->nodes[level]) {
3347 			ASSERT(btrfs_root_bytenr(&root->root_item) ==
3348 			       lower->bytenr);
3349 			/* Same as previous should_ignore_reloc_root() call */
3350 			if (btrfs_should_ignore_reloc_root(root) &&
3351 			    cache->is_reloc) {
3352 				btrfs_put_root(root);
3353 				list_add(&lower->list, &cache->useless_node);
3354 			} else {
3355 				lower->root = root;
3356 			}
3357 			break;
3358 		}
3359 
3360 		edge = btrfs_backref_alloc_edge(cache);
3361 		if (!edge) {
3362 			btrfs_put_root(root);
3363 			ret = -ENOMEM;
3364 			goto out;
3365 		}
3366 
3367 		eb = path->nodes[level];
3368 		rb_node = rb_simple_search(&cache->rb_root, eb->start);
3369 		if (!rb_node) {
3370 			upper = btrfs_backref_alloc_node(cache, eb->start,
3371 							 lower->level + 1);
3372 			if (!upper) {
3373 				btrfs_put_root(root);
3374 				btrfs_backref_free_edge(cache, edge);
3375 				ret = -ENOMEM;
3376 				goto out;
3377 			}
3378 			upper->owner = btrfs_header_owner(eb);
3379 
3380 			/* We shouldn't be using backref cache for non-shareable roots. */
3381 			if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
3382 				btrfs_put_root(root);
3383 				btrfs_backref_free_edge(cache, edge);
3384 				btrfs_backref_free_node(cache, upper);
3385 				ret = -EUCLEAN;
3386 				goto out;
3387 			}
3388 
3389 			/*
3390 			 * If we know the block isn't shared we can avoid
3391 			 * checking its backrefs.
3392 			 */
3393 			if (btrfs_block_can_be_shared(trans, root, eb))
3394 				upper->checked = 0;
3395 			else
3396 				upper->checked = 1;
3397 
3398 			/*
3399 			 * Add the block to the pending list if we need to check its
3400 			 * backrefs; we only do this once while walking up a
3401 			 * tree as we will catch anything else later on.
3402 			 */
3403 			if (!upper->checked && need_check) {
3404 				need_check = false;
3405 				list_add_tail(&edge->list[UPPER],
3406 					      &cache->pending_edge);
3407 			} else {
3408 				if (upper->checked)
3409 					need_check = true;
3410 				INIT_LIST_HEAD(&edge->list[UPPER]);
3411 			}
3412 		} else {
3413 			upper = rb_entry(rb_node, struct btrfs_backref_node,
3414 					 rb_node);
3415 			ASSERT(upper->checked);
3416 			INIT_LIST_HEAD(&edge->list[UPPER]);
3417 			if (!upper->owner)
3418 				upper->owner = btrfs_header_owner(eb);
3419 		}
3420 		btrfs_backref_link_edge(edge, lower, upper);
3421 
3422 		if (rb_node) {
3423 			btrfs_put_root(root);
3424 			break;
3425 		}
3426 		lower = upper;
3427 		upper = NULL;
3428 	}
3429 out:
3430 	btrfs_release_path(path);
3431 	return ret;
3432 }
3433 
3434 /*
3435  * Add backref node @cur into @cache.
3436  *
3437  * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
3438  *	 links aren't yet bi-directional. The caller needs to finish
3439  *	 such links by calling btrfs_backref_finish_upper_links().
3440  *
3441  * @trans:	Transaction handle.
3442  * @path:	Released path for indirect tree backref lookup
3443  * @iter:	Released backref iter for extent tree search
3444  * @node_key:	The first key of the tree block
3445  */
3446 int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
3447 				struct btrfs_backref_cache *cache,
3448 				struct btrfs_path *path,
3449 				struct btrfs_backref_iter *iter,
3450 				struct btrfs_key *node_key,
3451 				struct btrfs_backref_node *cur)
3452 {
3453 	struct btrfs_backref_edge *edge;
3454 	struct btrfs_backref_node *exist;
3455 	int ret;
3456 
3457 	ret = btrfs_backref_iter_start(iter, cur->bytenr);
3458 	if (ret < 0)
3459 		return ret;
3460 	/*
3461 	 * We skip the first btrfs_tree_block_info, as we don't use the key
3462 	 * stored in it, but fetch it from the tree block
3463 	 */
3464 	if (btrfs_backref_has_tree_block_info(iter)) {
3465 		ret = btrfs_backref_iter_next(iter);
3466 		if (ret < 0)
3467 			goto out;
3468 		/* No extra backref? This means the tree block is corrupted */
3469 		if (unlikely(ret > 0)) {
3470 			ret = -EUCLEAN;
3471 			goto out;
3472 		}
3473 	}
3474 	WARN_ON(cur->checked);
3475 	if (!list_empty(&cur->upper)) {
3476 		/*
3477 		 * The backref was added previously when processing backref of
3478 		 * The backref was added previously when processing a backref of
3479 		 */
3480 		ASSERT(list_is_singular(&cur->upper));
3481 		edge = list_first_entry(&cur->upper, struct btrfs_backref_edge,
3482 					list[LOWER]);
3483 		ASSERT(list_empty(&edge->list[UPPER]));
3484 		exist = edge->node[UPPER];
3485 		/*
3486 		 * Add the upper level block to the pending list if we need to
3487 		 * check its backrefs.
3488 		 */
3489 		if (!exist->checked)
3490 			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3491 	} else {
3492 		exist = NULL;
3493 	}
3494 
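	/*
	 * Walk every backref item of this tree block (inline refs first,
	 * then keyed items), adding an edge and, when needed, a new upper
	 * node for each usable tree backref.
	 */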
3495 	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3496 		struct extent_buffer *eb;
3497 		struct btrfs_key key;
3498 		int type;
3499 
3500 		cond_resched();
3501 		eb = iter->path->nodes[0];
3502 
3503 		key.objectid = iter->bytenr;
3504 		if (btrfs_backref_iter_is_inline_ref(iter)) {
3505 			struct btrfs_extent_inline_ref *iref;
3506 
3507 			/* Update key for inline backref */
3508 			iref = (struct btrfs_extent_inline_ref *)
3509 				((unsigned long)iter->cur_ptr);
3510 			type = btrfs_get_extent_inline_ref_type(eb, iref,
3511 							BTRFS_REF_TYPE_BLOCK);
3512 			if (unlikely(type == BTRFS_REF_TYPE_INVALID)) {
3513 				ret = -EUCLEAN;
3514 				goto out;
3515 			}
3516 			key.type = type;
3517 			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3518 		} else {
3519 			key.type = iter->cur_key.type;
3520 			key.offset = iter->cur_key.offset;
3521 		}
3522 
3523 		/*
3524 		 * The parent node was found and matches the current inline
3525 		 * ref, so there is no need to rebuild this node for it.
3526 		 */
3527 		if (exist &&
3528 		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3529 		      exist->owner == key.offset) ||
3530 		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3531 		      exist->bytenr == key.offset))) {
3532 			exist = NULL;
3533 			continue;
3534 		}
3535 
3536 		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3537 		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3538 			ret = handle_direct_tree_backref(cache, &key, cur);
3539 			if (ret < 0)
3540 				goto out;
3541 		} else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
3542 			/*
3543 			 * For BTRFS_TREE_BLOCK_REF_KEY the ref offset is the
3544 			 * root objectid, so we need to search that root's tree
3545 			 * to find the parent bytenr.
3546 			 */
3547 			ret = handle_indirect_tree_backref(trans, cache, path,
3548 							   &key, node_key, cur);
3549 			if (ret < 0)
3550 				goto out;
3551 		}
3552 		/*
3553 		 * Unrecognized tree backref items (if they can pass the
3554 		 * tree-checker at all) are ignored.
3555 		 */
3556 	}
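	/*
	 * A positive return from the iterator just means we ran out of
	 * backref items, so normalize it to 0. Any leftover @exist means the
	 * pre-existing upper edge never matched a backref item, which should
	 * not happen.
	 */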
3557 	ret = 0;
3558 	cur->checked = 1;
3559 	WARN_ON(exist);
3560 out:
3561 	btrfs_backref_iter_release(iter);
3562 	return ret;
3563 }
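
/*
 * A rough caller sketch, modeled on build_backref_tree() in relocation.c
 * (allocation and error handling trimmed, so treat it as an illustration
 * rather than a copy of that function): iterate breadth-first until the
 * pending edge list is drained, then finish the upper links.
 *
 *	cur = node;
 *	do {
 *		ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
 *						  &node_key, cur);
 *		if (ret < 0)
 *			goto out;
 *
 *		edge = list_first_entry_or_null(&cache->pending_edge,
 *				struct btrfs_backref_edge, list[UPPER]);
 *		if (edge) {
 *			list_del_init(&edge->list[UPPER]);
 *			cur = edge->node[UPPER];
 *		}
 *	} while (edge);
 *
 *	ret = btrfs_backref_finish_upper_links(cache, node);
 * out:
 *	if (ret)
 *		btrfs_backref_error_cleanup(cache, node);
 */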
3564 
3565 /*
3566  * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3567  */
3568 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3569 				     struct btrfs_backref_node *start)
3570 {
3571 	struct list_head *useless_node = &cache->useless_node;
3572 	struct btrfs_backref_edge *edge;
3573 	struct rb_node *rb_node;
3574 	LIST_HEAD(pending_edge);
3575 
3576 	ASSERT(start->checked);
3577 
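	/* Insert @start into the cache; an existing entry at this bytenr means the cache is corrupted. */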
3578 	rb_node = rb_simple_insert(&cache->rb_root, &start->simple_node);
3579 	if (rb_node)
3580 		btrfs_backref_panic(cache->fs_info, start->bytenr, -EEXIST);
3581 
3582 	/*
3583 	 * Use breadth-first search to iterate all related edges.
3584 	 *
3585 	 * The starting points are all the edges of this node.
3586 	 */
3587 	list_for_each_entry(edge, &start->upper, list[LOWER])
3588 		list_add_tail(&edge->list[UPPER], &pending_edge);
3589 
3590 	while (!list_empty(&pending_edge)) {
3591 		struct btrfs_backref_node *upper;
3592 		struct btrfs_backref_node *lower;
3593 
3594 		edge = list_first_entry(&pending_edge,
3595 				struct btrfs_backref_edge, list[UPPER]);
3596 		list_del_init(&edge->list[UPPER]);
3597 		upper = edge->node[UPPER];
3598 		lower = edge->node[LOWER];
3599 
3600 		/* Parent is detached, no need to keep any edges */
3601 		if (upper->detached) {
3602 			list_del(&edge->list[LOWER]);
3603 			btrfs_backref_free_edge(cache, edge);
3604 
3605 			/* The lower node is an orphan, queue it for cleanup */
3606 			if (list_empty(&lower->upper))
3607 				list_add(&lower->list, useless_node);
3608 			continue;
3609 		}
3610 
3611 		/*
3612 		 * None of the nodes added by the current build_backref_tree()
3613 		 * call has been linked to the cache rb tree yet. So if
3614 		 * upper->rb_node is populated, this means a cache hit: we only
3615 		 * need to link the edge, as @upper and all its parents have
3616 		 * already been linked.
3617 		 */
3618 		if (!RB_EMPTY_NODE(&upper->rb_node)) {
3619 			list_add_tail(&edge->list[UPPER], &upper->lower);
3620 			continue;
3621 		}
3622 
3623 		/* Sanity check, we shouldn't have any unchecked nodes */
3624 		/* Sanity check: we shouldn't have any unchecked nodes. */
3625 			DEBUG_WARN("we should not have any unchecked nodes");
3626 			return -EUCLEAN;
3627 		}
3628 
3629 		rb_node = rb_simple_insert(&cache->rb_root, &upper->simple_node);
3630 		if (unlikely(rb_node))
3631 			btrfs_backref_panic(cache->fs_info, upper->bytenr, -EEXIST);
3632 
3633 		list_add_tail(&edge->list[UPPER], &upper->lower);
3634 
3635 		/*
3636 		 * Also queue all the parent edges of this uncached node
3637 		 * to finish the upper linkage
3638 		 */
3639 		list_for_each_entry(edge, &upper->upper, list[LOWER])
3640 			list_add_tail(&edge->list[UPPER], &pending_edge);
3641 	}
3642 	return 0;
3643 }
3644 
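/*
 * Clean up a partially built backref cache after an error.
 *
 * Drain cache->useless_node and cache->pending_edge, freeing every edge and
 * every node that ends up with no uppers and no rb tree linkage, and finally
 * clean up @node itself (unless it was already dropped as part of the walk).
 */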
3645 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3646 				 struct btrfs_backref_node *node)
3647 {
3648 	struct btrfs_backref_node *lower;
3649 	struct btrfs_backref_node *upper;
3650 	struct btrfs_backref_edge *edge;
3651 
3652 	while (!list_empty(&cache->useless_node)) {
3653 		lower = list_first_entry(&cache->useless_node,
3654 				   struct btrfs_backref_node, list);
3655 		list_del_init(&lower->list);
3656 	}
3657 	while (!list_empty(&cache->pending_edge)) {
3658 		edge = list_first_entry(&cache->pending_edge,
3659 				struct btrfs_backref_edge, list[UPPER]);
3660 		list_del(&edge->list[UPPER]);
3661 		list_del(&edge->list[LOWER]);
3662 		lower = edge->node[LOWER];
3663 		upper = edge->node[UPPER];
3664 		btrfs_backref_free_edge(cache, edge);
3665 
3666 		/*
3667 		 * Lower is no longer linked to any upper backref nodes and
3668 		 * isn't in the cache, so we can free it ourselves.
3669 		 */
3670 		if (list_empty(&lower->upper) &&
3671 		    RB_EMPTY_NODE(&lower->rb_node))
3672 			list_add(&lower->list, &cache->useless_node);
3673 
3674 		if (!RB_EMPTY_NODE(&upper->rb_node))
3675 			continue;
3676 
3677 		/* Add this node's upper edges to the list to process. */
3678 		list_for_each_entry(edge, &upper->upper, list[LOWER])
3679 			list_add_tail(&edge->list[UPPER],
3680 				      &cache->pending_edge);
3681 		if (list_empty(&upper->upper))
3682 			list_add(&upper->list, &cache->useless_node);
3683 	}
3684 
3685 	while (!list_empty(&cache->useless_node)) {
3686 		lower = list_first_entry(&cache->useless_node,
3687 				   struct btrfs_backref_node, list);
3688 		list_del_init(&lower->list);
3689 		if (lower == node)
3690 			node = NULL;
3691 		btrfs_backref_drop_node(cache, lower);
3692 	}
3693 
3694 	btrfs_backref_cleanup_node(cache, node);
3695 	ASSERT(list_empty(&cache->useless_node) &&
3696 	       list_empty(&cache->pending_edge));
3697 }
3698