xref: /linux/fs/btrfs/relocation.c (revision 0eb6c12491ca44140a5facdaee3c8cb6f41202d2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/error-injection.h>
13 #include "ctree.h"
14 #include "disk-io.h"
15 #include "transaction.h"
16 #include "volumes.h"
17 #include "locking.h"
18 #include "btrfs_inode.h"
19 #include "async-thread.h"
20 #include "free-space-cache.h"
21 #include "qgroup.h"
22 #include "print-tree.h"
23 #include "delalloc-space.h"
24 #include "block-group.h"
25 #include "backref.h"
26 #include "misc.h"
27 #include "subpage.h"
28 #include "zoned.h"
29 #include "inode-item.h"
30 #include "space-info.h"
31 #include "fs.h"
32 #include "accessors.h"
33 #include "extent-tree.h"
34 #include "root-tree.h"
35 #include "file-item.h"
36 #include "relocation.h"
37 #include "super.h"
38 #include "tree-checker.h"
39 #include "raid-stripe-tree.h"
40 #include "free-space-tree.h"
41 
42 /*
43  * Relocation overview
44  *
45  * [What does relocation do]
46  *
47  * The objective of relocation is to relocate all extents of the target block
48  * group to other block groups.
49  * This is utilized by resize (shrink only), profile conversion, space
50  * compaction, or the balance routine to spread chunks over devices.
51  *
52  * 		Before		|		After
53  * ------------------------------------------------------------------
54  *  BG A: 10 data extents	| BG A: deleted
55  *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
56  *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
57  *
58  * [How does relocation work]
59  *
60  * 1.   Mark the target block group read-only
61  *      New extents won't be allocated from the target block group.
62  *
63  * 2.1  Record each extent in the target block group
64  *      To build a proper map of extents to be relocated.
65  *
66  * 2.2  Build data reloc tree and reloc trees
67  *      Data reloc tree will contain an inode, recording all newly relocated
68  *      data extents.
69  *      There will be only one data reloc tree for one data block group.
70  *
71  *      Reloc tree will be a special snapshot of its source tree, containing
72  *      relocated tree blocks.
73  * Each tree referring to a tree block in the target block group will get its
74  *      reloc tree built.
75  *
76  * 2.3  Swap source tree with its corresponding reloc tree
77  *      Each involved tree only refers to new extents after swap.
78  *
79  * 3.   Cleanup reloc trees and data reloc tree.
80  *      As old extents in the target block group are still referenced by reloc
81  *      trees, we need to clean them up before really freeing the target block
82  *      group.
83  *
84  * The main complexity is in steps 2.2 and 2.3.
85  *
86  * The entry point of relocation is relocate_block_group() function.
87  */
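
/*
 * A rough mapping of the steps above to the functions in this file (meant
 * as a reading aid, not an exhaustive call graph):
 *
 * 2.2  btrfs_init_reloc_root() / create_reloc_root() build the reloc trees,
 *      and build_backref_tree() caches the backref information needed to
 *      find every tree that references a relocated tree block.
 * 2.3  merge_reloc_roots() -> merge_reloc_root() -> replace_path() swap the
 *      relocated tree blocks back into the fs trees.
 * 3.   insert_dirty_subvol() and clean_dirty_subvols() queue and then drop
 *      the reloc trees once the merge is done.
 */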
88 
89 #define RELOCATION_RESERVED_NODES	256
90 /*
91  * map address of tree root to tree
92  */
93 struct mapping_node {
94 	union {
95 		/* Use rb_simple_node for search/insert */
96 		struct {
97 			struct rb_node rb_node;
98 			u64 bytenr;
99 		};
100 
101 		struct rb_simple_node simple_node;
102 	};
103 	void *data;
104 };
105 
106 struct mapping_tree {
107 	struct rb_root rb_root;
108 	spinlock_t lock;
109 };
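
/*
 * Note on the unions above and in struct tree_block below: the anonymous
 * { rb_node, bytenr } pair mirrors struct rb_simple_node, so the same entry
 * can be indexed by bytenr through rb_simple_search()/rb_simple_insert().
 *
 * The mapping_tree keys each reloc root by the bytenr of its (commit) root
 * node, see __add_reloc_root(), __update_reloc_root() and find_reloc_root().
 * All accesses to the rb tree are done under mapping_tree::lock.
 */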
110 
111 /*
112  * represent a tree block to process
113  */
114 struct tree_block {
115 	union {
116 		/* Use rb_simple_node for search/insert */
117 		struct {
118 			struct rb_node rb_node;
119 			u64 bytenr;
120 		};
121 
122 		struct rb_simple_node simple_node;
123 	};
124 	u64 owner;
125 	struct btrfs_key key;
126 	u8 level;
127 	bool key_ready;
128 };
129 
130 #define MAX_EXTENTS 128
131 
132 struct file_extent_cluster {
133 	u64 start;
134 	u64 end;
135 	u64 boundary[MAX_EXTENTS];
136 	unsigned int nr;
137 	u64 owning_root;
138 };
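
/*
 * Roughly speaking, a cluster describes a run of data extents queued for
 * copying into the data reloc inode: [start, end] covers the whole run,
 * boundary[0..nr-1] records where each individual extent begins (at most
 * MAX_EXTENTS per cluster), and owning_root carries the id of the root that
 * owned the original extents.
 */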
139 
140 /* Stages of data relocation. */
141 enum reloc_stage {
142 	MOVE_DATA_EXTENTS,
143 	UPDATE_DATA_PTRS
144 };
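
/*
 * Data block groups are relocated in two passes: in the MOVE_DATA_EXTENTS
 * stage the data is copied into the data reloc inode, in the UPDATE_DATA_PTRS
 * stage the file extent items are rewritten to point at the new locations
 * (see replace_file_extents(), which only does work in the latter stage).
 */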
145 
146 struct reloc_control {
147 	/* block group to relocate */
148 	struct btrfs_block_group *block_group;
149 	/* extent tree */
150 	struct btrfs_root *extent_root;
151 	/* inode for moving data */
152 	struct inode *data_inode;
153 
154 	struct btrfs_block_rsv *block_rsv;
155 
156 	struct btrfs_backref_cache backref_cache;
157 
158 	struct file_extent_cluster cluster;
159 	/* tree blocks that have been processed */
160 	struct extent_io_tree processed_blocks;
161 	/* map start of tree root to corresponding reloc tree */
162 	struct mapping_tree reloc_root_tree;
163 	/* list of reloc trees */
164 	struct list_head reloc_roots;
165 	/* list of subvolume trees that get relocated */
166 	struct list_head dirty_subvol_roots;
167 	/* size of metadata reservation for merging reloc trees */
168 	u64 merging_rsv_size;
169 	/* size of relocated tree nodes */
170 	u64 nodes_relocated;
171 	/* reserved size for block group relocation */
172 	u64 reserved_bytes;
173 
174 	u64 search_start;
175 	u64 extents_found;
176 
177 	enum reloc_stage stage;
178 	bool create_reloc_tree;
179 	bool merge_reloc_tree;
180 	bool found_file_extent;
181 };
182 
183 static void mark_block_processed(struct reloc_control *rc,
184 				 struct btrfs_backref_node *node)
185 {
186 	u32 blocksize;
187 
188 	if (node->level == 0 ||
189 	    in_range(node->bytenr, rc->block_group->start,
190 		     rc->block_group->length)) {
191 		blocksize = rc->extent_root->fs_info->nodesize;
192 		btrfs_set_extent_bit(&rc->processed_blocks, node->bytenr,
193 				     node->bytenr + blocksize - 1, EXTENT_DIRTY,
194 				     NULL);
195 	}
196 	node->processed = 1;
197 }
198 
199 /*
200  * walk up backref nodes until reaching the node that represents the tree root
201  */
202 static struct btrfs_backref_node *walk_up_backref(
203 		struct btrfs_backref_node *node,
204 		struct btrfs_backref_edge *edges[], int *index)
205 {
206 	struct btrfs_backref_edge *edge;
207 	int idx = *index;
208 
209 	while (!list_empty(&node->upper)) {
210 		edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
211 					list[LOWER]);
212 		edges[idx++] = edge;
213 		node = edge->node[UPPER];
214 	}
215 	BUG_ON(node->detached);
216 	*index = idx;
217 	return node;
218 }
219 
220 /*
221  * walk down backref nodes to find start of next reference path
222  */
223 static struct btrfs_backref_node *walk_down_backref(
224 		struct btrfs_backref_edge *edges[], int *index)
225 {
226 	struct btrfs_backref_edge *edge;
227 	struct btrfs_backref_node *lower;
228 	int idx = *index;
229 
230 	while (idx > 0) {
231 		edge = edges[idx - 1];
232 		lower = edge->node[LOWER];
233 		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
234 			idx--;
235 			continue;
236 		}
237 		edge = list_first_entry(&edge->list[LOWER], struct btrfs_backref_edge,
238 					list[LOWER]);
239 		edges[idx - 1] = edge;
240 		*index = idx;
241 		return edge->node[UPPER];
242 	}
243 	*index = 0;
244 	return NULL;
245 }
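
/*
 * Together, walk_up_backref() and walk_down_backref() enumerate every
 * reference path from a block to the tree roots using it: walk_up follows
 * the first upper edge at each level until it hits a root and records the
 * edges taken in edges[], walk_down then backtracks through edges[] to the
 * deepest node that still has an unvisited sibling edge and switches to it.
 * Callers alternate the two until walk_down returns NULL.
 */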
246 
247 static bool reloc_root_is_dead(const struct btrfs_root *root)
248 {
249 	/*
250 	 * Pair with set_bit/clear_bit in clean_dirty_subvols and
251 	 * btrfs_update_reloc_root. We need to see the updated bit before
252 	 * trying to access reloc_root
253 	 */
254 	smp_rmb();
255 	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
256 		return true;
257 	return false;
258 }
259 
260 /*
261  * Check if this subvolume tree has valid reloc tree.
262  *
263  * A reloc tree after swap is considered dead, thus not valid.
264  * This is enough for most callers, as they don't distinguish dead reloc root
265  * from no reloc root.  But btrfs_should_ignore_reloc_root() below is a
266  * special case.
267  */
268 static bool have_reloc_root(const struct btrfs_root *root)
269 {
270 	if (reloc_root_is_dead(root))
271 		return false;
272 	if (!root->reloc_root)
273 		return false;
274 	return true;
275 }
276 
277 bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
278 {
279 	struct btrfs_root *reloc_root;
280 
281 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
282 		return false;
283 
284 	/* This root has been merged with its reloc tree, we can ignore it */
285 	if (reloc_root_is_dead(root))
286 		return true;
287 
288 	reloc_root = root->reloc_root;
289 	if (!reloc_root)
290 		return false;
291 
292 	if (btrfs_header_generation(reloc_root->commit_root) ==
293 	    root->fs_info->running_transaction->transid)
294 		return false;
295 	/*
296 	 * If there is a reloc tree and it was created in a previous transaction,
297 	 * backref lookup can find the reloc tree, so the backref node for the
298 	 * fs tree root is useless for relocation.
299 	 */
300 	return true;
301 }
302 
303 /*
304  * find reloc tree by address of tree root
305  */
306 struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
307 {
308 	struct reloc_control *rc = fs_info->reloc_ctl;
309 	struct rb_node *rb_node;
310 	struct mapping_node *node;
311 	struct btrfs_root *root = NULL;
312 
313 	ASSERT(rc);
314 	spin_lock(&rc->reloc_root_tree.lock);
315 	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
316 	if (rb_node) {
317 		node = rb_entry(rb_node, struct mapping_node, rb_node);
318 		root = node->data;
319 	}
320 	spin_unlock(&rc->reloc_root_tree.lock);
321 	return btrfs_grab_root(root);
322 }
323 
324 /*
325  * For useless nodes, do two major clean ups:
326  *
327  * - Cleanup the children edges and nodes
328  *   If child node is also orphan (no parent) during cleanup, then the child
329  *   node will also be cleaned up.
330  *
331  * - Free up leaves (level 0), keep non-leaf nodes detached
332  *   Nodes (level > 0) stay in the cache, only marked as "detached"
333  *
334  * Return false if @node is not in the @useless_nodes list.
335  * Return true if @node is in the @useless_nodes list.
336  */
337 static bool handle_useless_nodes(struct reloc_control *rc,
338 				 struct btrfs_backref_node *node)
339 {
340 	struct btrfs_backref_cache *cache = &rc->backref_cache;
341 	struct list_head *useless_node = &cache->useless_node;
342 	bool ret = false;
343 
344 	while (!list_empty(useless_node)) {
345 		struct btrfs_backref_node *cur;
346 
347 		cur = list_first_entry(useless_node, struct btrfs_backref_node,
348 				 list);
349 		list_del_init(&cur->list);
350 
351 		/* Only tree root nodes can be added to @useless_nodes */
352 		ASSERT(list_empty(&cur->upper));
353 
354 		if (cur == node)
355 			ret = true;
356 
357 		/* Cleanup the lower edges */
358 		while (!list_empty(&cur->lower)) {
359 			struct btrfs_backref_edge *edge;
360 			struct btrfs_backref_node *lower;
361 
362 			edge = list_first_entry(&cur->lower, struct btrfs_backref_edge,
363 						list[UPPER]);
364 			list_del(&edge->list[UPPER]);
365 			list_del(&edge->list[LOWER]);
366 			lower = edge->node[LOWER];
367 			btrfs_backref_free_edge(cache, edge);
368 
369 			/* Child node is also orphan, queue for cleanup */
370 			if (list_empty(&lower->upper))
371 				list_add(&lower->list, useless_node);
372 		}
373 		/* Mark this block processed for relocation */
374 		mark_block_processed(rc, cur);
375 
376 		/*
377 		 * Backref nodes for tree leaves are deleted from the cache.
378 		 * Backref nodes for upper level tree blocks are left in the
379 		 * cache to avoid unnecessary backref lookup.
380 		 */
381 		if (cur->level > 0) {
382 			cur->detached = 1;
383 		} else {
384 			rb_erase(&cur->rb_node, &cache->rb_root);
385 			btrfs_backref_free_node(cache, cur);
386 		}
387 	}
388 	return ret;
389 }
390 
391 /*
392  * Build backref tree for a given tree block. Root of the backref tree
393  * corresponds to the tree block, leaves of the backref tree correspond to roots of
394  * b-trees that reference the tree block.
395  *
396  * The basic idea of this function is to check backrefs of a given block to find
397  * upper level blocks that reference the block, and then check backrefs of
398  * these upper level blocks recursively. The recursion stops when the tree root
399  * is reached or the backrefs for the block are cached.
400  *
401  * NOTE: if we find that backrefs for a block are cached, we know backrefs for
402  * all upper level blocks that directly/indirectly reference the block are also
403  * cached.
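 *
 * Implementation wise, this is a breadth-first search: newly found upper
 * blocks are queued on cache->pending_edge by btrfs_backref_add_tree_node(),
 * btrfs_backref_finish_upper_links() then completes the upper linkage, and
 * handle_useless_nodes() prunes root nodes (and their now-orphaned children)
 * that relocation does not need to track.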
404  */
405 static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
406 			struct btrfs_trans_handle *trans,
407 			struct reloc_control *rc, struct btrfs_key *node_key,
408 			int level, u64 bytenr)
409 {
410 	struct btrfs_backref_iter *iter;
411 	struct btrfs_backref_cache *cache = &rc->backref_cache;
412 	/* For searching parent of TREE_BLOCK_REF */
413 	struct btrfs_path *path;
414 	struct btrfs_backref_node *cur;
415 	struct btrfs_backref_node *node = NULL;
416 	struct btrfs_backref_edge *edge;
417 	int ret;
418 
419 	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
420 	if (!iter)
421 		return ERR_PTR(-ENOMEM);
422 	path = btrfs_alloc_path();
423 	if (!path) {
424 		ret = -ENOMEM;
425 		goto out;
426 	}
427 
428 	node = btrfs_backref_alloc_node(cache, bytenr, level);
429 	if (!node) {
430 		ret = -ENOMEM;
431 		goto out;
432 	}
433 
434 	cur = node;
435 
436 	/* Breadth-first search to build backref cache */
437 	do {
438 		ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
439 						  node_key, cur);
440 		if (ret < 0)
441 			goto out;
442 
443 		edge = list_first_entry_or_null(&cache->pending_edge,
444 				struct btrfs_backref_edge, list[UPPER]);
445 		/*
446 		 * The pending list isn't empty, take the first block to
447 		 * process
448 		 */
449 		if (edge) {
450 			list_del_init(&edge->list[UPPER]);
451 			cur = edge->node[UPPER];
452 		}
453 	} while (edge);
454 
455 	/* Finish the upper linkage of newly added edges/nodes */
456 	ret = btrfs_backref_finish_upper_links(cache, node);
457 	if (ret < 0)
458 		goto out;
459 
460 	if (handle_useless_nodes(rc, node))
461 		node = NULL;
462 out:
463 	btrfs_free_path(iter->path);
464 	kfree(iter);
465 	btrfs_free_path(path);
466 	if (ret) {
467 		btrfs_backref_error_cleanup(cache, node);
468 		return ERR_PTR(ret);
469 	}
470 	ASSERT(!node || !node->detached);
471 	ASSERT(list_empty(&cache->useless_node) &&
472 	       list_empty(&cache->pending_edge));
473 	return node;
474 }
475 
476 /*
477  * helper to add 'address of tree root -> reloc tree' mapping
478  */
479 static int __add_reloc_root(struct btrfs_root *root)
480 {
481 	struct btrfs_fs_info *fs_info = root->fs_info;
482 	struct rb_node *rb_node;
483 	struct mapping_node *node;
484 	struct reloc_control *rc = fs_info->reloc_ctl;
485 
486 	node = kmalloc_obj(*node, GFP_NOFS);
487 	if (!node)
488 		return -ENOMEM;
489 
490 	node->bytenr = root->commit_root->start;
491 	node->data = root;
492 
493 	spin_lock(&rc->reloc_root_tree.lock);
494 	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
495 	spin_unlock(&rc->reloc_root_tree.lock);
496 	if (rb_node) {
497 		btrfs_err(fs_info,
498 			    "Duplicate root found for start=%llu while inserting into relocation tree",
499 			    node->bytenr);
500 		return -EEXIST;
501 	}
502 
503 	list_add_tail(&root->root_list, &rc->reloc_roots);
504 	return 0;
505 }
506 
507 /*
508  * helper to delete the 'address of tree root -> reloc tree'
509  * mapping
510  */
511 static void __del_reloc_root(struct btrfs_root *root)
512 {
513 	struct btrfs_fs_info *fs_info = root->fs_info;
514 	struct rb_node *rb_node;
515 	struct mapping_node AUTO_KFREE(node);
516 	struct reloc_control *rc = fs_info->reloc_ctl;
517 	bool put_ref = false;
518 
519 	if (rc && root->node) {
520 		spin_lock(&rc->reloc_root_tree.lock);
521 		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
522 					   root->commit_root->start);
523 		if (rb_node) {
524 			node = rb_entry(rb_node, struct mapping_node, rb_node);
525 			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
526 			RB_CLEAR_NODE(&node->rb_node);
527 		}
528 		spin_unlock(&rc->reloc_root_tree.lock);
529 		ASSERT(!node || (struct btrfs_root *)node->data == root);
530 	}
531 
532 	/*
533 	 * We only put the reloc root here if it's on the list.  There's a lot
534 	 * of places where the pattern is to splice the rc->reloc_roots, process
535 	 * the reloc roots, and then add the reloc root back onto
536 	 * rc->reloc_roots.  If we call __del_reloc_root while it's off of the
537 	 * list we don't want the reference to be dropped, because the code
538 	 * manipulating the list is in charge of the reference.
539 	 */
540 	spin_lock(&fs_info->trans_lock);
541 	if (!list_empty(&root->root_list)) {
542 		put_ref = true;
543 		list_del_init(&root->root_list);
544 	}
545 	spin_unlock(&fs_info->trans_lock);
546 	if (put_ref)
547 		btrfs_put_root(root);
548 }
549 
550 /*
551  * helper to update the 'address of tree root -> reloc tree'
552  * mapping
553  */
554 static int __update_reloc_root(struct btrfs_root *root)
555 {
556 	struct btrfs_fs_info *fs_info = root->fs_info;
557 	struct rb_node *rb_node;
558 	struct mapping_node *node = NULL;
559 	struct reloc_control *rc = fs_info->reloc_ctl;
560 
561 	spin_lock(&rc->reloc_root_tree.lock);
562 	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
563 				   root->commit_root->start);
564 	if (rb_node) {
565 		node = rb_entry(rb_node, struct mapping_node, rb_node);
566 		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
567 	}
568 	spin_unlock(&rc->reloc_root_tree.lock);
569 
570 	if (!node)
571 		return 0;
572 	BUG_ON((struct btrfs_root *)node->data != root);
573 
574 	spin_lock(&rc->reloc_root_tree.lock);
575 	node->bytenr = root->node->start;
576 	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root, &node->simple_node);
577 	spin_unlock(&rc->reloc_root_tree.lock);
578 	if (rb_node)
579 		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
580 	return 0;
581 }
582 
583 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
584 					struct btrfs_root *root, u64 objectid)
585 {
586 	struct btrfs_fs_info *fs_info = root->fs_info;
587 	struct btrfs_root *reloc_root;
588 	struct extent_buffer *eb;
589 	struct btrfs_root_item AUTO_KFREE(root_item);
590 	struct btrfs_key root_key;
591 	int ret = 0;
592 
593 	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
594 	if (!root_item)
595 		return ERR_PTR(-ENOMEM);
596 
597 	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
598 	root_key.type = BTRFS_ROOT_ITEM_KEY;
599 	root_key.offset = objectid;
600 
601 	if (btrfs_root_id(root) == objectid) {
602 		u64 commit_root_gen;
603 
604 		/*
605 		 * Relocation will wait for cleaner thread, and any half-dropped
606 		 * subvolume will be fully cleaned up at mount time.
607 		 * So here we shouldn't hit a subvolume with non-zero drop_progress.
608 		 *
609 		 * If this isn't the case, error out since it can make us attempt to
610 		 * drop references for extents that were already dropped before.
611 		 */
612 		if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) {
613 			struct btrfs_key cpu_key;
614 
615 			btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress);
616 			btrfs_err(fs_info,
617 	"cannot relocate partially dropped subvolume %llu, drop progress key " BTRFS_KEY_FMT,
618 				  objectid, BTRFS_KEY_FMT_VALUE(&cpu_key));
619 			return ERR_PTR(-EUCLEAN);
620 		}
621 
622 		/* called by btrfs_init_reloc_root */
623 		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
624 				      BTRFS_TREE_RELOC_OBJECTID);
625 		if (ret)
626 			return ERR_PTR(ret);
627 
628 		/*
629 		 * Set the last_snapshot field to the generation of the commit
630 		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
631 		 * correctly (returns true) both when the relocation root is
632 		 * created inside the critical section of a transaction commit
633 		 * (through transaction.c:qgroup_account_snapshot()) and when
634 		 * it's created before the transaction commit is started.
635 		 */
636 		commit_root_gen = btrfs_header_generation(root->commit_root);
637 		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
638 	} else {
639 		/*
640 		 * called by btrfs_reloc_post_snapshot_hook.
641 		 * the source tree is a reloc tree, all tree blocks
642 		 * modified after it was created have RELOC flag
643 		 * set in their headers. so it's OK to not update
644 		 * the 'last_snapshot'.
645 		 */
646 		ret = btrfs_copy_root(trans, root, root->node, &eb,
647 				      BTRFS_TREE_RELOC_OBJECTID);
648 		if (ret)
649 			return ERR_PTR(ret);
650 	}
651 
652 	/*
653 	 * We have changed references at this point, we must abort the
654 	 * transaction if anything fails (i.e. 'goto abort').
655 	 */
656 
657 	memcpy(root_item, &root->root_item, sizeof(*root_item));
658 	btrfs_set_root_bytenr(root_item, eb->start);
659 	btrfs_set_root_level(root_item, btrfs_header_level(eb));
660 	btrfs_set_root_generation(root_item, trans->transid);
661 
662 	if (btrfs_root_id(root) == objectid) {
663 		btrfs_set_root_refs(root_item, 0);
664 		memset(&root_item->drop_progress, 0,
665 		       sizeof(struct btrfs_disk_key));
666 		btrfs_set_root_drop_level(root_item, 0);
667 	}
668 
669 	btrfs_tree_unlock(eb);
670 	free_extent_buffer(eb);
671 
672 	ret = btrfs_insert_root(trans, fs_info->tree_root,
673 				&root_key, root_item);
674 	if (ret)
675 		goto abort;
676 
677 	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
678 	if (IS_ERR(reloc_root)) {
679 		ret = PTR_ERR(reloc_root);
680 		goto abort;
681 	}
682 	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
683 	btrfs_set_root_last_trans(reloc_root, trans->transid);
684 	return reloc_root;
685 
686 abort:
687 	btrfs_abort_transaction(trans, ret);
688 	return ERR_PTR(ret);
689 }
690 
691 /*
692  * create reloc tree for a given fs tree. reloc tree is just a
693  * snapshot of the fs tree with special root objectid.
694  *
695  * The reloc_root comes out of here with two references, one for
696  * root->reloc_root, and another for being on the rc->reloc_roots list.
697  */
698 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
699 			  struct btrfs_root *root)
700 {
701 	struct btrfs_fs_info *fs_info = root->fs_info;
702 	struct btrfs_root *reloc_root;
703 	struct reloc_control *rc = fs_info->reloc_ctl;
704 	struct btrfs_block_rsv *rsv;
705 	int clear_rsv = 0;
706 	int ret;
707 
708 	if (!rc)
709 		return 0;
710 
711 	/*
712 	 * The subvolume has a reloc tree but the swap is finished, no need to
713 	 * create/update the dead reloc tree
714 	 */
715 	if (reloc_root_is_dead(root))
716 		return 0;
717 
718 	/*
719 	 * This is subtle but important.  We do not do
720 	 * record_root_in_transaction for reloc roots, instead we record their
721 	 * corresponding fs root, and then here we update the last trans for the
722 	 * reloc root.  This means that we have to do this for the entire life
723 	 * of the reloc root, regardless of which stage of the relocation we are
724 	 * in.
725 	 */
726 	if (root->reloc_root) {
727 		reloc_root = root->reloc_root;
728 		btrfs_set_root_last_trans(reloc_root, trans->transid);
729 		return 0;
730 	}
731 
732 	/*
733 	 * We are merging reloc roots, we do not need new reloc trees.  Also
734 	 * reloc trees never need their own reloc tree.
735 	 */
736 	if (!rc->create_reloc_tree || btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
737 		return 0;
738 
739 	if (!trans->reloc_reserved) {
740 		rsv = trans->block_rsv;
741 		trans->block_rsv = rc->block_rsv;
742 		clear_rsv = 1;
743 	}
744 	reloc_root = create_reloc_root(trans, root, btrfs_root_id(root));
745 	if (clear_rsv)
746 		trans->block_rsv = rsv;
747 	if (IS_ERR(reloc_root))
748 		return PTR_ERR(reloc_root);
749 
750 	ret = __add_reloc_root(reloc_root);
751 	ASSERT(ret != -EEXIST);
752 	if (ret) {
753 		/* Pairs with create_reloc_root */
754 		btrfs_put_root(reloc_root);
755 		return ret;
756 	}
757 	root->reloc_root = btrfs_grab_root(reloc_root);
758 	return 0;
759 }
760 
761 /*
762  * update root item of reloc tree
763  */
764 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
765 			    struct btrfs_root *root)
766 {
767 	struct btrfs_fs_info *fs_info = root->fs_info;
768 	struct btrfs_root *reloc_root;
769 	struct btrfs_root_item *root_item;
770 	int ret;
771 
772 	if (!have_reloc_root(root))
773 		return 0;
774 
775 	reloc_root = root->reloc_root;
776 	root_item = &reloc_root->root_item;
777 
778 	/*
779 	 * We are probably ok here, but __del_reloc_root() will drop its ref of
780 	 * the root.  We have the ref for root->reloc_root, but just in case
781 	 * hold it while we update the reloc root.
782 	 */
783 	btrfs_grab_root(reloc_root);
784 
785 	/* root->reloc_root will stay until current relocation finished */
786 	if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree &&
787 	    btrfs_root_refs(root_item) == 0) {
788 		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
789 		/*
790 		 * Mark the tree as dead before we change reloc_root so
791 		 * have_reloc_root will not touch it from now on.
792 		 */
793 		smp_wmb();
794 		__del_reloc_root(reloc_root);
795 	}
796 
797 	if (reloc_root->commit_root != reloc_root->node) {
798 		__update_reloc_root(reloc_root);
799 		btrfs_set_root_node(root_item, reloc_root->node);
800 		free_extent_buffer(reloc_root->commit_root);
801 		reloc_root->commit_root = btrfs_root_node(reloc_root);
802 	}
803 
804 	ret = btrfs_update_root(trans, fs_info->tree_root,
805 				&reloc_root->root_key, root_item);
806 	btrfs_put_root(reloc_root);
807 	return ret;
808 }
809 
810 /*
811  * get new location of data
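 *
 * The data reloc inode lays out relocated data so that the file offset of an
 * extent equals its original bytenr minus the start of the block group being
 * relocated (reloc_block_group_start).  Looking up the file extent item at
 * that offset therefore yields the new disk bytenr.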
812  */
813 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
814 			    u64 bytenr, u64 num_bytes)
815 {
816 	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
817 	BTRFS_PATH_AUTO_FREE(path);
818 	struct btrfs_file_extent_item *fi;
819 	struct extent_buffer *leaf;
820 	int ret;
821 
822 	path = btrfs_alloc_path();
823 	if (!path)
824 		return -ENOMEM;
825 
826 	bytenr -= BTRFS_I(reloc_inode)->reloc_block_group_start;
827 	ret = btrfs_lookup_file_extent(NULL, root, path,
828 			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
829 	if (ret < 0)
830 		return ret;
831 	if (ret > 0)
832 		return -ENOENT;
833 
834 	leaf = path->nodes[0];
835 	fi = btrfs_item_ptr(leaf, path->slots[0],
836 			    struct btrfs_file_extent_item);
837 
838 	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
839 	       btrfs_file_extent_compression(leaf, fi) ||
840 	       btrfs_file_extent_encryption(leaf, fi) ||
841 	       btrfs_file_extent_other_encoding(leaf, fi));
842 
843 	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi))
844 		return -EINVAL;
845 
846 	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
847 	return 0;
848 }
849 
850 /*
851  * update file extent items in the tree leaf to point to
852  * the new locations.
853  */
854 static noinline_for_stack
855 int replace_file_extents(struct btrfs_trans_handle *trans,
856 			 struct reloc_control *rc,
857 			 struct btrfs_root *root,
858 			 struct extent_buffer *leaf)
859 {
860 	struct btrfs_fs_info *fs_info = root->fs_info;
861 	struct btrfs_key key;
862 	struct btrfs_file_extent_item *fi;
863 	struct btrfs_inode *inode = NULL;
864 	u64 parent;
865 	u64 bytenr;
866 	u64 new_bytenr = 0;
867 	u64 num_bytes;
868 	u64 end;
869 	u32 nritems;
870 	u32 i;
871 	int ret = 0;
872 	int first = 1;
873 
874 	if (rc->stage != UPDATE_DATA_PTRS)
875 		return 0;
876 
877 	/* reloc trees always use full backref */
878 	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
879 		parent = leaf->start;
880 	else
881 		parent = 0;
882 
883 	nritems = btrfs_header_nritems(leaf);
884 	for (i = 0; i < nritems; i++) {
885 		struct btrfs_ref ref = { 0 };
886 
887 		cond_resched();
888 		btrfs_item_key_to_cpu(leaf, &key, i);
889 		if (key.type != BTRFS_EXTENT_DATA_KEY)
890 			continue;
891 		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
892 		if (btrfs_file_extent_type(leaf, fi) ==
893 		    BTRFS_FILE_EXTENT_INLINE)
894 			continue;
895 		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
896 		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
897 		if (bytenr == 0)
898 			continue;
899 		if (!in_range(bytenr, rc->block_group->start,
900 			      rc->block_group->length))
901 			continue;
902 
903 		/*
904 		 * if we are modifying block in fs tree, wait for read_folio
905 		 * to complete and drop the extent cache
906 		 */
907 		if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
908 			if (first) {
909 				inode = btrfs_find_first_inode(root, key.objectid);
910 				first = 0;
911 			} else if (inode && btrfs_ino(inode) < key.objectid) {
912 				btrfs_add_delayed_iput(inode);
913 				inode = btrfs_find_first_inode(root, key.objectid);
914 			}
915 			if (inode && btrfs_ino(inode) == key.objectid) {
916 				struct extent_state *cached_state = NULL;
917 
918 				end = key.offset +
919 				      btrfs_file_extent_num_bytes(leaf, fi);
920 				WARN_ON(!IS_ALIGNED(key.offset,
921 						    fs_info->sectorsize));
922 				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
923 				end--;
924 				/* Take mmap lock to serialize with reflinks. */
925 				if (!down_read_trylock(&inode->i_mmap_lock))
926 					continue;
927 				ret = btrfs_try_lock_extent(&inode->io_tree, key.offset,
928 							    end, &cached_state);
929 				if (!ret) {
930 					up_read(&inode->i_mmap_lock);
931 					continue;
932 				}
933 
934 				btrfs_drop_extent_map_range(inode, key.offset, end, true);
935 				btrfs_unlock_extent(&inode->io_tree, key.offset, end,
936 						    &cached_state);
937 				up_read(&inode->i_mmap_lock);
938 			}
939 		}
940 
941 		ret = get_new_location(rc->data_inode, &new_bytenr,
942 				       bytenr, num_bytes);
943 		if (ret) {
944 			/*
945 			 * Don't have to abort since we've not changed anything
946 			 * in the file extent yet.
947 			 */
948 			break;
949 		}
950 
951 		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
952 
953 		key.offset -= btrfs_file_extent_offset(leaf, fi);
954 		ref.action = BTRFS_ADD_DELAYED_REF;
955 		ref.bytenr = new_bytenr;
956 		ref.num_bytes = num_bytes;
957 		ref.parent = parent;
958 		ref.owning_root = btrfs_root_id(root);
959 		ref.ref_root = btrfs_header_owner(leaf);
960 		btrfs_init_data_ref(&ref, key.objectid, key.offset,
961 				    btrfs_root_id(root), false);
962 		ret = btrfs_inc_extent_ref(trans, &ref);
963 		if (unlikely(ret)) {
964 			btrfs_abort_transaction(trans, ret);
965 			break;
966 		}
967 
968 		ref.action = BTRFS_DROP_DELAYED_REF;
969 		ref.bytenr = bytenr;
970 		ref.num_bytes = num_bytes;
971 		ref.parent = parent;
972 		ref.owning_root = btrfs_root_id(root);
973 		ref.ref_root = btrfs_header_owner(leaf);
974 		btrfs_init_data_ref(&ref, key.objectid, key.offset,
975 				    btrfs_root_id(root), false);
976 		ret = btrfs_free_extent(trans, &ref);
977 		if (unlikely(ret)) {
978 			btrfs_abort_transaction(trans, ret);
979 			break;
980 		}
981 	}
982 	if (inode)
983 		btrfs_add_delayed_iput(inode);
984 	return ret;
985 }
986 
987 static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
988 					       int slot, const struct btrfs_path *path,
989 					       int level)
990 {
991 	struct btrfs_disk_key key1;
992 	struct btrfs_disk_key key2;
993 	btrfs_node_key(eb, &key1, slot);
994 	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
995 	return memcmp(&key1, &key2, sizeof(key1));
996 }
997 
998 /*
999  * try to replace tree blocks in fs tree with the new blocks
1000  * in reloc tree. tree blocks that haven't been modified since the
1001  * reloc tree was created can be replaced.
1002  *
1003  * if a block was replaced, level of the block + 1 is returned.
1004  * if no block got replaced, 0 is returned. if there are other
1005  * errors, a negative error number is returned.
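 *
 * A swap at 'level' rewires the block pointers in both parents and then
 * queues four delayed ref updates: the old block gains a full backref from
 * the reloc tree parent and loses its ref from the fs tree, while the new
 * block gains a ref from the fs tree and loses the full backref from the
 * reloc tree parent.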
1006  */
1007 static noinline_for_stack
1008 int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1009 		 struct btrfs_root *dest, struct btrfs_root *src,
1010 		 struct btrfs_path *path, struct btrfs_key *next_key,
1011 		 int lowest_level, int max_level)
1012 {
1013 	struct btrfs_fs_info *fs_info = dest->fs_info;
1014 	struct extent_buffer *eb;
1015 	struct extent_buffer *parent;
1016 	struct btrfs_ref ref = { 0 };
1017 	struct btrfs_key key;
1018 	u64 old_bytenr;
1019 	u64 new_bytenr;
1020 	u64 old_ptr_gen;
1021 	u64 new_ptr_gen;
1022 	u64 last_snapshot;
1023 	u32 blocksize;
1024 	int cow = 0;
1025 	int level;
1026 	int ret;
1027 	int slot;
1028 
1029 	ASSERT(btrfs_root_id(src) == BTRFS_TREE_RELOC_OBJECTID);
1030 	ASSERT(btrfs_root_id(dest) != BTRFS_TREE_RELOC_OBJECTID);
1031 
1032 	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1033 again:
1034 	slot = path->slots[lowest_level];
1035 	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1036 
1037 	eb = btrfs_lock_root_node(dest);
1038 	level = btrfs_header_level(eb);
1039 
1040 	if (level < lowest_level) {
1041 		btrfs_tree_unlock(eb);
1042 		free_extent_buffer(eb);
1043 		return 0;
1044 	}
1045 
1046 	if (cow) {
1047 		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
1048 				      BTRFS_NESTING_COW);
1049 		if (ret) {
1050 			btrfs_tree_unlock(eb);
1051 			free_extent_buffer(eb);
1052 			return ret;
1053 		}
1054 	}
1055 
1056 	if (next_key) {
1057 		next_key->objectid = (u64)-1;
1058 		next_key->type = (u8)-1;
1059 		next_key->offset = (u64)-1;
1060 	}
1061 
1062 	parent = eb;
1063 	while (1) {
1064 		level = btrfs_header_level(parent);
1065 		ASSERT(level >= lowest_level);
1066 
1067 		ret = btrfs_bin_search(parent, 0, &key, &slot);
1068 		if (ret < 0)
1069 			break;
1070 		if (ret && slot > 0)
1071 			slot--;
1072 
1073 		if (next_key && slot + 1 < btrfs_header_nritems(parent))
1074 			btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1075 
1076 		old_bytenr = btrfs_node_blockptr(parent, slot);
1077 		blocksize = fs_info->nodesize;
1078 		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1079 
1080 		if (level <= max_level) {
1081 			eb = path->nodes[level];
1082 			new_bytenr = btrfs_node_blockptr(eb,
1083 							path->slots[level]);
1084 			new_ptr_gen = btrfs_node_ptr_generation(eb,
1085 							path->slots[level]);
1086 		} else {
1087 			new_bytenr = 0;
1088 			new_ptr_gen = 0;
1089 		}
1090 
1091 		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1092 			ret = level;
1093 			break;
1094 		}
1095 
1096 		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1097 		    memcmp_node_keys(parent, slot, path, level)) {
1098 			if (level <= lowest_level) {
1099 				ret = 0;
1100 				break;
1101 			}
1102 
1103 			eb = btrfs_read_node_slot(parent, slot);
1104 			if (IS_ERR(eb)) {
1105 				ret = PTR_ERR(eb);
1106 				break;
1107 			}
1108 			btrfs_tree_lock(eb);
1109 			if (cow) {
1110 				ret = btrfs_cow_block(trans, dest, eb, parent,
1111 						      slot, &eb,
1112 						      BTRFS_NESTING_COW);
1113 				if (ret) {
1114 					btrfs_tree_unlock(eb);
1115 					free_extent_buffer(eb);
1116 					break;
1117 				}
1118 			}
1119 
1120 			btrfs_tree_unlock(parent);
1121 			free_extent_buffer(parent);
1122 
1123 			parent = eb;
1124 			continue;
1125 		}
1126 
1127 		if (!cow) {
1128 			btrfs_tree_unlock(parent);
1129 			free_extent_buffer(parent);
1130 			cow = 1;
1131 			goto again;
1132 		}
1133 
1134 		btrfs_node_key_to_cpu(path->nodes[level], &key,
1135 				      path->slots[level]);
1136 		btrfs_release_path(path);
1137 
1138 		path->lowest_level = level;
1139 		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1140 		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1141 		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1142 		path->lowest_level = 0;
1143 		if (ret) {
1144 			if (ret > 0)
1145 				ret = -ENOENT;
1146 			break;
1147 		}
1148 
1149 		/*
1150 		 * Inform qgroup to trace both subtrees.
1151 		 *
1152 		 * We must trace both trees.
1153 		 * 1) Tree reloc subtree
1154 		 *    If not traced, we will leak data numbers
1155 		 * 2) Fs subtree
1156 		 *    If not traced, we will double count old data
1157 		 *
1158 		 * We don't scan the subtree right now, but only record
1159 		 * the swapped tree blocks.
1160 		 * The real subtree rescan is delayed until we have new
1161 		 * CoW on the subtree root node before transaction commit.
1162 		 */
1163 		ret = btrfs_qgroup_add_swapped_blocks(dest,
1164 				rc->block_group, parent, slot,
1165 				path->nodes[level], path->slots[level],
1166 				last_snapshot);
1167 		if (ret < 0)
1168 			break;
1169 		/*
1170 		 * swap blocks in fs tree and reloc tree.
1171 		 */
1172 		btrfs_set_node_blockptr(parent, slot, new_bytenr);
1173 		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1174 
1175 		btrfs_set_node_blockptr(path->nodes[level],
1176 					path->slots[level], old_bytenr);
1177 		btrfs_set_node_ptr_generation(path->nodes[level],
1178 					      path->slots[level], old_ptr_gen);
1179 
1180 		ref.action = BTRFS_ADD_DELAYED_REF;
1181 		ref.bytenr = old_bytenr;
1182 		ref.num_bytes = blocksize;
1183 		ref.parent = path->nodes[level]->start;
1184 		ref.owning_root = btrfs_root_id(src);
1185 		ref.ref_root = btrfs_root_id(src);
1186 		btrfs_init_tree_ref(&ref, level - 1, 0, true);
1187 		ret = btrfs_inc_extent_ref(trans, &ref);
1188 		if (unlikely(ret)) {
1189 			btrfs_abort_transaction(trans, ret);
1190 			break;
1191 		}
1192 
1193 		ref.action = BTRFS_ADD_DELAYED_REF;
1194 		ref.bytenr = new_bytenr;
1195 		ref.num_bytes = blocksize;
1196 		ref.parent = 0;
1197 		ref.owning_root = btrfs_root_id(dest);
1198 		ref.ref_root = btrfs_root_id(dest);
1199 		btrfs_init_tree_ref(&ref, level - 1, 0, true);
1200 		ret = btrfs_inc_extent_ref(trans, &ref);
1201 		if (unlikely(ret)) {
1202 			btrfs_abort_transaction(trans, ret);
1203 			break;
1204 		}
1205 
1206 		/* We don't know the real owning_root, use 0. */
1207 		ref.action = BTRFS_DROP_DELAYED_REF;
1208 		ref.bytenr = new_bytenr;
1209 		ref.num_bytes = blocksize;
1210 		ref.parent = path->nodes[level]->start;
1211 		ref.owning_root = 0;
1212 		ref.ref_root = btrfs_root_id(src);
1213 		btrfs_init_tree_ref(&ref, level - 1, 0, true);
1214 		ret = btrfs_free_extent(trans, &ref);
1215 		if (unlikely(ret)) {
1216 			btrfs_abort_transaction(trans, ret);
1217 			break;
1218 		}
1219 
1220 		/* We don't know the real owning_root, use 0. */
1221 		ref.action = BTRFS_DROP_DELAYED_REF;
1222 		ref.bytenr = old_bytenr;
1223 		ref.num_bytes = blocksize;
1224 		ref.parent = 0;
1225 		ref.owning_root = 0;
1226 		ref.ref_root = btrfs_root_id(dest);
1227 		btrfs_init_tree_ref(&ref, level - 1, 0, true);
1228 		ret = btrfs_free_extent(trans, &ref);
1229 		if (unlikely(ret)) {
1230 			btrfs_abort_transaction(trans, ret);
1231 			break;
1232 		}
1233 
1234 		btrfs_unlock_up_safe(path, 0);
1235 
1236 		ret = level;
1237 		break;
1238 	}
1239 	btrfs_tree_unlock(parent);
1240 	free_extent_buffer(parent);
1241 	return ret;
1242 }
1243 
1244 /*
1245  * helper to find next relocated block in reloc tree
1246  */
1247 static noinline_for_stack
1248 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1249 		       int *level)
1250 {
1251 	struct extent_buffer *eb;
1252 	int i;
1253 	u64 last_snapshot;
1254 	u32 nritems;
1255 
1256 	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1257 
1258 	for (i = 0; i < *level; i++) {
1259 		free_extent_buffer(path->nodes[i]);
1260 		path->nodes[i] = NULL;
1261 	}
1262 
1263 	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1264 		eb = path->nodes[i];
1265 		nritems = btrfs_header_nritems(eb);
1266 		while (path->slots[i] + 1 < nritems) {
1267 			path->slots[i]++;
1268 			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1269 			    last_snapshot)
1270 				continue;
1271 
1272 			*level = i;
1273 			return 0;
1274 		}
1275 		free_extent_buffer(path->nodes[i]);
1276 		path->nodes[i] = NULL;
1277 	}
1278 	return 1;
1279 }
1280 
1281 /*
1282  * walk down reloc tree to find relocated block of lowest level
1283  */
1284 static noinline_for_stack
1285 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1286 			 int *level)
1287 {
1288 	struct extent_buffer *eb = NULL;
1289 	int i;
1290 	u64 ptr_gen = 0;
1291 	u64 last_snapshot;
1292 	u32 nritems;
1293 
1294 	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1295 
1296 	for (i = *level; i > 0; i--) {
1297 		eb = path->nodes[i];
1298 		nritems = btrfs_header_nritems(eb);
1299 		while (path->slots[i] < nritems) {
1300 			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1301 			if (ptr_gen > last_snapshot)
1302 				break;
1303 			path->slots[i]++;
1304 		}
1305 		if (path->slots[i] >= nritems) {
1306 			if (i == *level)
1307 				break;
1308 			*level = i + 1;
1309 			return 0;
1310 		}
1311 		if (i == 1) {
1312 			*level = i;
1313 			return 0;
1314 		}
1315 
1316 		eb = btrfs_read_node_slot(eb, path->slots[i]);
1317 		if (IS_ERR(eb))
1318 			return PTR_ERR(eb);
1319 		BUG_ON(btrfs_header_level(eb) != i - 1);
1320 		path->nodes[i - 1] = eb;
1321 		path->slots[i - 1] = 0;
1322 	}
1323 	return 1;
1324 }
1325 
1326 /*
1327  * invalidate extent cache for file extents whose keys are in the range of
1328  * [min_key, max_key)
1329  */
1330 static int invalidate_extent_cache(struct btrfs_root *root,
1331 				   const struct btrfs_key *min_key,
1332 				   const struct btrfs_key *max_key)
1333 {
1334 	struct btrfs_fs_info *fs_info = root->fs_info;
1335 	struct btrfs_inode *inode = NULL;
1336 	u64 objectid;
1337 	u64 start, end;
1338 	u64 ino;
1339 
1340 	objectid = min_key->objectid;
1341 	while (1) {
1342 		struct extent_state *cached_state = NULL;
1343 
1344 		cond_resched();
1345 		if (inode)
1346 			iput(&inode->vfs_inode);
1347 
1348 		if (objectid > max_key->objectid)
1349 			break;
1350 
1351 		inode = btrfs_find_first_inode(root, objectid);
1352 		if (!inode)
1353 			break;
1354 		ino = btrfs_ino(inode);
1355 
1356 		if (ino > max_key->objectid) {
1357 			iput(&inode->vfs_inode);
1358 			break;
1359 		}
1360 
1361 		objectid = ino + 1;
1362 		if (!S_ISREG(inode->vfs_inode.i_mode))
1363 			continue;
1364 
1365 		if (unlikely(min_key->objectid == ino)) {
1366 			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1367 				continue;
1368 			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1369 				start = 0;
1370 			else {
1371 				start = min_key->offset;
1372 				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
1373 			}
1374 		} else {
1375 			start = 0;
1376 		}
1377 
1378 		if (unlikely(max_key->objectid == ino)) {
1379 			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1380 				continue;
1381 			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1382 				end = (u64)-1;
1383 			} else {
1384 				if (max_key->offset == 0)
1385 					continue;
1386 				end = max_key->offset;
1387 				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1388 				end--;
1389 			}
1390 		} else {
1391 			end = (u64)-1;
1392 		}
1393 
1394 		/* the lock_extent waits for read_folio to complete */
1395 		btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
1396 		btrfs_drop_extent_map_range(inode, start, end, true);
1397 		btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
1398 	}
1399 	return 0;
1400 }
1401 
1402 static int find_next_key(struct btrfs_path *path, int level,
1403 			 struct btrfs_key *key)
1404 
1405 {
1406 	while (level < BTRFS_MAX_LEVEL) {
1407 		if (!path->nodes[level])
1408 			break;
1409 		if (path->slots[level] + 1 <
1410 		    btrfs_header_nritems(path->nodes[level])) {
1411 			btrfs_node_key_to_cpu(path->nodes[level], key,
1412 					      path->slots[level] + 1);
1413 			return 0;
1414 		}
1415 		level++;
1416 	}
1417 	return 1;
1418 }
1419 
1420 /*
1421  * Insert current subvolume into reloc_control::dirty_subvol_roots
1422  */
1423 static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
1424 			       struct reloc_control *rc,
1425 			       struct btrfs_root *root)
1426 {
1427 	struct btrfs_root *reloc_root = root->reloc_root;
1428 	struct btrfs_root_item *reloc_root_item;
1429 	int ret;
1430 
1431 	/* @root must be a subvolume tree root with a valid reloc tree */
1432 	ASSERT(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
1433 	ASSERT(reloc_root);
1434 
1435 	reloc_root_item = &reloc_root->root_item;
1436 	memset(&reloc_root_item->drop_progress, 0,
1437 		sizeof(reloc_root_item->drop_progress));
1438 	btrfs_set_root_drop_level(reloc_root_item, 0);
1439 	btrfs_set_root_refs(reloc_root_item, 0);
1440 	ret = btrfs_update_reloc_root(trans, root);
1441 	if (ret)
1442 		return ret;
1443 
1444 	if (list_empty(&root->reloc_dirty_list)) {
1445 		btrfs_grab_root(root);
1446 		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
1447 	}
1448 
1449 	return 0;
1450 }
1451 
1452 static int clean_dirty_subvols(struct reloc_control *rc)
1453 {
1454 	struct btrfs_root *root;
1455 	struct btrfs_root *next;
1456 	int ret = 0;
1457 	int ret2;
1458 
1459 	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
1460 				 reloc_dirty_list) {
1461 		if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
1462 			/* Merged subvolume, cleanup its reloc root */
1463 			struct btrfs_root *reloc_root = root->reloc_root;
1464 
1465 			list_del_init(&root->reloc_dirty_list);
1466 			root->reloc_root = NULL;
1467 			/*
1468 			 * Need barrier to ensure clear_bit() only happens after
1469 			 * root->reloc_root = NULL. Pairs with have_reloc_root.
1470 			 */
1471 			smp_wmb();
1472 			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1473 			if (reloc_root) {
1474 				/*
1475 				 * btrfs_drop_snapshot drops our ref we hold for
1476 				 * ->reloc_root.  If it fails however we must
1477 				 * drop the ref ourselves.
1478 				 */
1479 				ret2 = btrfs_drop_snapshot(reloc_root, false, true);
1480 				if (ret2 < 0) {
1481 					btrfs_put_root(reloc_root);
1482 					if (!ret)
1483 						ret = ret2;
1484 				}
1485 			}
1486 			btrfs_put_root(root);
1487 		} else {
1488 			/* Orphan reloc tree, just clean it up */
1489 			ret2 = btrfs_drop_snapshot(root, false, true);
1490 			if (ret2 < 0) {
1491 				btrfs_put_root(root);
1492 				if (!ret)
1493 					ret = ret2;
1494 			}
1495 		}
1496 	}
1497 	return ret;
1498 }
1499 
1500 /*
1501  * merge the relocated tree blocks in reloc tree with corresponding
1502  * fs tree.
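 *
 * Merge progress is checkpointed in the reloc root item's drop_progress and
 * drop_level fields between transactions, so an interrupted merge can resume
 * where it left off (see the drop_progress handling at the start of the
 * function).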
1503  */
1504 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1505 					       struct btrfs_root *root)
1506 {
1507 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1508 	struct btrfs_key key;
1509 	struct btrfs_key next_key;
1510 	struct btrfs_trans_handle *trans = NULL;
1511 	struct btrfs_root *reloc_root;
1512 	struct btrfs_root_item *root_item;
1513 	struct btrfs_path *path;
1514 	struct extent_buffer *leaf;
1515 	int reserve_level;
1516 	int level;
1517 	int max_level;
1518 	int replaced = 0;
1519 	int ret = 0;
1520 	u32 min_reserved;
1521 
1522 	path = btrfs_alloc_path();
1523 	if (!path)
1524 		return -ENOMEM;
1525 	path->reada = READA_FORWARD;
1526 
1527 	reloc_root = root->reloc_root;
1528 	root_item = &reloc_root->root_item;
1529 
1530 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1531 		level = btrfs_root_level(root_item);
1532 		refcount_inc(&reloc_root->node->refs);
1533 		path->nodes[level] = reloc_root->node;
1534 		path->slots[level] = 0;
1535 	} else {
1536 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1537 
1538 		level = btrfs_root_drop_level(root_item);
1539 		BUG_ON(level == 0);
1540 		path->lowest_level = level;
1541 		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1542 		path->lowest_level = 0;
1543 		if (ret < 0) {
1544 			btrfs_free_path(path);
1545 			return ret;
1546 		}
1547 
1548 		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
1549 				      path->slots[level]);
1550 		WARN_ON(memcmp(&key, &next_key, sizeof(key)));
1551 
1552 		btrfs_unlock_up_safe(path, 0);
1553 	}
1554 
1555 	/*
1556 	 * In merge_reloc_root(), we modify the upper level pointer to swap the
1557 	 * tree blocks between reloc tree and subvolume tree.  Thus for tree
1558 	 * block COW, we COW at most from level 1 to root level for each tree.
1559 	 *
1560 	 * Thus the needed metadata size is at most root_level * nodesize,
1561 	 * multiplied by 2 since we have two trees to COW.
1562 	 */
1563 	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
1564 	min_reserved = fs_info->nodesize * reserve_level * 2;
1565 	memset(&next_key, 0, sizeof(next_key));
1566 
1567 	while (1) {
1568 		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
1569 					     min_reserved,
1570 					     BTRFS_RESERVE_FLUSH_LIMIT);
1571 		if (ret)
1572 			goto out;
1573 		trans = btrfs_start_transaction(root, 0);
1574 		if (IS_ERR(trans)) {
1575 			ret = PTR_ERR(trans);
1576 			trans = NULL;
1577 			goto out;
1578 		}
1579 
1580 		/*
1581 		 * At this point we no longer have a reloc_control, so we can't
1582 		 * depend on btrfs_init_reloc_root to update our last_trans.
1583 		 *
1584 		 * But that's ok, we started the trans handle on our
1585 		 * corresponding fs_root, which means it's been added to the
1586 		 * dirty list.  At commit time we'll still call
1587 		 * btrfs_update_reloc_root() and update our root item
1588 		 * appropriately.
1589 		 */
1590 		btrfs_set_root_last_trans(reloc_root, trans->transid);
1591 		trans->block_rsv = rc->block_rsv;
1592 
1593 		replaced = 0;
1594 		max_level = level;
1595 
1596 		ret = walk_down_reloc_tree(reloc_root, path, &level);
1597 		if (ret < 0)
1598 			goto out;
1599 		if (ret > 0)
1600 			break;
1601 
1602 		if (!find_next_key(path, level, &key) &&
1603 		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1604 			ret = 0;
1605 		} else {
1606 			ret = replace_path(trans, rc, root, reloc_root, path,
1607 					   &next_key, level, max_level);
1608 		}
1609 		if (ret < 0)
1610 			goto out;
1611 		if (ret > 0) {
1612 			level = ret;
1613 			btrfs_node_key_to_cpu(path->nodes[level], &key,
1614 					      path->slots[level]);
1615 			replaced = 1;
1616 		}
1617 
1618 		ret = walk_up_reloc_tree(reloc_root, path, &level);
1619 		if (ret > 0)
1620 			break;
1621 
1622 		BUG_ON(level == 0);
1623 		/*
1624 		 * save the merging progress in the drop_progress.
1625 		 * this is OK since root refs == 1 in this case.
1626 		 */
1627 		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
1628 			       path->slots[level]);
1629 		btrfs_set_root_drop_level(root_item, level);
1630 
1631 		btrfs_end_transaction_throttle(trans);
1632 		trans = NULL;
1633 
1634 		btrfs_btree_balance_dirty(fs_info);
1635 
1636 		if (replaced && rc->stage == UPDATE_DATA_PTRS)
1637 			invalidate_extent_cache(root, &key, &next_key);
1638 	}
1639 
1640 	/*
1641 	 * handle the case where only one block in the fs tree needs to be
1642 	 * relocated and the block is the tree root.
1643 	 */
1644 	leaf = btrfs_lock_root_node(root);
1645 	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
1646 			      BTRFS_NESTING_COW);
1647 	btrfs_tree_unlock(leaf);
1648 	free_extent_buffer(leaf);
1649 out:
1650 	btrfs_free_path(path);
1651 
1652 	if (ret == 0) {
1653 		ret = insert_dirty_subvol(trans, rc, root);
1654 		if (ret)
1655 			btrfs_abort_transaction(trans, ret);
1656 	}
1657 
1658 	if (trans)
1659 		btrfs_end_transaction_throttle(trans);
1660 
1661 	btrfs_btree_balance_dirty(fs_info);
1662 
1663 	if (replaced && rc->stage == UPDATE_DATA_PTRS)
1664 		invalidate_extent_cache(root, &key, &next_key);
1665 
1666 	return ret;
1667 }
1668 
1669 static noinline_for_stack
1670 int prepare_to_merge(struct reloc_control *rc, int err)
1671 {
1672 	struct btrfs_root *root = rc->extent_root;
1673 	struct btrfs_fs_info *fs_info = root->fs_info;
1674 	struct btrfs_root *reloc_root;
1675 	struct btrfs_trans_handle *trans;
1676 	LIST_HEAD(reloc_roots);
1677 	u64 num_bytes = 0;
1678 	int ret;
1679 
1680 	mutex_lock(&fs_info->reloc_mutex);
1681 	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1682 	rc->merging_rsv_size += rc->nodes_relocated * 2;
1683 	mutex_unlock(&fs_info->reloc_mutex);
1684 
1685 again:
1686 	if (!err) {
1687 		num_bytes = rc->merging_rsv_size;
1688 		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
1689 					  BTRFS_RESERVE_FLUSH_ALL);
1690 		if (ret)
1691 			err = ret;
1692 	}
1693 
1694 	trans = btrfs_join_transaction(rc->extent_root);
1695 	if (IS_ERR(trans)) {
1696 		if (!err)
1697 			btrfs_block_rsv_release(fs_info, rc->block_rsv,
1698 						num_bytes, NULL);
1699 		return PTR_ERR(trans);
1700 	}
1701 
1702 	if (!err) {
1703 		if (num_bytes != rc->merging_rsv_size) {
1704 			btrfs_end_transaction(trans);
1705 			btrfs_block_rsv_release(fs_info, rc->block_rsv,
1706 						num_bytes, NULL);
1707 			goto again;
1708 		}
1709 	}
1710 
1711 	rc->merge_reloc_tree = true;
1712 
1713 	while (!list_empty(&rc->reloc_roots)) {
1714 		reloc_root = list_first_entry(&rc->reloc_roots,
1715 					      struct btrfs_root, root_list);
1716 		list_del_init(&reloc_root->root_list);
1717 
1718 		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1719 				false);
1720 		if (IS_ERR(root)) {
1721 			/*
1722 			 * Even if we have an error we need this reloc root
1723 			 * back on our list so we can clean up properly.
1724 			 */
1725 			list_add(&reloc_root->root_list, &reloc_roots);
1726 			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
1727 			if (!err)
1728 				err = PTR_ERR(root);
1729 			break;
1730 		}
1731 
1732 		if (unlikely(root->reloc_root != reloc_root)) {
1733 			if (root->reloc_root) {
1734 				btrfs_err(fs_info,
1735 "reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
1736 					  btrfs_root_id(root),
1737 					  btrfs_root_id(root->reloc_root),
1738 					  root->reloc_root->root_key.type,
1739 					  root->reloc_root->root_key.offset,
1740 					  btrfs_root_generation(
1741 						  &root->reloc_root->root_item),
1742 					  btrfs_root_id(reloc_root),
1743 					  reloc_root->root_key.type,
1744 					  reloc_root->root_key.offset,
1745 					  btrfs_root_generation(
1746 						  &reloc_root->root_item));
1747 			} else {
1748 				btrfs_err(fs_info,
1749 "reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
1750 					  btrfs_root_id(root),
1751 					  btrfs_root_id(reloc_root),
1752 					  reloc_root->root_key.type,
1753 					  reloc_root->root_key.offset,
1754 					  btrfs_root_generation(
1755 						  &reloc_root->root_item));
1756 			}
1757 			list_add(&reloc_root->root_list, &reloc_roots);
1758 			btrfs_put_root(root);
1759 			btrfs_abort_transaction(trans, -EUCLEAN);
1760 			if (!err)
1761 				err = -EUCLEAN;
1762 			break;
1763 		}
1764 
1765 		/*
1766 		 * set reference count to 1, so btrfs_recover_relocation
1767 		 * knows it should resume merging
1768 		 */
1769 		if (!err)
1770 			btrfs_set_root_refs(&reloc_root->root_item, 1);
1771 		ret = btrfs_update_reloc_root(trans, root);
1772 
1773 		/*
1774 		 * Even if we have an error we need this reloc root back on our
1775 		 * list so we can clean up properly.
1776 		 */
1777 		list_add(&reloc_root->root_list, &reloc_roots);
1778 		btrfs_put_root(root);
1779 
1780 		if (unlikely(ret)) {
1781 			btrfs_abort_transaction(trans, ret);
1782 			if (!err)
1783 				err = ret;
1784 			break;
1785 		}
1786 	}
1787 
1788 	list_splice(&reloc_roots, &rc->reloc_roots);
1789 
1790 	if (!err)
1791 		err = btrfs_commit_transaction(trans);
1792 	else
1793 		btrfs_end_transaction(trans);
1794 	return err;
1795 }
1796 
1797 static noinline_for_stack
1798 void free_reloc_roots(struct list_head *list)
1799 {
1800 	struct btrfs_root *reloc_root, *tmp;
1801 
1802 	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1803 		__del_reloc_root(reloc_root);
1804 }
1805 
1806 static noinline_for_stack
1807 void merge_reloc_roots(struct reloc_control *rc)
1808 {
1809 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1810 	struct btrfs_root *root;
1811 	struct btrfs_root *reloc_root;
1812 	LIST_HEAD(reloc_roots);
1813 	int found = 0;
1814 	int ret = 0;
1815 again:
1816 	root = rc->extent_root;
1817 
1818 	/*
1819 	 * this serializes us with btrfs_record_root_in_transaction;
1820 	 * we have to make sure nobody is in the middle of
1821 	 * adding their roots to the list while we are
1822 	 * doing this splice
1823 	 */
1824 	mutex_lock(&fs_info->reloc_mutex);
1825 	list_splice_init(&rc->reloc_roots, &reloc_roots);
1826 	mutex_unlock(&fs_info->reloc_mutex);
1827 
1828 	while (!list_empty(&reloc_roots)) {
1829 		found = 1;
1830 		reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);
1831 
1832 		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1833 					 false);
1834 		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1835 			if (WARN_ON(IS_ERR(root))) {
1836 				/*
1837 				 * For recovery we read the fs roots on mount,
1838 				 * and if we didn't find the root then we marked
1839 				 * the reloc root as a garbage root.  For normal
1840 				 * relocation obviously the root should exist in
1841 				 * memory.  However there's no reason we can't
1842 				 * handle the error properly here just in case.
1843 				 */
1844 				ret = PTR_ERR(root);
1845 				goto out;
1846 			}
1847 			if (WARN_ON(root->reloc_root != reloc_root)) {
1848 				/*
1849 				 * This can happen if on-disk metadata has some
1850 				 * corruption, e.g. bad reloc tree key offset.
1851 				 */
1852 				ret = -EINVAL;
1853 				goto out;
1854 			}
1855 			ret = merge_reloc_root(rc, root);
1856 			btrfs_put_root(root);
1857 			if (ret) {
1858 				if (list_empty(&reloc_root->root_list))
1859 					list_add_tail(&reloc_root->root_list,
1860 						      &reloc_roots);
1861 				goto out;
1862 			}
1863 		} else {
1864 			if (!IS_ERR(root)) {
1865 				if (root->reloc_root == reloc_root) {
1866 					root->reloc_root = NULL;
1867 					btrfs_put_root(reloc_root);
1868 				}
1869 				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
1870 					  &root->state);
1871 				btrfs_put_root(root);
1872 			}
1873 
1874 			list_del_init(&reloc_root->root_list);
1875 			/* Don't forget to queue this reloc root for cleanup */
1876 			list_add_tail(&reloc_root->reloc_dirty_list,
1877 				      &rc->dirty_subvol_roots);
1878 		}
1879 	}
1880 
1881 	if (found) {
1882 		found = 0;
1883 		goto again;
1884 	}
1885 out:
1886 	if (ret) {
1887 		btrfs_handle_fs_error(fs_info, ret, NULL);
1888 		free_reloc_roots(&reloc_roots);
1889 
1890 		/* new reloc root may be added */
1891 		mutex_lock(&fs_info->reloc_mutex);
1892 		list_splice_init(&rc->reloc_roots, &reloc_roots);
1893 		mutex_unlock(&fs_info->reloc_mutex);
1894 		free_reloc_roots(&reloc_roots);
1895 	}
1896 
1897 	/*
1898 	 * We used to have
1899 	 *
1900 	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
1901 	 *
1902 	 * here, but it's wrong.  If we fail to start the transaction in
1903 	 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
1904 	 * have actually been removed from the reloc_root_tree rb tree.  This is
1905 	 * fine because we're bailing here, and we hold a reference on the root
1906 	 * for the list that holds it, so these roots will be cleaned up when we
1907 	 * do the reloc_dirty_list afterwards.  Meanwhile the root->reloc_root
1908 	 * will be cleaned up on unmount.
1909 	 *
1910 	 * The remaining nodes will be cleaned up by free_reloc_control.
1911 	 */
1912 }
1913 
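/*
 * Free all tree_block entries queued in the rb tree, leaving it empty.
 */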
1914 static void free_block_list(struct rb_root *blocks)
1915 {
1916 	struct tree_block *block;
1917 	struct rb_node *rb_node;
1918 	while ((rb_node = rb_first(blocks))) {
1919 		block = rb_entry(rb_node, struct tree_block, rb_node);
1920 		rb_erase(rb_node, blocks);
1921 		kfree(block);
1922 	}
1923 }
1924 
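/*
 * Make sure the subvolume root that owns the given reloc root has been
 * recorded in the current transaction.
 */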
1925 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
1926 				      struct btrfs_root *reloc_root)
1927 {
1928 	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
1929 	struct btrfs_root *root;
1930 	int ret;
1931 
1932 	if (btrfs_get_root_last_trans(reloc_root) == trans->transid)
1933 		return 0;
1934 
1935 	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
1936 
1937 	/*
1938 	 * This should succeed, since we can't have a reloc root without having
1939 	 * already looked up the actual root and created the reloc root for this
1940 	 * root.
1941 	 *
1942 	 * However if there's some sort of corruption where we have a ref to a
1943 	 * reloc root without a corresponding root this could return ENOENT.
1944 	 */
1945 	if (IS_ERR(root)) {
1946 		DEBUG_WARN("error %ld reading root for reloc root", PTR_ERR(root));
1947 		return PTR_ERR(root);
1948 	}
1949 	if (unlikely(root->reloc_root != reloc_root)) {
1950 		DEBUG_WARN("unexpected reloc root found");
1951 		btrfs_err(fs_info,
1952 			  "root %llu has two reloc roots associated with it",
1953 			  reloc_root->root_key.offset);
1954 		btrfs_put_root(root);
1955 		return -EUCLEAN;
1956 	}
1957 	ret = btrfs_record_root_in_trans(trans, root);
1958 	btrfs_put_root(root);
1959 
1960 	return ret;
1961 }
1962 
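/*
 * Walk up the backref chain from the given node to the tree root owning it,
 * record the involved root in the current transaction and return the reloc
 * root that the new copy of the block should be COWed into.
 */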
1963 static noinline_for_stack
1964 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
1965 				     struct reloc_control *rc,
1966 				     struct btrfs_backref_node *node,
1967 				     struct btrfs_backref_edge *edges[])
1968 {
1969 	struct btrfs_backref_node *next;
1970 	struct btrfs_root *root;
1971 	int index = 0;
1972 	int ret;
1973 
1974 	next = walk_up_backref(node, edges, &index);
1975 	root = next->root;
1976 
1977 	/*
1978 	 * If there is no root, then our references for this block are
1979 	 * incomplete, as we should be able to walk all the way up to a block
1980 	 * that is owned by a root.
1981 	 *
1982 	 * This path is only for SHAREABLE roots, so if we come upon a
1983 	 * non-SHAREABLE root then we have backrefs that resolve improperly.
1984 	 *
1985 	 * Both of these cases indicate file system corruption, or a bug in the
1986 	 * backref walking code.
1987 	 */
1988 	if (unlikely(!root)) {
1989 		btrfs_err(trans->fs_info,
1990 			  "bytenr %llu doesn't have a backref path ending in a root",
1991 			  node->bytenr);
1992 		return ERR_PTR(-EUCLEAN);
1993 	}
1994 	if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
1995 		btrfs_err(trans->fs_info,
1996 			  "bytenr %llu has multiple refs with one ending in a non-shareable root",
1997 			  node->bytenr);
1998 		return ERR_PTR(-EUCLEAN);
1999 	}
2000 
2001 	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
2002 		ret = record_reloc_root_in_trans(trans, root);
2003 		if (ret)
2004 			return ERR_PTR(ret);
2005 		goto found;
2006 	}
2007 
2008 	ret = btrfs_record_root_in_trans(trans, root);
2009 	if (ret)
2010 		return ERR_PTR(ret);
2011 	root = root->reloc_root;
2012 
2013 	/*
2014 	 * We could have raced with another thread which failed, so
2015 	 * root->reloc_root may not be set, return ENOENT in this case.
2016 	 */
2017 	if (!root)
2018 		return ERR_PTR(-ENOENT);
2019 
2020 	if (unlikely(next->new_bytenr)) {
2021 		/*
2022 		 * We just created the reloc root, so we shouldn't have
2023 		 * ->new_bytenr set yet. If it is then we have multiple roots
2024 		 * pointing at the same bytenr which indicates corruption, or
2025 		 * we've made a mistake in the backref walking code.
2026 		 */
2027 		ASSERT(next->new_bytenr == 0);
2028 		btrfs_err(trans->fs_info,
2029 			  "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
2030 			  node->bytenr, next->bytenr);
2031 		return ERR_PTR(-EUCLEAN);
2032 	}
2033 
2034 	next->new_bytenr = root->node->start;
2035 	btrfs_put_root(next->root);
2036 	next->root = btrfs_grab_root(root);
2037 	ASSERT(next->root);
2038 	mark_block_processed(rc, next);
2039 found:
2040 	next = node;
2041 	/* setup backref node path for btrfs_reloc_cow_block */
2042 	while (1) {
2043 		rc->backref_cache.path[next->level] = next;
2044 		if (--index < 0)
2045 			break;
2046 		next = edges[index]->node[UPPER];
2047 	}
2048 	return root;
2049 }
2050 
2051 /*
2052  * Select a tree root for relocation.
2053  *
2054  * Return NULL if the block is not shareable. We should use do_relocation() in
2055  * this case.
2056  *
2057  * Return a tree root pointer if the block is shareable.
2058  * Return -ENOENT if the block is the root of a reloc tree.
2059  */
2060 static noinline_for_stack
2061 struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2062 {
2063 	struct btrfs_backref_node *next;
2064 	struct btrfs_root *root;
2065 	struct btrfs_root *fs_root = NULL;
2066 	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2067 	int index = 0;
2068 
2069 	next = node;
2070 	while (1) {
2071 		cond_resched();
2072 		next = walk_up_backref(next, edges, &index);
2073 		root = next->root;
2074 
2075 		/*
2076 		 * This can occur if we have incomplete extent refs leading all
2077 		 * the way up a particular path; in this case return -EUCLEAN.
2078 		 */
2079 		if (unlikely(!root))
2080 			return ERR_PTR(-EUCLEAN);
2081 
2082 		/* No other choice for non-shareable tree */
2083 		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2084 			return root;
2085 
2086 		if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID)
2087 			fs_root = root;
2088 
2089 		if (next != node)
2090 			return NULL;
2091 
2092 		next = walk_down_backref(edges, &index);
2093 		if (!next || next->level <= node->level)
2094 			break;
2095 	}
2096 
2097 	if (!fs_root)
2098 		return ERR_PTR(-ENOENT);
2099 	return fs_root;
2100 }
2101 
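
/*
 * Calculate the total metadata size (in bytes) of the given node plus all
 * unprocessed upper level blocks that reference it.
 */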
2102 static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
2103 						  struct btrfs_backref_node *node)
2104 {
2105 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2106 	struct btrfs_backref_node *next = node;
2107 	struct btrfs_backref_edge *edge;
2108 	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2109 	u64 num_bytes = 0;
2110 	int index = 0;
2111 
2112 	BUG_ON(node->processed);
2113 
2114 	while (next) {
2115 		cond_resched();
2116 		while (1) {
2117 			if (next->processed)
2118 				break;
2119 
2120 			num_bytes += fs_info->nodesize;
2121 
2122 			if (list_empty(&next->upper))
2123 				break;
2124 
2125 			edge = list_first_entry(&next->upper, struct btrfs_backref_edge,
2126 						list[LOWER]);
2127 			edges[index++] = edge;
2128 			next = edge->node[UPPER];
2129 		}
2130 		next = walk_down_backref(edges, &index);
2131 	}
2132 	return num_bytes;
2133 }
2134 
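/*
 * Reserve num_bytes of metadata space from the relocation block rsv, with
 * only limited flushing allowed.  On failure, grow the rsv size and return
 * -EAGAIN so the caller can retry after dropping the transaction.
 */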
2135 static int refill_metadata_space(struct btrfs_trans_handle *trans,
2136 				 struct reloc_control *rc, u64 num_bytes)
2137 {
2138 	struct btrfs_fs_info *fs_info = trans->fs_info;
2139 	int ret;
2140 
2141 	trans->block_rsv = rc->block_rsv;
2142 	rc->reserved_bytes += num_bytes;
2143 
2144 	/*
2145 	 * We are under a transaction here so we can only do limited flushing.
2146 	 * If we get an enospc just kick back -EAGAIN so we know to drop the
2147 	 * transaction and try to refill when we can flush all the things.
2148 	 */
2149 	ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2150 				     BTRFS_RESERVE_FLUSH_LIMIT);
2151 	if (ret) {
2152 		u64 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2153 
2154 		while (tmp <= rc->reserved_bytes)
2155 			tmp <<= 1;
2156 		/*
2157 		 * Only one thread can access block_rsv at this point, so we
2158 		 * don't need to hold a lock to protect it.  We expand the
2159 		 * reservation size here to allow enough space for relocation,
2160 		 * and we will return early in the ENOSPC case.
2162 		 */
2163 		rc->block_rsv->size = tmp + fs_info->nodesize *
2164 				      RELOCATION_RESERVED_NODES;
2165 		return -EAGAIN;
2166 	}
2167 
2168 	return 0;
2169 }
2170 
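/*
 * Reserve enough metadata space to relocate the given node and all
 * unprocessed blocks referencing it (twice the calculated tree block size).
 */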
2171 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2172 				  struct reloc_control *rc,
2173 				  struct btrfs_backref_node *node)
2174 {
2175 	u64 num_bytes;
2176 
2177 	num_bytes = calcu_metadata_size(rc, node) * 2;
2178 	return refill_metadata_space(trans, rc, num_bytes);
2179 }
2180 
2181 /*
2182  * relocate a block tree, and then update pointers in upper level
2183  * blocks that reference the block to point to the new location.
2184  *
2185  * if called by link_to_upper, the block has already been relocated.
2186  * in that case this function just updates pointers.
2187  */
2188 static int do_relocation(struct btrfs_trans_handle *trans,
2189 			 struct reloc_control *rc,
2190 			 struct btrfs_backref_node *node,
2191 			 struct btrfs_key *key,
2192 			 struct btrfs_path *path, int lowest)
2193 {
2194 	struct btrfs_backref_node *upper;
2195 	struct btrfs_backref_edge *edge;
2196 	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2197 	struct btrfs_root *root;
2198 	struct extent_buffer *eb;
2199 	u32 blocksize;
2200 	u64 bytenr;
2201 	int slot;
2202 	int ret = 0;
2203 
2204 	/*
2205 	 * If we are lowest then this is the first time we're processing this
2206 	 * block, and thus shouldn't have an eb associated with it yet.
2207 	 */
2208 	ASSERT(!lowest || !node->eb);
2209 
2210 	path->lowest_level = node->level + 1;
2211 	rc->backref_cache.path[node->level] = node;
2212 	list_for_each_entry(edge, &node->upper, list[LOWER]) {
2213 		cond_resched();
2214 
2215 		upper = edge->node[UPPER];
2216 		root = select_reloc_root(trans, rc, upper, edges);
2217 		if (IS_ERR(root)) {
2218 			ret = PTR_ERR(root);
2219 			goto next;
2220 		}
2221 
2222 		if (upper->eb && !upper->locked) {
2223 			if (!lowest) {
2224 				ret = btrfs_bin_search(upper->eb, 0, key, &slot);
2225 				if (ret < 0)
2226 					goto next;
2227 				BUG_ON(ret);
2228 				bytenr = btrfs_node_blockptr(upper->eb, slot);
2229 				if (node->eb->start == bytenr)
2230 					goto next;
2231 			}
2232 			btrfs_backref_drop_node_buffer(upper);
2233 		}
2234 
2235 		if (!upper->eb) {
2236 			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2237 			if (ret) {
2238 				if (ret > 0)
2239 					ret = -ENOENT;
2240 
2241 				btrfs_release_path(path);
2242 				break;
2243 			}
2244 
2245 			if (!upper->eb) {
2246 				upper->eb = path->nodes[upper->level];
2247 				path->nodes[upper->level] = NULL;
2248 			} else {
2249 				BUG_ON(upper->eb != path->nodes[upper->level]);
2250 			}
2251 
2252 			upper->locked = 1;
2253 			path->locks[upper->level] = 0;
2254 
2255 			slot = path->slots[upper->level];
2256 			btrfs_release_path(path);
2257 		} else {
2258 			ret = btrfs_bin_search(upper->eb, 0, key, &slot);
2259 			if (ret < 0)
2260 				goto next;
2261 			BUG_ON(ret);
2262 		}
2263 
2264 		bytenr = btrfs_node_blockptr(upper->eb, slot);
2265 		if (lowest) {
2266 			if (unlikely(bytenr != node->bytenr)) {
2267 				btrfs_err(root->fs_info,
2268 		"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2269 					  bytenr, node->bytenr, slot,
2270 					  upper->eb->start);
2271 				ret = -EIO;
2272 				goto next;
2273 			}
2274 		} else {
2275 			if (node->eb->start == bytenr)
2276 				goto next;
2277 		}
2278 
2279 		blocksize = root->fs_info->nodesize;
2280 		eb = btrfs_read_node_slot(upper->eb, slot);
2281 		if (IS_ERR(eb)) {
2282 			ret = PTR_ERR(eb);
2283 			goto next;
2284 		}
2285 		btrfs_tree_lock(eb);
2286 
2287 		if (!node->eb) {
2288 			ret = btrfs_cow_block(trans, root, eb, upper->eb,
2289 					      slot, &eb, BTRFS_NESTING_COW);
2290 			btrfs_tree_unlock(eb);
2291 			free_extent_buffer(eb);
2292 			if (ret < 0)
2293 				goto next;
2294 			/*
2295 			 * We've just COWed this block, it should have updated
2296 			 * the correct backref node entry.
2297 			 */
2298 			ASSERT(node->eb == eb);
2299 		} else {
2300 			struct btrfs_ref ref = {
2301 				.action = BTRFS_ADD_DELAYED_REF,
2302 				.bytenr = node->eb->start,
2303 				.num_bytes = blocksize,
2304 				.parent = upper->eb->start,
2305 				.owning_root = btrfs_header_owner(upper->eb),
2306 				.ref_root = btrfs_header_owner(upper->eb),
2307 			};
2308 
2309 			btrfs_set_node_blockptr(upper->eb, slot,
2310 						node->eb->start);
2311 			btrfs_set_node_ptr_generation(upper->eb, slot,
2312 						      trans->transid);
2313 			btrfs_mark_buffer_dirty(trans, upper->eb);
2314 
2315 			btrfs_init_tree_ref(&ref, node->level,
2316 					    btrfs_root_id(root), false);
2317 			ret = btrfs_inc_extent_ref(trans, &ref);
2318 			if (!ret)
2319 				ret = btrfs_drop_subtree(trans, root, eb,
2320 							 upper->eb);
2321 			if (unlikely(ret))
2322 				btrfs_abort_transaction(trans, ret);
2323 		}
2324 next:
2325 		if (!upper->pending)
2326 			btrfs_backref_drop_node_buffer(upper);
2327 		else
2328 			btrfs_backref_unlock_node_buffer(upper);
2329 		if (ret)
2330 			break;
2331 	}
2332 
2333 	if (!ret && node->pending) {
2334 		btrfs_backref_drop_node_buffer(node);
2335 		list_del_init(&node->list);
2336 		node->pending = 0;
2337 	}
2338 
2339 	path->lowest_level = 0;
2340 
2341 	/*
2342 	 * We should have allocated all of our space in the block rsv and thus
2343 	 * shouldn't ENOSPC.
2344 	 */
2345 	ASSERT(ret != -ENOSPC);
2346 	return ret;
2347 }
2348 
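/*
 * Update pointers in the upper level blocks to point at the already
 * relocated block.
 */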
2349 static int link_to_upper(struct btrfs_trans_handle *trans,
2350 			 struct reloc_control *rc,
2351 			 struct btrfs_backref_node *node,
2352 			 struct btrfs_path *path)
2353 {
2354 	struct btrfs_key key;
2355 
2356 	btrfs_node_key_to_cpu(node->eb, &key, 0);
2357 	return do_relocation(trans, rc, node, &key, path, 0);
2358 }
2359 
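/*
 * Link every pending backref node to its upper level blocks.  If an error is
 * already set, only requeue the nodes without processing them.
 */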
2360 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2361 				struct reloc_control *rc,
2362 				struct btrfs_path *path, int err)
2363 {
2364 	LIST_HEAD(list);
2365 	struct btrfs_backref_cache *cache = &rc->backref_cache;
2366 	struct btrfs_backref_node *node;
2367 	int level;
2368 	int ret;
2369 
2370 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2371 		while (!list_empty(&cache->pending[level])) {
2372 			node = list_first_entry(&cache->pending[level],
2373 						struct btrfs_backref_node, list);
2374 			list_move_tail(&node->list, &list);
2375 			BUG_ON(!node->pending);
2376 
2377 			if (!err) {
2378 				ret = link_to_upper(trans, rc, node, path);
2379 				if (ret < 0)
2380 					err = ret;
2381 			}
2382 		}
2383 		list_splice_init(&list, &cache->pending[level]);
2384 	}
2385 	return err;
2386 }
2387 
2388 /*
2389  * mark a block and all blocks that directly/indirectly reference the
2390  * block as processed.
2391  */
2392 static void update_processed_blocks(struct reloc_control *rc,
2393 				    struct btrfs_backref_node *node)
2394 {
2395 	struct btrfs_backref_node *next = node;
2396 	struct btrfs_backref_edge *edge;
2397 	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2398 	int index = 0;
2399 
2400 	while (next) {
2401 		cond_resched();
2402 		while (1) {
2403 			if (next->processed)
2404 				break;
2405 
2406 			mark_block_processed(rc, next);
2407 
2408 			if (list_empty(&next->upper))
2409 				break;
2410 
2411 			edge = list_first_entry(&next->upper, struct btrfs_backref_edge,
2412 						list[LOWER]);
2413 			edges[index++] = edge;
2414 			next = edge->node[UPPER];
2415 		}
2416 		next = walk_down_backref(edges, &index);
2417 	}
2418 }
2419 
2420 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2421 {
2422 	u32 blocksize = rc->extent_root->fs_info->nodesize;
2423 
2424 	if (btrfs_test_range_bit(&rc->processed_blocks, bytenr,
2425 				 bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
2426 		return 1;
2427 	return 0;
2428 }
2429 
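/*
 * Read the tree block described by the given tree_block and fill in its
 * first key, marking the key as ready.
 */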
2430 static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2431 			      struct tree_block *block)
2432 {
2433 	struct btrfs_tree_parent_check check = {
2434 		.level = block->level,
2435 		.owner_root = block->owner,
2436 		.transid = block->key.offset
2437 	};
2438 	struct extent_buffer *eb;
2439 
2440 	eb = read_tree_block(fs_info, block->bytenr, &check);
2441 	if (IS_ERR(eb))
2442 		return PTR_ERR(eb);
2443 
2444 	if (block->level == 0)
2445 		btrfs_item_key_to_cpu(eb, &block->key, 0);
2446 	else
2447 		btrfs_node_key_to_cpu(eb, &block->key, 0);
2448 	free_extent_buffer(eb);
2449 	block->key_ready = true;
2450 	return 0;
2451 }
2452 
2453 /*
2454  * helper function to relocate a tree block
2455  */
2456 static int relocate_tree_block(struct btrfs_trans_handle *trans,
2457 				struct reloc_control *rc,
2458 				struct btrfs_backref_node *node,
2459 				struct btrfs_key *key,
2460 				struct btrfs_path *path)
2461 {
2462 	struct btrfs_root *root;
2463 	int ret = 0;
2464 
2465 	if (!node)
2466 		return 0;
2467 
2468 	/*
2469 	 * If we fail here we want to drop our backref_node because we are going
2470 	 * to start over and regenerate the tree for it.
2471 	 */
2472 	ret = reserve_metadata_space(trans, rc, node);
2473 	if (ret)
2474 		goto out;
2475 
2476 	BUG_ON(node->processed);
2477 	root = select_one_root(node);
2478 	if (IS_ERR(root)) {
2479 		ret = PTR_ERR(root);
2480 
2481 		/* See explanation in select_one_root for the -EUCLEAN case. */
2482 		ASSERT(ret == -ENOENT);
2483 		if (ret == -ENOENT) {
2484 			ret = 0;
2485 			update_processed_blocks(rc, node);
2486 		}
2487 		goto out;
2488 	}
2489 
2490 	if (root) {
2491 		if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2492 			/*
2493 			 * This block was the root block of a root, and this is
2494 			 * the first time we're processing the block and thus it
2495 			 * should not have had the ->new_bytenr modified.
2496 			 *
2497 			 * However in the case of corruption we could have
2498 			 * multiple refs pointing to the same block improperly,
2499 			 * and thus we would trip over these checks.  ASSERT()
2500 			 * for the developer case, because it could indicate a
2501 			 * bug in the backref code, however error out for a
2502 			 * normal user in the case of corruption.
2503 			 */
2504 			ASSERT(node->new_bytenr == 0);
2505 			if (unlikely(node->new_bytenr)) {
2506 				btrfs_err(root->fs_info,
2507 				  "bytenr %llu has improper references to it",
2508 					  node->bytenr);
2509 				ret = -EUCLEAN;
2510 				goto out;
2511 			}
2512 			ret = btrfs_record_root_in_trans(trans, root);
2513 			if (ret)
2514 				goto out;
2515 			/*
2516 			 * Another thread could have failed, need to check if we
2517 			 * have reloc_root actually set.
2518 			 */
2519 			if (!root->reloc_root) {
2520 				ret = -ENOENT;
2521 				goto out;
2522 			}
2523 			root = root->reloc_root;
2524 			node->new_bytenr = root->node->start;
2525 			btrfs_put_root(node->root);
2526 			node->root = btrfs_grab_root(root);
2527 			ASSERT(node->root);
2528 		} else {
2529 			btrfs_err(root->fs_info,
2530 				  "bytenr %llu resolved to a non-shareable root",
2531 				  node->bytenr);
2532 			ret = -EUCLEAN;
2533 			goto out;
2534 		}
2535 		if (!ret)
2536 			update_processed_blocks(rc, node);
2537 	} else {
2538 		ret = do_relocation(trans, rc, node, key, path, 1);
2539 	}
2540 out:
2541 	if (ret || node->level == 0)
2542 		btrfs_backref_cleanup_node(&rc->backref_cache, node);
2543 	return ret;
2544 }
2545 
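/*
 * Relocate a block of a COW-only tree (or the data reloc tree) by simply
 * COWing the path down to the block, reserving metadata space first.
 */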
2546 static int relocate_cowonly_block(struct btrfs_trans_handle *trans,
2547 				  struct reloc_control *rc, struct tree_block *block,
2548 				  struct btrfs_path *path)
2549 {
2550 	struct btrfs_fs_info *fs_info = trans->fs_info;
2551 	struct btrfs_root *root;
2552 	u64 num_bytes;
2553 	int nr_levels;
2554 	int ret;
2555 
2556 	root = btrfs_get_fs_root(fs_info, block->owner, true);
2557 	if (IS_ERR(root))
2558 		return PTR_ERR(root);
2559 
2560 	nr_levels = max(btrfs_header_level(root->node) - block->level, 0) + 1;
2561 
2562 	num_bytes = fs_info->nodesize * nr_levels;
2563 	ret = refill_metadata_space(trans, rc, num_bytes);
2564 	if (ret) {
2565 		btrfs_put_root(root);
2566 		return ret;
2567 	}
2568 	path->lowest_level = block->level;
2569 	if (root == root->fs_info->chunk_root)
2570 		btrfs_reserve_chunk_metadata(trans, false);
2571 
2572 	ret = btrfs_search_slot(trans, root, &block->key, path, 0, 1);
2573 	path->lowest_level = 0;
2574 	btrfs_release_path(path);
2575 
2576 	if (root == root->fs_info->chunk_root)
2577 		btrfs_trans_release_chunk_metadata(trans);
2578 	if (ret > 0)
2579 		ret = 0;
2580 	btrfs_put_root(root);
2581 
2582 	return ret;
2583 }
2584 
2585 /*
2586  * relocate a list of blocks
2587  */
2588 static noinline_for_stack
2589 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2590 			 struct reloc_control *rc, struct rb_root *blocks)
2591 {
2592 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2593 	struct btrfs_backref_node *node;
2594 	struct btrfs_path *path;
2595 	struct tree_block *block;
2596 	struct tree_block *next;
2597 	int ret = 0;
2598 
2599 	path = btrfs_alloc_path();
2600 	if (!path) {
2601 		ret = -ENOMEM;
2602 		goto out_free_blocks;
2603 	}
2604 
2605 	/* Kick in readahead for tree blocks with missing keys */
2606 	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2607 		if (!block->key_ready)
2608 			btrfs_readahead_tree_block(fs_info, block->bytenr,
2609 						   block->owner, 0,
2610 						   block->level);
2611 	}
2612 
2613 	/* Get first keys */
2614 	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2615 		if (!block->key_ready) {
2616 			ret = get_tree_block_key(fs_info, block);
2617 			if (ret)
2618 				goto out_free_path;
2619 		}
2620 	}
2621 
2622 	/* Do tree relocation */
2623 	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2624 		/*
2625 		 * For COWonly blocks, or the data reloc tree, we only need to
2626 		 * COW down to the block; there's no need to generate a backref
2627 		 * tree.
2628 		 */
2629 		if (block->owner &&
2630 		    (!btrfs_is_fstree(block->owner) ||
2631 		     block->owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) {
2632 			ret = relocate_cowonly_block(trans, rc, block, path);
2633 			if (ret)
2634 				break;
2635 			continue;
2636 		}
2637 
2638 		node = build_backref_tree(trans, rc, &block->key,
2639 					  block->level, block->bytenr);
2640 		if (IS_ERR(node)) {
2641 			ret = PTR_ERR(node);
2642 			goto out;
2643 		}
2644 
2645 		ret = relocate_tree_block(trans, rc, node, &block->key,
2646 					  path);
2647 		if (ret < 0)
2648 			break;
2649 	}
2650 out:
2651 	ret = finish_pending_nodes(trans, rc, path, ret);
2652 
2653 out_free_path:
2654 	btrfs_free_path(path);
2655 out_free_blocks:
2656 	free_block_list(blocks);
2657 	return ret;
2658 }
2659 
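/*
 * Preallocate file ranges in the data relocation inode covering every extent
 * of the current cluster, after invalidating any stale page cache over that
 * range.
 */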
2660 static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control *rc)
2661 {
2662 	const struct file_extent_cluster *cluster = &rc->cluster;
2663 	struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
2664 	u64 alloc_hint = 0;
2665 	u64 start;
2666 	u64 end;
2667 	u64 offset = inode->reloc_block_group_start;
2668 	u64 num_bytes;
2669 	int nr;
2670 	int ret = 0;
2671 	u64 prealloc_start = cluster->start - offset;
2672 	u64 prealloc_end = cluster->end - offset;
2673 	u64 cur_offset = prealloc_start;
2674 
2675 	/*
2676 	 * For the blocksize < folio size case (either bs < page size or large
2677 	 * folios), all blocks beyond i_size are filled with zeros.
2678 	 *
2679 	 * If the current cluster covers the above range, btrfs_do_readpage()
2680 	 * will skip the read, and relocate_one_folio() will later write back
2681 	 * the padding zeros as new data, causing data corruption.
2682 	 *
2683 	 * Here we have to invalidate the cache covering our cluster.
2684 	 */
2685 	ret = filemap_invalidate_inode(&inode->vfs_inode, true, prealloc_start,
2686 				       prealloc_end);
2687 	if (ret < 0)
2688 		return ret;
2689 
2690 	BUG_ON(cluster->start != cluster->boundary[0]);
2691 	ret = btrfs_alloc_data_chunk_ondemand(inode,
2692 					      prealloc_end + 1 - prealloc_start);
2693 	if (ret)
2694 		return ret;
2695 
2696 	btrfs_inode_lock(inode, 0);
2697 	for (nr = 0; nr < cluster->nr; nr++) {
2698 		struct extent_state *cached_state = NULL;
2699 
2700 		start = cluster->boundary[nr] - offset;
2701 		if (nr + 1 < cluster->nr)
2702 			end = cluster->boundary[nr + 1] - 1 - offset;
2703 		else
2704 			end = cluster->end - offset;
2705 
2706 		btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
2707 		num_bytes = end + 1 - start;
2708 		ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2709 						num_bytes, num_bytes,
2710 						end + 1, &alloc_hint);
2711 		cur_offset = end + 1;
2712 		btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
2713 		if (ret)
2714 			break;
2715 	}
2716 	btrfs_inode_unlock(inode, 0);
2717 
2718 	if (cur_offset < prealloc_end)
2719 		btrfs_free_reserved_data_space_noquota(inode,
2720 						       prealloc_end + 1 - cur_offset);
2721 	return ret;
2722 }
2723 
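/*
 * Insert a single pinned extent map covering the whole cluster, mapping the
 * data reloc inode range to the cluster's on-disk location.
 */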
2724 static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_control *rc)
2725 {
2726 	struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
2727 	struct extent_map *em;
2728 	struct extent_state *cached_state = NULL;
2729 	u64 offset = inode->reloc_block_group_start;
2730 	u64 start = rc->cluster.start - offset;
2731 	u64 end = rc->cluster.end - offset;
2732 	int ret = 0;
2733 
2734 	em = btrfs_alloc_extent_map();
2735 	if (!em)
2736 		return -ENOMEM;
2737 
2738 	em->start = start;
2739 	em->len = end + 1 - start;
2740 	em->disk_bytenr = rc->cluster.start;
2741 	em->disk_num_bytes = em->len;
2742 	em->ram_bytes = em->len;
2743 	em->flags |= EXTENT_FLAG_PINNED;
2744 
2745 	btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
2746 	ret = btrfs_replace_extent_map_range(inode, em, false);
2747 	btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
2748 	btrfs_free_extent_map(em);
2749 
2750 	return ret;
2751 }
2752 
2753 /*
2754  * Allow error injection to test balance/relocation cancellation
2755  */
2756 noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
2757 {
2758 	return atomic_read(&fs_info->balance_cancel_req) ||
2759 		atomic_read(&fs_info->reloc_cancel_req) ||
2760 		fatal_signal_pending(current);
2761 }
2762 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2763 
2764 static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
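/*
 * Return the last byte (inclusive) covered by extent cluster_nr of the cluster.
 */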
2765 				    int cluster_nr)
2766 {
2767 	/* Last extent, use cluster end directly */
2768 	if (cluster_nr >= cluster->nr - 1)
2769 		return cluster->end;
2770 
2771 	/* Use next boundary start */
2772 	return cluster->boundary[cluster_nr + 1] - 1;
2773 }
2774 
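/*
 * Relocate the part of the file extent cluster covered by one folio: read the
 * folio if needed, mark each covered extent range delalloc and dirty, and set
 * EXTENT_BOUNDARY at extent starts so writeback produces extents of the same
 * size as the source.
 */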
2775 static int relocate_one_folio(struct reloc_control *rc,
2776 			      struct file_ra_state *ra,
2777 			      int *cluster_nr, u64 *file_offset_ret)
2778 {
2779 	const struct file_extent_cluster *cluster = &rc->cluster;
2780 	struct inode *inode = rc->data_inode;
2781 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2782 	const u64 orig_file_offset = *file_offset_ret;
2783 	u64 offset = BTRFS_I(inode)->reloc_block_group_start;
2784 	const pgoff_t last_index = (cluster->end - offset) >> PAGE_SHIFT;
2785 	const pgoff_t index = orig_file_offset >> PAGE_SHIFT;
2786 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2787 	struct folio *folio;
2788 	u64 folio_start;
2789 	u64 folio_end;
2790 	u64 cur;
2791 	int ret;
2792 	const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
2793 
2794 	ASSERT(index <= last_index);
2795 again:
2796 	folio = filemap_lock_folio(inode->i_mapping, index);
2797 	if (IS_ERR(folio)) {
2798 
2800 		 * On relocation we're doing readahead on the relocation inode,
2801 		 * but if the filesystem is backed by a RAID stripe tree we can
2802 		 * get ENOENT (e.g. due to preallocated extents not being
2803 		 * mapped in the RST) from the lookup.
2804 		 *
2805 		 * But readahead doesn't handle the error and submits invalid
2806 		 * reads to the device, causing a assertion failures.
2807 		 * reads to the device, causing assertion failures.
2808 		if (!use_rst)
2809 			page_cache_sync_readahead(inode->i_mapping, ra, NULL,
2810 						  index, last_index + 1 - index);
2811 		folio = __filemap_get_folio(inode->i_mapping, index,
2812 					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
2813 					    mask);
2814 		if (IS_ERR(folio))
2815 			return PTR_ERR(folio);
2816 	}
2817 
2818 	if (folio_test_readahead(folio) && !use_rst)
2819 		page_cache_async_readahead(inode->i_mapping, ra, NULL,
2820 					   folio, last_index + 1 - index);
2821 
2822 	if (!folio_test_uptodate(folio)) {
2823 		btrfs_read_folio(NULL, folio);
2824 		folio_lock(folio);
2825 		if (unlikely(!folio_test_uptodate(folio))) {
2826 			ret = -EIO;
2827 			goto release_folio;
2828 		}
2829 		if (folio->mapping != inode->i_mapping) {
2830 			folio_unlock(folio);
2831 			folio_put(folio);
2832 			goto again;
2833 		}
2834 	}
2835 
2836 	/*
2837 	 * We could have lost folio private when we dropped the lock to read the
2838 	 * folio above, so make sure we call set_folio_extent_mapped() here to
2839 	 * have all of the subpage blocksize state we need in place.
2840 	 */
2841 	ret = set_folio_extent_mapped(folio);
2842 	if (ret < 0)
2843 		goto release_folio;
2844 
2845 	folio_start = folio_pos(folio);
2846 	folio_end = folio_start + folio_size(folio) - 1;
2847 
2848 	/*
2849 	 * Start from the cluster boundary, as in the subpage case the cluster
2850 	 * can start inside the folio.
2851 	 */
2852 	cur = max(folio_start, cluster->boundary[*cluster_nr] - offset);
2853 	while (cur <= folio_end) {
2854 		struct extent_state *cached_state = NULL;
2855 		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
2856 		u64 extent_end = get_cluster_boundary_end(cluster,
2857 						*cluster_nr) - offset;
2858 		u64 clamped_start = max(folio_start, extent_start);
2859 		u64 clamped_end = min(folio_end, extent_end);
2860 		u32 clamped_len = clamped_end + 1 - clamped_start;
2861 
2862 		/* Reserve metadata for this range */
2863 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
2864 						      clamped_len, clamped_len,
2865 						      false);
2866 		if (ret)
2867 			goto release_folio;
2868 
2869 		/* Mark the range delalloc and dirty for later writeback */
2870 		btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start,
2871 				  clamped_end, &cached_state);
2872 		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
2873 						clamped_end, 0, &cached_state);
2874 		if (ret) {
2875 			btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree,
2876 					       clamped_start, clamped_end,
2877 					       EXTENT_LOCKED | EXTENT_BOUNDARY,
2878 					       &cached_state);
2879 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
2880 							clamped_len, true);
2881 			btrfs_delalloc_release_extents(BTRFS_I(inode),
2882 						       clamped_len);
2883 			goto release_folio;
2884 		}
2885 		btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);
2886 
2887 		/*
2888 		 * Set the boundary if it's inside the folio.
2889 		 * Data relocation requires the destination extents to have the
2890 		 * same size as the source.
2891 		 * The EXTENT_BOUNDARY bit prevents the current extent from being
2892 		 * merged with the previous one.
2893 		 */
2894 		if (in_range(cluster->boundary[*cluster_nr] - offset,
2895 			     folio_start, folio_size(folio))) {
2896 			u64 boundary_start = cluster->boundary[*cluster_nr] -
2897 						offset;
2898 			u64 boundary_end = boundary_start +
2899 					   fs_info->sectorsize - 1;
2900 
2901 			btrfs_set_extent_bit(&BTRFS_I(inode)->io_tree,
2902 					     boundary_start, boundary_end,
2903 					     EXTENT_BOUNDARY, NULL);
2904 		}
2905 		btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
2906 				    &cached_state);
2907 		btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
2908 		cur += clamped_len;
2909 
2910 		/* Crossed extent end, go to next extent */
2911 		if (cur >= extent_end) {
2912 			(*cluster_nr)++;
2913 			/* Just finished the last extent of the cluster, exit. */
2914 			if (*cluster_nr >= cluster->nr)
2915 				break;
2916 		}
2917 	}
2918 	folio_unlock(folio);
2919 	folio_put(folio);
2920 
2921 	balance_dirty_pages_ratelimited(inode->i_mapping);
2922 	btrfs_throttle(fs_info);
2923 	if (btrfs_should_cancel_balance(fs_info))
2924 		ret = -ECANCELED;
2925 	*file_offset_ret = folio_end + 1;
2926 	return ret;
2927 
2928 release_folio:
2929 	folio_unlock(folio);
2930 	folio_put(folio);
2931 	return ret;
2932 }
2933 
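/*
 * Relocate all data extents of the current cluster by preallocating the
 * destination range and dirtying it folio by folio for later writeback.
 */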
2934 static int relocate_file_extent_cluster(struct reloc_control *rc)
2935 {
2936 	struct inode *inode = rc->data_inode;
2937 	const struct file_extent_cluster *cluster = &rc->cluster;
2938 	u64 offset = BTRFS_I(inode)->reloc_block_group_start;
2939 	u64 cur_file_offset = cluster->start - offset;
2940 	struct file_ra_state AUTO_KFREE(ra);
2941 	int cluster_nr = 0;
2942 	int ret = 0;
2943 
2944 	if (!cluster->nr)
2945 		return 0;
2946 
2947 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
2948 	if (!ra)
2949 		return -ENOMEM;
2950 
2951 	ret = prealloc_file_extent_cluster(rc);
2952 	if (ret)
2953 		return ret;
2954 
2955 	file_ra_state_init(ra, inode->i_mapping);
2956 
2957 	ret = setup_relocation_extent_mapping(rc);
2958 	if (ret)
2959 		return ret;
2960 
2961 	while (cur_file_offset < cluster->end - offset) {
2962 		ret = relocate_one_folio(rc, ra, &cluster_nr, &cur_file_offset);
2963 		if (ret)
2964 			break;
2965 	}
2966 	if (ret == 0)
2967 		WARN_ON(cluster_nr != cluster->nr);
2968 	return ret;
2969 }
2970 
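/*
 * Add a data extent to the current cluster.  The cluster is relocated and
 * restarted whenever the extent is not adjacent to it, is owned by a
 * different root, or the cluster has reached MAX_EXTENTS.
 */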
2971 static noinline_for_stack int relocate_data_extent(struct reloc_control *rc,
2972 					   const struct btrfs_key *extent_key)
2973 {
2974 	struct inode *inode = rc->data_inode;
2975 	struct file_extent_cluster *cluster = &rc->cluster;
2976 	int ret;
2977 	struct btrfs_root *root = BTRFS_I(inode)->root;
2978 
2979 	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
2980 		ret = relocate_file_extent_cluster(rc);
2981 		if (ret)
2982 			return ret;
2983 		cluster->nr = 0;
2984 	}
2985 
2986 	/*
2987 	 * Under simple quotas, we set root->relocation_src_root when we find
2988 	 * the extent. If adjacent extents have different owners, we can't merge
2989 	 * them while relocating. Handle this by storing the owning root that
2990 	 * started a cluster and if we see an extent from a different root break
2991 	 * cluster formation (just like the above case of non-adjacent extents).
2992 	 *
2993 	 * Without simple quotas, relocation_src_root is always 0, so we should
2994 	 * never see a mismatch, and it should have no effect on relocation
2995 	 * clusters.
2996 	 */
2997 	if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) {
2998 		u64 tmp = root->relocation_src_root;
2999 
3000 		/*
3001 		 * root->relocation_src_root is the state that actually affects
3002 		 * the preallocation we do here, so set it to the root owning
3003 		 * the cluster we need to relocate.
3004 		 */
3005 		root->relocation_src_root = cluster->owning_root;
3006 		ret = relocate_file_extent_cluster(rc);
3007 		if (ret)
3008 			return ret;
3009 		cluster->nr = 0;
3010 		/* And reset it back for the current extent's owning root. */
3011 		root->relocation_src_root = tmp;
3012 	}
3013 
3014 	if (!cluster->nr) {
3015 		cluster->start = extent_key->objectid;
3016 		cluster->owning_root = root->relocation_src_root;
3017 	}
3018 	} else {
3019 		BUG_ON(cluster->nr >= MAX_EXTENTS);
3020 	}
3021 	cluster->boundary[cluster->nr] = extent_key->objectid;
3022 	cluster->nr++;
3023 
3024 	if (cluster->nr >= MAX_EXTENTS) {
3025 		ret = relocate_file_extent_cluster(rc);
3026 		if (ret)
3027 			return ret;
3028 		cluster->nr = 0;
3029 	}
3030 	return 0;
3031 }
3032 
3033 /*
3034  * helper to add a tree block to the list.
3035  * the major work is getting the generation and level of the block
3036  */
3037 static int add_tree_block(struct reloc_control *rc,
3038 			  const struct btrfs_key *extent_key,
3039 			  struct btrfs_path *path,
3040 			  struct rb_root *blocks)
3041 {
3042 	struct extent_buffer *eb;
3043 	struct btrfs_extent_item *ei;
3044 	struct btrfs_tree_block_info *bi;
3045 	struct tree_block *block;
3046 	struct rb_node *rb_node;
3047 	u32 item_size;
3048 	int level = -1;
3049 	u64 generation;
3050 	u64 owner = 0;
3051 
3052 	eb = path->nodes[0];
3053 	item_size = btrfs_item_size(eb, path->slots[0]);
3054 
3055 	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3056 	    item_size >= sizeof(*ei) + sizeof(*bi)) {
3057 		unsigned long ptr = 0, end;
3058 
3059 		ei = btrfs_item_ptr(eb, path->slots[0],
3060 				struct btrfs_extent_item);
3061 		end = (unsigned long)ei + item_size;
3062 		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3063 			bi = (struct btrfs_tree_block_info *)(ei + 1);
3064 			level = btrfs_tree_block_level(eb, bi);
3065 			ptr = (unsigned long)(bi + 1);
3066 		} else {
3067 			level = (int)extent_key->offset;
3068 			ptr = (unsigned long)(ei + 1);
3069 		}
3070 		generation = btrfs_extent_generation(eb, ei);
3071 
3072 		/*
3073 		 * We're reading random blocks without knowing their owner ahead
3074 		 * of time.  This is ok most of the time, as all reloc roots and
3075 		 * fs roots have the same lock type.  However normal trees do
3076 		 * not, and the only way to know ahead of time is to read the
3077 		 * inline ref offset.  We know it's an fs root if
3078 		 *
3079 		 * 1. There's more than one ref.
3080 		 * 2. There's a SHARED_DATA_REF_KEY set.
3081 		 * 3. FULL_BACKREF is set on the flags.
3082 		 *
3083 		 * Otherwise it's safe to assume that the ref offset == the
3084 		 * owner of this block, so we can use that when calling
3085 		 * read_tree_block.
3086 		 */
3087 		if (btrfs_extent_refs(eb, ei) == 1 &&
3088 		    !(btrfs_extent_flags(eb, ei) &
3089 		      BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3090 		    ptr < end) {
3091 			struct btrfs_extent_inline_ref *iref;
3092 			int type;
3093 
3094 			iref = (struct btrfs_extent_inline_ref *)ptr;
3095 			type = btrfs_get_extent_inline_ref_type(eb, iref,
3096 							BTRFS_REF_TYPE_BLOCK);
3097 			if (type == BTRFS_REF_TYPE_INVALID)
3098 				return -EINVAL;
3099 			if (type == BTRFS_TREE_BLOCK_REF_KEY)
3100 				owner = btrfs_extent_inline_ref_offset(eb, iref);
3101 		}
3102 	} else {
3103 		btrfs_print_leaf(eb);
3104 		btrfs_err(rc->block_group->fs_info,
3105 			  "unrecognized tree backref at tree block %llu slot %u",
3106 			  eb->start, path->slots[0]);
3107 		btrfs_release_path(path);
3108 		return -EUCLEAN;
3109 	}
3110 
3111 	btrfs_release_path(path);
3112 
3113 	BUG_ON(level == -1);
3114 
3115 	block = kmalloc_obj(*block, GFP_NOFS);
3116 	if (!block)
3117 		return -ENOMEM;
3118 
3119 	block->bytenr = extent_key->objectid;
3120 	block->key.objectid = rc->extent_root->fs_info->nodesize;
3121 	block->key.offset = generation;
3122 	block->level = level;
3123 	block->key_ready = false;
3124 	block->owner = owner;
3125 
3126 	rb_node = rb_simple_insert(blocks, &block->simple_node);
3127 	if (rb_node)
3128 		btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3129 				    -EEXIST);
3130 
3131 	return 0;
3132 }
3133 
3134 /*
3135  * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3136  */
3137 static int __add_tree_block(struct reloc_control *rc,
3138 			    u64 bytenr, u32 blocksize,
3139 			    struct rb_root *blocks)
3140 {
3141 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3142 	BTRFS_PATH_AUTO_FREE(path);
3143 	struct btrfs_key key;
3144 	int ret;
3145 	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3146 
3147 	if (tree_block_processed(bytenr, rc))
3148 		return 0;
3149 
3150 	if (rb_simple_search(blocks, bytenr))
3151 		return 0;
3152 
3153 	path = btrfs_alloc_path();
3154 	if (!path)
3155 		return -ENOMEM;
3156 again:
3157 	key.objectid = bytenr;
3158 	if (skinny) {
3159 		key.type = BTRFS_METADATA_ITEM_KEY;
3160 		key.offset = (u64)-1;
3161 	} else {
3162 		key.type = BTRFS_EXTENT_ITEM_KEY;
3163 		key.offset = blocksize;
3164 	}
3165 
3166 	path->search_commit_root = true;
3167 	path->skip_locking = true;
3168 	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3169 	if (ret < 0)
3170 		return ret;
3171 
3172 	if (ret > 0 && skinny) {
3173 		if (path->slots[0]) {
3174 			path->slots[0]--;
3175 			btrfs_item_key_to_cpu(path->nodes[0], &key,
3176 					      path->slots[0]);
3177 			if (key.objectid == bytenr &&
3178 			    (key.type == BTRFS_METADATA_ITEM_KEY ||
3179 			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
3180 			      key.offset == blocksize)))
3181 				ret = 0;
3182 		}
3183 
3184 		if (ret) {
3185 			skinny = false;
3186 			btrfs_release_path(path);
3187 			goto again;
3188 		}
3189 	}
3190 	if (ret) {
3191 		ASSERT(ret == 1);
3192 		btrfs_print_leaf(path->nodes[0]);
3193 		btrfs_err(fs_info,
3194 	     "tree block extent item (%llu) is not found in extent tree",
3195 		     bytenr);
3196 		WARN_ON(1);
3197 		return -EINVAL;
3198 	}
3199 
3200 	return add_tree_block(rc, &key, path, blocks);
3201 }
3202 
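/*
 * Truncate the v1 free space cache inode of the block group (looking it up
 * by inode number if no inode is given) so its data extents no longer block
 * relocation.
 */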
3203 static int delete_block_group_cache(struct btrfs_block_group *block_group,
3204 				    struct inode *inode,
3205 				    u64 ino)
3206 {
3207 	struct btrfs_fs_info *fs_info = block_group->fs_info;
3208 	struct btrfs_root *root = fs_info->tree_root;
3209 	struct btrfs_trans_handle *trans;
3210 	struct btrfs_inode *btrfs_inode;
3211 	int ret = 0;
3212 
3213 	if (inode)
3214 		goto truncate;
3215 
3216 	btrfs_inode = btrfs_iget(ino, root);
3217 	if (IS_ERR(btrfs_inode))
3218 		return -ENOENT;
3219 	inode = &btrfs_inode->vfs_inode;
3220 
3221 truncate:
3222 	ret = btrfs_check_trunc_cache_free_space(fs_info,
3223 						 &fs_info->global_block_rsv);
3224 	if (ret)
3225 		goto out;
3226 
3227 	trans = btrfs_join_transaction(root);
3228 	if (IS_ERR(trans)) {
3229 		ret = PTR_ERR(trans);
3230 		goto out;
3231 	}
3232 
3233 	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3234 
3235 	btrfs_end_transaction(trans);
3236 	btrfs_btree_balance_dirty(fs_info);
3237 out:
3238 	iput(inode);
3239 	return ret;
3240 }
3241 
3242 /*
3243  * Locate the free space cache EXTENT_DATA item in a root tree leaf and delete
3244  * the cache inode, so free space cache data extents don't block data relocation.
3245  */
3246 static int delete_v1_space_cache(struct extent_buffer *leaf,
3247 				 struct btrfs_block_group *block_group,
3248 				 u64 data_bytenr)
3249 {
3250 	u64 space_cache_ino;
3251 	struct btrfs_file_extent_item *ei;
3252 	struct btrfs_key key;
3253 	bool found = false;
3254 	int i;
3255 
3256 	if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3257 		return 0;
3258 
3259 	for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3260 		u8 type;
3261 
3262 		btrfs_item_key_to_cpu(leaf, &key, i);
3263 		if (key.type != BTRFS_EXTENT_DATA_KEY)
3264 			continue;
3265 		ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3266 		type = btrfs_file_extent_type(leaf, ei);
3267 
3268 		if ((type == BTRFS_FILE_EXTENT_REG ||
3269 		     type == BTRFS_FILE_EXTENT_PREALLOC) &&
3270 		    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3271 			found = true;
3272 			space_cache_ino = key.objectid;
3273 			break;
3274 		}
3275 	}
3276 	if (!found)
3277 		return -ENOENT;
3278 
3279 	return delete_block_group_cache(block_group, NULL, space_cache_ino);
3280 }
3281 
3282 /*
3283  * helper to find all tree blocks that reference a given data extent
3284  */
3285 static noinline_for_stack int add_data_references(struct reloc_control *rc,
3286 						  const struct btrfs_key *extent_key,
3287 						  struct btrfs_path *path,
3288 						  struct rb_root *blocks)
3289 {
3290 	struct btrfs_backref_walk_ctx ctx = { 0 };
3291 	struct ulist_iterator leaf_uiter;
3292 	struct ulist_node *ref_node = NULL;
3293 	const u32 blocksize = rc->extent_root->fs_info->nodesize;
3294 	int ret = 0;
3295 
3296 	btrfs_release_path(path);
3297 
3298 	ctx.bytenr = extent_key->objectid;
3299 	ctx.skip_inode_ref_list = true;
3300 	ctx.fs_info = rc->extent_root->fs_info;
3301 
3302 	ret = btrfs_find_all_leafs(&ctx);
3303 	if (ret < 0)
3304 		return ret;
3305 
3306 	ULIST_ITER_INIT(&leaf_uiter);
3307 	while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
3308 		struct btrfs_tree_parent_check check = { 0 };
3309 		struct extent_buffer *eb;
3310 
3311 		eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
3312 		if (IS_ERR(eb)) {
3313 			ret = PTR_ERR(eb);
3314 			break;
3315 		}
3316 		ret = delete_v1_space_cache(eb, rc->block_group,
3317 					    extent_key->objectid);
3318 		free_extent_buffer(eb);
3319 		if (ret < 0)
3320 			break;
3321 		ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3322 		if (ret < 0)
3323 			break;
3324 	}
3325 	if (ret < 0)
3326 		free_block_list(blocks);
3327 	ulist_free(ctx.refs);
3328 	return ret;
3329 }
3330 
3331 /*
3332  * helper to find next unprocessed extent
3333  */
3334 static noinline_for_stack
3335 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3336 		     struct btrfs_key *extent_key)
3337 {
3338 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3339 	struct btrfs_key key;
3340 	struct extent_buffer *leaf;
3341 	u64 start, end, last;
3342 	int ret;
3343 
3344 	last = rc->block_group->start + rc->block_group->length;
3345 	while (1) {
3346 		bool block_found;
3347 
3348 		cond_resched();
3349 		if (rc->search_start >= last) {
3350 			ret = 1;
3351 			break;
3352 		}
3353 
3354 		key.objectid = rc->search_start;
3355 		key.type = BTRFS_EXTENT_ITEM_KEY;
3356 		key.offset = 0;
3357 
3358 		path->search_commit_root = true;
3359 		path->skip_locking = true;
3360 		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3361 					0, 0);
3362 		if (ret < 0)
3363 			break;
3364 next:
3365 		leaf = path->nodes[0];
3366 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3367 			ret = btrfs_next_leaf(rc->extent_root, path);
3368 			if (ret != 0)
3369 				break;
3370 			leaf = path->nodes[0];
3371 		}
3372 
3373 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3374 		if (key.objectid >= last) {
3375 			ret = 1;
3376 			break;
3377 		}
3378 
3379 		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3380 		    key.type != BTRFS_METADATA_ITEM_KEY) {
3381 			path->slots[0]++;
3382 			goto next;
3383 		}
3384 
3385 		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3386 		    key.objectid + key.offset <= rc->search_start) {
3387 			path->slots[0]++;
3388 			goto next;
3389 		}
3390 
3391 		if (key.type == BTRFS_METADATA_ITEM_KEY &&
3392 		    key.objectid + fs_info->nodesize <=
3393 		    rc->search_start) {
3394 			path->slots[0]++;
3395 			goto next;
3396 		}
3397 
3398 		block_found = btrfs_find_first_extent_bit(&rc->processed_blocks,
3399 							  key.objectid, &start, &end,
3400 							  EXTENT_DIRTY, NULL);
3401 
3402 		if (block_found && start <= key.objectid) {
3403 			btrfs_release_path(path);
3404 			rc->search_start = end + 1;
3405 		} else {
3406 			if (key.type == BTRFS_EXTENT_ITEM_KEY)
3407 				rc->search_start = key.objectid + key.offset;
3408 			else
3409 				rc->search_start = key.objectid +
3410 					fs_info->nodesize;
3411 			memcpy(extent_key, &key, sizeof(key));
3412 			return 0;
3413 		}
3414 	}
3415 	btrfs_release_path(path);
3416 	return ret;
3417 }
3418 
3419 static void set_reloc_control(struct reloc_control *rc)
3420 {
3421 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3422 
3423 	mutex_lock(&fs_info->reloc_mutex);
3424 	fs_info->reloc_ctl = rc;
3425 	mutex_unlock(&fs_info->reloc_mutex);
3426 }
3427 
3428 static void unset_reloc_control(struct reloc_control *rc)
3429 {
3430 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3431 
3432 	mutex_lock(&fs_info->reloc_mutex);
3433 	fs_info->reloc_ctl = NULL;
3434 	mutex_unlock(&fs_info->reloc_mutex);
3435 }
3436 
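/*
 * Set up the relocation control before extent processing starts: allocate and
 * fill the relocation block rsv, publish the reloc control and commit a
 * transaction.
 */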
3437 static noinline_for_stack
3438 int prepare_to_relocate(struct reloc_control *rc)
3439 {
3440 	struct btrfs_trans_handle *trans;
3441 	int ret;
3442 
3443 	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3444 					      BTRFS_BLOCK_RSV_TEMP);
3445 	if (!rc->block_rsv)
3446 		return -ENOMEM;
3447 
3448 	memset(&rc->cluster, 0, sizeof(rc->cluster));
3449 	rc->search_start = rc->block_group->start;
3450 	rc->extents_found = 0;
3451 	rc->nodes_relocated = 0;
3452 	rc->merging_rsv_size = 0;
3453 	rc->reserved_bytes = 0;
3454 	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3455 			      RELOCATION_RESERVED_NODES;
3456 	ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3457 				     rc->block_rsv, rc->block_rsv->size,
3458 				     BTRFS_RESERVE_FLUSH_ALL);
3459 	if (ret)
3460 		return ret;
3461 
3462 	rc->create_reloc_tree = true;
3463 	set_reloc_control(rc);
3464 
3465 	trans = btrfs_join_transaction(rc->extent_root);
3466 	if (IS_ERR(trans)) {
3467 		unset_reloc_control(rc);
3468 		/*
3469 		 * The extent tree is not a ref_cow tree and has no reloc_root to
3470 		 * clean up.  Callers are responsible for freeing the above
3471 		 * block rsv.
3472 		 */
3473 		return PTR_ERR(trans);
3474 	}
3475 
3476 	ret = btrfs_commit_transaction(trans);
3477 	if (ret)
3478 		unset_reloc_control(rc);
3479 
3480 	return ret;
3481 }
3482 
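/*
 * Main relocation worker: iterate over all extents of the target block group,
 * relocate the tree blocks (and data extents during the MOVE_DATA_EXTENTS
 * stage) referencing them, then merge the reloc trees back into their source
 * trees.
 */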
3483 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3484 {
3485 	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3486 	struct rb_root blocks = RB_ROOT;
3487 	struct btrfs_key key;
3488 	struct btrfs_trans_handle *trans = NULL;
3489 	BTRFS_PATH_AUTO_FREE(path);
3490 	struct btrfs_extent_item *ei;
3491 	u64 flags;
3492 	int ret;
3493 	int err = 0;
3494 	int progress = 0;
3495 
3496 	path = btrfs_alloc_path();
3497 	if (!path)
3498 		return -ENOMEM;
3499 	path->reada = READA_FORWARD;
3500 
3501 	ret = prepare_to_relocate(rc);
3502 	if (ret) {
3503 		err = ret;
3504 		goto out_free;
3505 	}
3506 
3507 	while (1) {
3508 		rc->reserved_bytes = 0;
3509 		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
3510 					     rc->block_rsv->size,
3511 					     BTRFS_RESERVE_FLUSH_ALL);
3512 		if (ret) {
3513 			err = ret;
3514 			break;
3515 		}
3516 		progress++;
3517 		trans = btrfs_start_transaction(rc->extent_root, 0);
3518 		if (IS_ERR(trans)) {
3519 			err = PTR_ERR(trans);
3520 			trans = NULL;
3521 			break;
3522 		}
3523 restart:
3524 		if (rc->backref_cache.last_trans != trans->transid)
3525 			btrfs_backref_release_cache(&rc->backref_cache);
3526 		rc->backref_cache.last_trans = trans->transid;
3527 
3528 		ret = find_next_extent(rc, path, &key);
3529 		if (ret < 0)
3530 			err = ret;
3531 		if (ret != 0)
3532 			break;
3533 
3534 		rc->extents_found++;
3535 
3536 		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3537 				    struct btrfs_extent_item);
3538 		flags = btrfs_extent_flags(path->nodes[0], ei);
3539 
3540 		/*
3541 		 * If we are relocating a simple quota owned extent item, we
3542 		 * need to note the owner on the reloc data root so that when
3543 		 * we allocate the replacement item, we can attribute it to the
3544 		 * correct eventual owner (rather than the reloc data root).
3545 		 */
3546 		if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3547 			struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
3548 			u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
3549 								 path->nodes[0],
3550 								 path->slots[0]);
3551 
3552 			root->relocation_src_root = owning_root_id;
3553 		}
3554 
3555 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3556 			ret = add_tree_block(rc, &key, path, &blocks);
3557 		} else if (rc->stage == UPDATE_DATA_PTRS &&
3558 			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
3559 			ret = add_data_references(rc, &key, path, &blocks);
3560 		} else {
3561 			btrfs_release_path(path);
3562 			ret = 0;
3563 		}
3564 		if (ret < 0) {
3565 			err = ret;
3566 			break;
3567 		}
3568 
3569 		if (!RB_EMPTY_ROOT(&blocks)) {
3570 			ret = relocate_tree_blocks(trans, rc, &blocks);
3571 			if (ret < 0) {
3572 				if (ret != -EAGAIN) {
3573 					err = ret;
3574 					break;
3575 				}
3576 				rc->extents_found--;
3577 				rc->search_start = key.objectid;
3578 			}
3579 		}
3580 
3581 		btrfs_end_transaction_throttle(trans);
3582 		btrfs_btree_balance_dirty(fs_info);
3583 		trans = NULL;
3584 
3585 		if (rc->stage == MOVE_DATA_EXTENTS &&
3586 		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
3587 			rc->found_file_extent = true;
3588 			ret = relocate_data_extent(rc, &key);
3589 			if (ret < 0) {
3590 				err = ret;
3591 				break;
3592 			}
3593 		}
3594 		if (btrfs_should_cancel_balance(fs_info)) {
3595 			err = -ECANCELED;
3596 			break;
3597 		}
3598 	}
3599 	if (trans && progress && err == -ENOSPC) {
3600 		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3601 		if (ret == 1) {
3602 			err = 0;
3603 			progress = 0;
3604 			goto restart;
3605 		}
3606 	}
3607 
3608 	btrfs_release_path(path);
3609 	btrfs_clear_extent_bit(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, NULL);
3610 
3611 	if (trans) {
3612 		btrfs_end_transaction_throttle(trans);
3613 		btrfs_btree_balance_dirty(fs_info);
3614 	}
3615 
3616 	if (!err && !btrfs_fs_incompat(fs_info, REMAP_TREE)) {
3617 		ret = relocate_file_extent_cluster(rc);
3618 		if (ret < 0)
3619 			err = ret;
3620 	}
3621 
3622 	rc->create_reloc_tree = false;
3623 	set_reloc_control(rc);
3624 
3625 	btrfs_backref_release_cache(&rc->backref_cache);
3626 	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3627 
3628 	/*
3629 	 * Even in the case when the relocation is cancelled, we should still go
3630 	 * through prepare_to_merge() and merge_reloc_roots().
3631 	 *
3632 	 * For errors (including cancelled balance), prepare_to_merge() will
3633 	 * mark all reloc trees orphan, then queue them for cleanup in
3634 	 * merge_reloc_roots().
3635 	 */
3636 	err = prepare_to_merge(rc, err);
3637 
3638 	merge_reloc_roots(rc);
3639 
3640 	rc->merge_reloc_tree = false;
3641 	unset_reloc_control(rc);
3642 	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3643 
3644 	/* get rid of pinned extents */
3645 	ret = btrfs_commit_current_transaction(rc->extent_root);
3646 	if (ret && !err)
3647 		err = ret;
3648 out_free:
3649 	ret = clean_dirty_subvols(rc);
3650 	if (ret < 0 && !err)
3651 		err = ret;
3652 	btrfs_free_block_rsv(fs_info, rc->block_rsv);
3653 	return err;
3654 }
3655 
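/*
 * Insert the inode item for the data relocation inode: a regular file with
 * zero size, zero link count and the NOCOMPRESS and PREALLOC flags set.
 */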
3656 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3657 				 struct btrfs_root *root, u64 objectid)
3658 {
3659 	BTRFS_PATH_AUTO_FREE(path);
3660 	struct btrfs_inode_item *item;
3661 	struct extent_buffer *leaf;
3662 	int ret;
3663 
3664 	path = btrfs_alloc_path();
3665 	if (!path)
3666 		return -ENOMEM;
3667 
3668 	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3669 	if (ret)
3670 		return ret;
3671 
3672 	leaf = path->nodes[0];
3673 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3674 	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3675 	btrfs_set_inode_generation(leaf, item, 1);
3676 	btrfs_set_inode_size(leaf, item, 0);
3677 	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3678 	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3679 					  BTRFS_INODE_PREALLOC);
3680 	return 0;
3681 }
3682 
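/*
 * Remove the inode item of the orphan reloc data inode; any failure here
 * aborts the transaction.
 */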
3683 static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3684 				struct btrfs_root *root, u64 objectid)
3685 {
3686 	BTRFS_PATH_AUTO_FREE(path);
3687 	struct btrfs_key key;
3688 	int ret = 0;
3689 
3690 	path = btrfs_alloc_path();
3691 	if (!path) {
3692 		ret = -ENOMEM;
3693 		goto out;
3694 	}
3695 
3696 	key.objectid = objectid;
3697 	key.type = BTRFS_INODE_ITEM_KEY;
3698 	key.offset = 0;
3699 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3700 	if (ret) {
3701 		if (ret > 0)
3702 			ret = -ENOENT;
3703 		goto out;
3704 	}
3705 	ret = btrfs_del_item(trans, root, path);
3706 out:
3707 	if (ret)
3708 		btrfs_abort_transaction(trans, ret);
3709 }
3710 
3711 /*
3712  * Helper to create an inode for data relocation.
3713  * The inode lives in the data relocation tree and its link count is 0.
3714  */
3715 static noinline_for_stack struct inode *create_reloc_inode(
3716 					const struct btrfs_block_group *group)
3717 {
3718 	struct btrfs_fs_info *fs_info = group->fs_info;
3719 	struct btrfs_inode *inode = NULL;
3720 	struct btrfs_trans_handle *trans;
3721 	struct btrfs_root *root;
3722 	u64 objectid;
3723 	int ret = 0;
3724 
3725 	root = btrfs_grab_root(fs_info->data_reloc_root);
3726 	trans = btrfs_start_transaction(root, 6);
3727 	if (IS_ERR(trans)) {
3728 		btrfs_put_root(root);
3729 		return ERR_CAST(trans);
3730 	}
3731 
3732 	ret = btrfs_get_free_objectid(root, &objectid);
3733 	if (ret)
3734 		goto out;
3735 
3736 	ret = __insert_orphan_inode(trans, root, objectid);
3737 	if (ret)
3738 		goto out;
3739 
3740 	inode = btrfs_iget(objectid, root);
3741 	if (IS_ERR(inode)) {
3742 		delete_orphan_inode(trans, root, objectid);
3743 		ret = PTR_ERR(inode);
3744 		inode = NULL;
3745 		goto out;
3746 	}
3747 	inode->reloc_block_group_start = group->start;
3748 
3749 	ret = btrfs_orphan_add(trans, inode);
3750 out:
3751 	btrfs_put_root(root);
3752 	btrfs_end_transaction(trans);
3753 	btrfs_btree_balance_dirty(fs_info);
3754 	if (ret) {
3755 		if (inode)
3756 			iput(&inode->vfs_inode);
3757 		return ERR_PTR(ret);
3758 	}
3759 	return &inode->vfs_inode;
3760 }
3761 
3762 /*
3763  * Mark start of chunk relocation that is cancellable. Check if the cancellation
3764  * has been requested meanwhile and don't start in that case.
3765  * NOTE: if this returns an error, reloc_chunk_end() must not be called.
3766  *
3767  * Return:
3768  *   0             success
3769  *   -EINPROGRESS  operation is already in progress, that's probably a bug
3770  *   -ECANCELED    cancellation request was set before the operation started
3771  */
3772 static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3773 {
3774 	if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3775 		/* This should not happen */
3776 		btrfs_err(fs_info, "reloc already running, cannot start");
3777 		return -EINPROGRESS;
3778 	}
3779 
3780 	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3781 		btrfs_info(fs_info, "chunk relocation canceled on start");
3782 		/* On cancel, clear all requests. */
3783 		clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3784 		atomic_set(&fs_info->reloc_cancel_req, 0);
3785 		return -ECANCELED;
3786 	}
3787 	return 0;
3788 }
3789 
3790 /*
3791  * Mark end of chunk relocation that is cancellable and wake any waiters.
3792  * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
3793  */
3794 static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3795 {
3796 	ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
3797 	/* Requested after start, clear bit first so any waiters can continue */
3798 	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3799 		btrfs_info(fs_info, "chunk relocation canceled during operation");
3800 	clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3801 	atomic_set(&fs_info->reloc_cancel_req, 0);
3802 }
3803 
3804 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3805 {
3806 	struct reloc_control *rc;
3807 
3808 	rc = kzalloc_obj(*rc, GFP_NOFS);
3809 	if (!rc)
3810 		return NULL;
3811 
3812 	INIT_LIST_HEAD(&rc->reloc_roots);
3813 	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3814 	btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
3815 	rc->reloc_root_tree.rb_root = RB_ROOT;
3816 	spin_lock_init(&rc->reloc_root_tree.lock);
3817 	btrfs_extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
3818 	return rc;
3819 }
3820 
3821 static void free_reloc_control(struct reloc_control *rc)
3822 {
3823 	struct mapping_node *node, *tmp;
3824 
3825 	free_reloc_roots(&rc->reloc_roots);
3826 	rbtree_postorder_for_each_entry_safe(node, tmp,
3827 			&rc->reloc_root_tree.rb_root, rb_node)
3828 		kfree(node);
3829 
3830 	kfree(rc);
3831 }
3832 
3833 /*
3834  * Print the block group being relocated
3835  */
3836 static void describe_relocation(struct btrfs_block_group *block_group)
3837 {
3838 	char buf[128] = "NONE";
3839 
3840 	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3841 
3842 	btrfs_info(block_group->fs_info, "relocating block group %llu flags %s",
3843 		   block_group->start, buf);
3844 }
3845 
3846 static const char *stage_to_string(enum reloc_stage stage)
3847 {
3848 	if (stage == MOVE_DATA_EXTENTS)
3849 		return "move data extents";
3850 	if (stage == UPDATE_DATA_PTRS)
3851 		return "update data pointers";
3852 	return "unknown";
3853 }
3854 
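/*
 * Insert the given keys into the remap tree as empty (zero data size) items,
 * batching at most one leaf's worth of items per btrfs_insert_empty_items()
 * call.
 */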
3855 static int add_remap_tree_entries(struct btrfs_trans_handle *trans, struct btrfs_path *path,
3856 				  struct btrfs_key *entries, unsigned int num_entries)
3857 {
3858 	int ret;
3859 	struct btrfs_fs_info *fs_info = trans->fs_info;
3860 	struct btrfs_item_batch batch;
3861 	u32 *data_sizes;
3862 	u32 max_items;
3863 
3864 	max_items = BTRFS_LEAF_DATA_SIZE(trans->fs_info) / sizeof(struct btrfs_item);
3865 
3866 	data_sizes = kcalloc(min_t(u32, num_entries, max_items), sizeof(u32), GFP_NOFS);
3867 	if (!data_sizes)
3868 		return -ENOMEM;
3869 
3870 	while (true) {
3871 		batch.keys = entries;
3872 		batch.data_sizes = data_sizes;
3873 		batch.total_data_size = 0;
3874 		batch.nr = min_t(u32, num_entries, max_items);
3875 
3876 		ret = btrfs_insert_empty_items(trans, fs_info->remap_root, path, &batch);
3877 		btrfs_release_path(path);
3878 
3879 		if (ret || num_entries <= max_items)
3880 			break;
3881 
3882 		num_entries -= max_items;
3883 		entries += max_items;
3884 	}
3885 
3886 	kfree(data_sizes);
3887 
3888 	return ret;
3889 }
3890 
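/* A contiguous run of free space, as [start, end). */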
3891 struct space_run {
3892 	u64 start;
3893 	u64 end;
3894 };
3895 
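/*
 * Convert a free space bitmap at @address into runs of free space, merging a
 * new run into the previous entry of @space_runs when they are contiguous.
 */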
3896 static void parse_bitmap(u64 block_size, const unsigned long *bitmap,
3897 			 unsigned long size, u64 address, struct space_run *space_runs,
3898 			 unsigned int *num_space_runs)
3899 {
3900 	unsigned long pos, end;
3901 	u64 run_start, run_length;
3902 
3903 	pos = find_first_bit(bitmap, size);
3904 	if (pos == size)
3905 		return;
3906 
3907 	while (true) {
3908 		end = find_next_zero_bit(bitmap, size, pos);
3909 
3910 		run_start = address + (pos * block_size);
3911 		run_length = (end - pos) * block_size;
3912 
3913 		if (*num_space_runs != 0 &&
3914 		    space_runs[*num_space_runs - 1].end == run_start) {
3915 			space_runs[*num_space_runs - 1].end += run_length;
3916 		} else {
3917 			space_runs[*num_space_runs].start = run_start;
3918 			space_runs[*num_space_runs].end = run_start + run_length;
3919 
3920 			(*num_space_runs)++;
3921 		}
3922 
3923 		if (end == size)
3924 			break;
3925 
3926 		pos = find_next_bit(bitmap, size, end + 1);
3927 		if (pos == size)
3928 			break;
3929 	}
3930 }
3931 
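/*
 * Apply @diff to the block group's remap_bytes counter, mark the block group
 * unused if both its used and remap_bytes counters are now zero, and make
 * sure the block group item is written out with the current transaction.
 */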
3932 static void adjust_block_group_remap_bytes(struct btrfs_trans_handle *trans,
3933 					   struct btrfs_block_group *bg, s64 diff)
3934 {
3935 	struct btrfs_fs_info *fs_info = trans->fs_info;
3936 	bool bg_already_dirty = true;
3937 	bool mark_unused = false;
3938 
3939 	spin_lock(&bg->lock);
3940 	bg->remap_bytes += diff;
3941 	if (bg->used == 0 && bg->remap_bytes == 0)
3942 		mark_unused = true;
3943 	spin_unlock(&bg->lock);
3944 
3945 	if (mark_unused)
3946 		btrfs_mark_bg_unused(bg);
3947 
3948 	spin_lock(&trans->transaction->dirty_bgs_lock);
3949 	if (list_empty(&bg->dirty_list)) {
3950 		list_add_tail(&bg->dirty_list, &trans->transaction->dirty_bgs);
3951 		bg_already_dirty = false;
3952 		btrfs_get_block_group(bg);
3953 	}
3954 	spin_unlock(&trans->transaction->dirty_bgs_lock);
3955 
3956 	/* Modified block groups are accounted for in the delayed_refs_rsv. */
3957 	if (!bg_already_dirty)
3958 		btrfs_inc_delayed_refs_rsv_bg_updates(fs_info);
3959 }
3960 
3961 /* Private structure for I/O from copy_remapped_data().  */
3962 struct reloc_io_private {
3963 	struct completion done;
3964 	refcount_t pending_refs;
3965 	blk_status_t status;
3966 };
3967 
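/* Completion callback for the bios submitted during remap data copies. */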
3968 static void reloc_endio(struct btrfs_bio *bbio)
3969 {
3970 	struct reloc_io_private *priv = bbio->private;
3971 
3972 	if (bbio->bio.bi_status)
3973 		WRITE_ONCE(priv->status, bbio->bio.bi_status);
3974 
3975 	if (refcount_dec_and_test(&priv->pending_refs))
3976 		complete(&priv->done);
3977 
3978 	bio_put(&bbio->bio);
3979 }
3980 
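/*
 * Read or write @length bytes at logical address @addr using the given page
 * array, splitting into additional bios whenever one fills up, and wait for
 * all of them to complete.
 */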
3981 static int copy_remapped_data_io(struct btrfs_fs_info *fs_info,
3982 				 struct reloc_io_private *priv,
3983 				 struct page **pages, u64 addr, u64 length,
3984 				 blk_opf_t op)
3985 {
3986 	struct btrfs_bio *bbio;
3987 	int i;
3988 
3989 	init_completion(&priv->done);
3990 	refcount_set(&priv->pending_refs, 1);
3991 	priv->status = 0;
3992 
3993 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, op, BTRFS_I(fs_info->btree_inode),
3994 			       addr, reloc_endio, priv);
3995 	bbio->bio.bi_iter.bi_sector = (addr >> SECTOR_SHIFT);
3996 	bbio->is_remap = true;
3997 
3998 	i = 0;
3999 	do {
4000 		size_t bytes = min_t(u64, length, PAGE_SIZE);
4001 
4002 		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
4003 			refcount_inc(&priv->pending_refs);
4004 			btrfs_submit_bbio(bbio, 0);
4005 
4006 			bbio = btrfs_bio_alloc(BIO_MAX_VECS, op,
4007 					       BTRFS_I(fs_info->btree_inode),
4008 					       addr, reloc_endio, priv);
4009 			bbio->bio.bi_iter.bi_sector = (addr >> SECTOR_SHIFT);
4010 			bbio->is_remap = true;
4011 			continue;
4012 		}
4013 
4014 		i++;
4015 		addr += bytes;
4016 		length -= bytes;
4017 	} while (length);
4018 
4019 	refcount_inc(&priv->pending_refs);
4020 	btrfs_submit_bbio(bbio, 0);
4021 
4022 	if (!refcount_dec_and_test(&priv->pending_refs))
4023 		wait_for_completion_io(&priv->done);
4024 
4025 	return blk_status_to_errno(READ_ONCE(priv->status));
4026 }
4027 
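/*
 * Copy @length bytes of remapped data from @old_addr to @new_addr, going
 * through a temporary page array in chunks of at most 1MiB (and at most one
 * bio) at a time.
 */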
4028 static int copy_remapped_data(struct btrfs_fs_info *fs_info, u64 old_addr,
4029 			      u64 new_addr, u64 length)
4030 {
4031 	int ret;
4032 	u64 copy_len = min_t(u64, length, SZ_1M);
4033 	struct page **pages;
4034 	struct reloc_io_private priv;
4035 	unsigned int nr_pages = DIV_ROUND_UP(copy_len, PAGE_SIZE);
4036 
4037 	pages = kzalloc_objs(struct page *, nr_pages, GFP_NOFS);
4038 	if (!pages)
4039 		return -ENOMEM;
4040 
4041 	ret = btrfs_alloc_page_array(nr_pages, pages, 0);
4042 	if (ret) {
4043 		ret = -ENOMEM;
4044 		goto end;
4045 	}
4046 
4047 	/* Copy 1MB at a time, to avoid using too much memory. */
4048 	do {
4049 		u64 to_copy = min_t(u64, length, copy_len);
4050 
4051 		/* Limit to one bio. */
4052 		to_copy = min_t(u64, to_copy, BIO_MAX_VECS << PAGE_SHIFT);
4053 
4054 		ret = copy_remapped_data_io(fs_info, &priv, pages, old_addr,
4055 					    to_copy, REQ_OP_READ);
4056 		if (ret)
4057 			goto end;
4058 
4059 		ret = copy_remapped_data_io(fs_info, &priv, pages, new_addr,
4060 					    to_copy, REQ_OP_WRITE);
4061 		if (ret)
4062 			goto end;
4063 
4064 		if (to_copy == length)
4065 			break;
4066 
4067 		old_addr += to_copy;
4068 		new_addr += to_copy;
4069 		length -= to_copy;
4070 	} while (true);
4071 
4072 	ret = 0;
4073 end:
4074 	for (int i = 0; i < nr_pages; i++) {
4075 		if (pages[i])
4076 			__free_page(pages[i]);
4077 	}
4078 	kfree(pages);
4079 
4080 	return ret;
4081 }
4082 
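/*
 * Insert a BTRFS_REMAP_KEY item keyed on (@old_addr, @length) whose payload
 * points at @new_addr.
 */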
4083 static int add_remap_item(struct btrfs_trans_handle *trans,
4084 			  struct btrfs_path *path, u64 new_addr, u64 length,
4085 			  u64 old_addr)
4086 {
4087 	struct btrfs_fs_info *fs_info = trans->fs_info;
4088 	struct btrfs_remap_item remap = { 0 };
4089 	struct btrfs_key key;
4090 	struct extent_buffer *leaf;
4091 	int ret;
4092 
4093 	key.objectid = old_addr;
4094 	key.type = BTRFS_REMAP_KEY;
4095 	key.offset = length;
4096 
4097 	ret = btrfs_insert_empty_item(trans, fs_info->remap_root, path,
4098 				      &key, sizeof(struct btrfs_remap_item));
4099 	if (ret)
4100 		return ret;
4101 
4102 	leaf = path->nodes[0];
4103 	btrfs_set_stack_remap_address(&remap, new_addr);
4104 	write_extent_buffer(leaf, &remap, btrfs_item_ptr_offset(leaf, path->slots[0]),
4105 			    sizeof(struct btrfs_remap_item));
4106 
4107 	btrfs_release_path(path);
4108 
4109 	return 0;
4110 }
4111 
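/*
 * Insert a BTRFS_REMAP_BACKREF_KEY item keyed on (@new_addr, @length) whose
 * payload points back at @old_addr.
 */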
4112 static int add_remap_backref_item(struct btrfs_trans_handle *trans,
4113 				  struct btrfs_path *path, u64 new_addr,
4114 				  u64 length, u64 old_addr)
4115 {
4116 	struct btrfs_fs_info *fs_info = trans->fs_info;
4117 	struct btrfs_remap_item remap = { 0 };
4118 	struct btrfs_key key;
4119 	struct extent_buffer *leaf;
4120 	int ret;
4121 
4122 	key.objectid = new_addr;
4123 	key.type = BTRFS_REMAP_BACKREF_KEY;
4124 	key.offset = length;
4125 
4126 	ret = btrfs_insert_empty_item(trans, fs_info->remap_root, path, &key,
4127 				      sizeof(struct btrfs_remap_item));
4128 	if (ret)
4129 		return ret;
4130 
4131 	leaf = path->nodes[0];
4132 	btrfs_set_stack_remap_address(&remap, old_addr);
4133 	write_extent_buffer(leaf, &remap, btrfs_item_ptr_offset(leaf, path->slots[0]),
4134 			    sizeof(struct btrfs_remap_item));
4135 
4136 	btrfs_release_path(path);
4137 
4138 	return 0;
4139 }
4140 
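/*
 * Move a single remap whose data currently lives at @new_addr in @bg: reserve
 * a fresh extent, copy the data there, repoint the remap item for @old_addr
 * at the new location, update the backref items and adjust the free space
 * tree and remap_bytes accounting of both block groups.
 */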
4141 static int move_existing_remap(struct btrfs_fs_info *fs_info,
4142 			       struct btrfs_path *path,
4143 			       struct btrfs_block_group *bg, u64 new_addr,
4144 			       u64 length, u64 old_addr)
4145 {
4146 	struct btrfs_trans_handle *trans;
4147 	struct extent_buffer *leaf;
4148 	struct btrfs_remap_item *remap_ptr;
4149 	struct btrfs_remap_item remap = { 0 };
4150 	struct btrfs_key key, ins;
4151 	u64 dest_addr, dest_length, min_size;
4152 	struct btrfs_block_group *dest_bg;
4153 	int ret;
4154 	const bool is_data = (bg->flags & BTRFS_BLOCK_GROUP_DATA);
4155 	struct btrfs_space_info *sinfo = bg->space_info;
4156 	bool mutex_taken = false;
4157 	bool bg_needs_free_space;
4158 
4159 	spin_lock(&sinfo->lock);
4160 	btrfs_space_info_update_bytes_may_use(sinfo, length);
4161 	spin_unlock(&sinfo->lock);
4162 
4163 	if (is_data)
4164 		min_size = fs_info->sectorsize;
4165 	else
4166 		min_size = fs_info->nodesize;
4167 
4168 	ret = btrfs_reserve_extent(fs_info->fs_root, length, length, min_size,
4169 				   0, 0, &ins, is_data, false);
4170 	if (unlikely(ret)) {
4171 		spin_lock(&sinfo->lock);
4172 		btrfs_space_info_update_bytes_may_use(sinfo, -length);
4173 		spin_unlock(&sinfo->lock);
4174 		return ret;
4175 	}
4176 
4177 	dest_addr = ins.objectid;
4178 	dest_length = ins.offset;
4179 
4180 	dest_bg = btrfs_lookup_block_group(fs_info, dest_addr);
4181 
4182 	if (!is_data && !IS_ALIGNED(dest_length, fs_info->nodesize)) {
4183 		u64 new_length = ALIGN_DOWN(dest_length, fs_info->nodesize);
4184 
4185 		btrfs_free_reserved_extent(fs_info, dest_addr + new_length,
4186 					   dest_length - new_length, 0);
4187 
4188 		dest_length = new_length;
4189 	}
4190 
4191 	trans = btrfs_join_transaction(fs_info->remap_root);
4192 	if (IS_ERR(trans)) {
4193 		ret = PTR_ERR(trans);
4194 		trans = NULL;
4195 		goto end;
4196 	}
4197 
4198 	mutex_lock(&fs_info->remap_mutex);
4199 	mutex_taken = true;
4200 
4201 	/* Find old remap entry. */
4202 	key.objectid = old_addr;
4203 	key.type = BTRFS_REMAP_KEY;
4204 	key.offset = length;
4205 
4206 	ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, 0, 1);
4207 	if (ret == 1) {
4208 		/*
4209 		 * Not a problem if the remap entry wasn't found: that means
4210 		 * that another transaction has deallocated the data.
4211 		 * move_existing_remaps() loops until the BG contains no
4212 		 * remaps, so we can just return 0 in this case.
4213 		 */
4214 		btrfs_release_path(path);
4215 		ret = 0;
4216 		goto end;
4217 	} else if (unlikely(ret)) {
4218 		goto end;
4219 	}
4220 
4221 	ret = copy_remapped_data(fs_info, new_addr, dest_addr, dest_length);
4222 	if (unlikely(ret))
4223 		goto end;
4224 
4225 	/* Change data of old remap entry. */
4226 	leaf = path->nodes[0];
4227 	remap_ptr = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item);
4228 	btrfs_set_remap_address(leaf, remap_ptr, dest_addr);
4229 	btrfs_mark_buffer_dirty(trans, leaf);
4230 
4231 	if (dest_length != length) {
4232 		key.offset = dest_length;
4233 		btrfs_set_item_key_safe(trans, path, &key);
4234 	}
4235 
4236 	btrfs_release_path(path);
4237 
4238 	if (dest_length != length) {
4239 		/* Add remap item for remainder. */
4240 		ret = add_remap_item(trans, path, new_addr + dest_length,
4241 				     length - dest_length, old_addr + dest_length);
4242 		if (unlikely(ret))
4243 			goto end;
4244 	}
4245 
4246 	/* Change or remove old backref. */
4247 	key.objectid = new_addr;
4248 	key.type = BTRFS_REMAP_BACKREF_KEY;
4249 	key.offset = length;
4250 
4251 	ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, -1, 1);
4252 	if (unlikely(ret)) {
4253 		if (ret == 1) {
4254 			btrfs_release_path(path);
4255 			ret = -ENOENT;
4256 		}
4257 		goto end;
4258 	}
4259 
4260 	leaf = path->nodes[0];
4261 
4262 	if (dest_length == length) {
4263 		ret = btrfs_del_item(trans, fs_info->remap_root, path);
4264 		if (unlikely(ret)) {
4265 			btrfs_release_path(path);
4266 			goto end;
4267 		}
4268 	} else {
4269 		key.objectid += dest_length;
4270 		key.offset -= dest_length;
4271 		btrfs_set_item_key_safe(trans, path, &key);
4272 		btrfs_set_stack_remap_address(&remap, old_addr + dest_length);
4273 
4274 		write_extent_buffer(leaf, &remap,
4275 				    btrfs_item_ptr_offset(leaf, path->slots[0]),
4276 				    sizeof(struct btrfs_remap_item));
4277 	}
4278 
4279 	btrfs_release_path(path);
4280 
4281 	/* Add new backref. */
4282 	ret = add_remap_backref_item(trans, path, dest_addr, dest_length, old_addr);
4283 	if (unlikely(ret))
4284 		goto end;
4285 
4286 	adjust_block_group_remap_bytes(trans, bg, -dest_length);
4287 
4288 	ret = btrfs_add_to_free_space_tree(trans, new_addr, dest_length);
4289 	if (unlikely(ret))
4290 		goto end;
4291 
4292 	adjust_block_group_remap_bytes(trans, dest_bg, dest_length);
4293 
4294 	mutex_lock(&dest_bg->free_space_lock);
4295 	bg_needs_free_space = test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
4296 				       &dest_bg->runtime_flags);
4297 	mutex_unlock(&dest_bg->free_space_lock);
4298 
4299 	if (bg_needs_free_space) {
4300 		ret = btrfs_add_block_group_free_space(trans, dest_bg);
4301 		if (unlikely(ret))
4302 			goto end;
4303 	}
4304 
4305 	ret = btrfs_remove_from_free_space_tree(trans, dest_addr, dest_length);
4306 	if (unlikely(ret)) {
4307 		btrfs_remove_from_free_space_tree(trans, new_addr, dest_length);
4308 		goto end;
4309 	}
4310 
4311 	ret = 0;
4312 
4313 end:
4314 	if (mutex_taken)
4315 		mutex_unlock(&fs_info->remap_mutex);
4316 
4317 	btrfs_dec_block_group_reservations(fs_info, dest_addr);
4318 
4319 	if (unlikely(ret)) {
4320 		btrfs_free_reserved_extent(fs_info, dest_addr, dest_length, 0);
4321 
4322 		if (trans) {
4323 			btrfs_abort_transaction(trans, ret);
4324 			btrfs_end_transaction(trans);
4325 		}
4326 	} else {
4327 		btrfs_free_reserved_bytes(dest_bg, dest_length, 0);
4328 
4329 		ret = btrfs_commit_transaction(trans);
4330 	}
4331 
4332 	btrfs_put_block_group(dest_bg);
4333 
4334 	return ret;
4335 }
4336 
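/*
 * Walk the BTRFS_REMAP_BACKREF_KEY items that point into @bg and relocate
 * each remap's data elsewhere until the block group backs no remaps at all.
 */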
4337 static int move_existing_remaps(struct btrfs_fs_info *fs_info,
4338 				struct btrfs_block_group *bg,
4339 				struct btrfs_path *path)
4340 {
4341 	int ret;
4342 	struct btrfs_key key;
4343 	struct extent_buffer *leaf;
4344 	struct btrfs_remap_item *remap;
4345 	u64 old_addr;
4346 
4347 	/* Look for backrefs in remap tree. */
4348 	while (bg->remap_bytes > 0) {
4349 		key.objectid = bg->start;
4350 		key.type = BTRFS_REMAP_BACKREF_KEY;
4351 		key.offset = 0;
4352 
4353 		ret = btrfs_search_slot(NULL, fs_info->remap_root, &key, path, 0, 0);
4354 		if (ret < 0)
4355 			return ret;
4356 
4357 		leaf = path->nodes[0];
4358 
4359 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4360 			ret = btrfs_next_leaf(fs_info->remap_root, path);
4361 			if (ret < 0) {
4362 				btrfs_release_path(path);
4363 				return ret;
4364 			}
4365 
4366 			if (ret) {
4367 				btrfs_release_path(path);
4368 				break;
4369 			}
4370 
4371 			leaf = path->nodes[0];
4372 		}
4373 
4374 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4375 
4376 		if (key.type != BTRFS_REMAP_BACKREF_KEY) {
4377 			path->slots[0]++;
4378 
4379 			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4380 				ret = btrfs_next_leaf(fs_info->remap_root, path);
4381 				if (ret < 0) {
4382 					btrfs_release_path(path);
4383 					return ret;
4384 				}
4385 
4386 				if (ret) {
4387 					btrfs_release_path(path);
4388 					break;
4389 				}
4390 
4391 				leaf = path->nodes[0];
4392 			}
4393 
4394 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4395 		}
4396 
4397 		remap = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item);
4398 		old_addr = btrfs_remap_address(leaf, remap);
4399 
4400 		btrfs_release_path(path);
4401 
4402 		ret = move_existing_remap(fs_info, path, bg, key.objectid,
4403 					  key.offset, old_addr);
4404 		if (ret)
4405 			return ret;
4406 	}
4407 
4408 	ASSERT(bg->remap_bytes == 0);
4409 
4410 	return 0;
4411 }
4412 
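/*
 * Seed the remap tree for @bg: collect its free space (extent and bitmap
 * entries alike) and insert one BTRFS_IDENTITY_REMAP_KEY item for every
 * currently allocated range of the block group.
 */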
4413 static int create_remap_tree_entries(struct btrfs_trans_handle *trans,
4414 				     struct btrfs_path *path,
4415 				     struct btrfs_block_group *bg)
4416 {
4417 	struct btrfs_fs_info *fs_info = trans->fs_info;
4418 	struct btrfs_free_space_info *fsi;
4419 	struct btrfs_key key, found_key;
4420 	struct extent_buffer *leaf;
4421 	struct btrfs_root *space_root;
4422 	u32 extent_count;
4423 	struct space_run *space_runs = NULL;
4424 	unsigned int num_space_runs = 0;
4425 	struct btrfs_key *entries = NULL;
4426 	unsigned int max_entries, num_entries;
4427 	int ret;
4428 
4429 	mutex_lock(&bg->free_space_lock);
4430 
4431 	if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &bg->runtime_flags)) {
4432 		mutex_unlock(&bg->free_space_lock);
4433 
4434 		ret = btrfs_add_block_group_free_space(trans, bg);
4435 		if (ret)
4436 			return ret;
4437 
4438 		mutex_lock(&bg->free_space_lock);
4439 	}
4440 
4441 	fsi = btrfs_search_free_space_info(trans, bg, path, 0);
4442 	if (IS_ERR(fsi)) {
4443 		mutex_unlock(&bg->free_space_lock);
4444 		return PTR_ERR(fsi);
4445 	}
4446 
4447 	extent_count = btrfs_free_space_extent_count(path->nodes[0], fsi);
4448 
4449 	btrfs_release_path(path);
4450 
4451 	space_runs = kmalloc_array(extent_count, sizeof(*space_runs), GFP_NOFS);
4452 	if (!space_runs) {
4453 		mutex_unlock(&bg->free_space_lock);
4454 		return -ENOMEM;
4455 	}
4456 
4457 	key.objectid = bg->start;
4458 	key.type = 0;
4459 	key.offset = 0;
4460 
4461 	space_root = btrfs_free_space_root(bg);
4462 
4463 	ret = btrfs_search_slot(trans, space_root, &key, path, 0, 0);
4464 	if (ret < 0) {
4465 		mutex_unlock(&bg->free_space_lock);
4466 		goto out;
4467 	}
4468 
4469 	ret = 0;
4470 
4471 	while (true) {
4472 		leaf = path->nodes[0];
4473 
4474 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4475 
4476 		if (found_key.objectid >= bg->start + bg->length)
4477 			break;
4478 
4479 		if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
4480 			if (num_space_runs != 0 &&
4481 			    space_runs[num_space_runs - 1].end == found_key.objectid) {
4482 				space_runs[num_space_runs - 1].end =
4483 					found_key.objectid + found_key.offset;
4484 			} else {
4485 				ASSERT(num_space_runs < extent_count);
4486 
4487 				space_runs[num_space_runs].start = found_key.objectid;
4488 				space_runs[num_space_runs].end =
4489 					found_key.objectid + found_key.offset;
4490 
4491 				num_space_runs++;
4492 			}
4493 		} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
4494 			void *bitmap;
4495 			unsigned long offset;
4496 			u32 data_size;
4497 
4498 			offset = btrfs_item_ptr_offset(leaf, path->slots[0]);
4499 			data_size = btrfs_item_size(leaf, path->slots[0]);
4500 
4501 			if (data_size != 0) {
4502 				bitmap = kmalloc(data_size, GFP_NOFS);
4503 				if (!bitmap) {
4504 					mutex_unlock(&bg->free_space_lock);
4505 					ret = -ENOMEM;
4506 					goto out;
4507 				}
4508 
4509 				read_extent_buffer(leaf, bitmap, offset, data_size);
4510 
4511 				parse_bitmap(fs_info->sectorsize, bitmap,
4512 					     data_size * BITS_PER_BYTE,
4513 					     found_key.objectid, space_runs,
4514 					     &num_space_runs);
4515 
4516 				ASSERT(num_space_runs <= extent_count);
4517 
4518 				kfree(bitmap);
4519 			}
4520 		}
4521 
4522 		path->slots[0]++;
4523 
4524 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4525 			ret = btrfs_next_leaf(space_root, path);
4526 			if (ret != 0) {
4527 				if (ret == 1)
4528 					ret = 0;
4529 				break;
4530 			}
4531 			leaf = path->nodes[0];
4532 		}
4533 	}
4534 
4535 	btrfs_release_path(path);
4536 
4537 	mutex_unlock(&bg->free_space_lock);
4538 
4539 	max_entries = extent_count + 2;
4540 	entries = kmalloc_array(max_entries, sizeof(*entries), GFP_NOFS);
4541 	if (!entries) {
4542 		ret = -ENOMEM;
4543 		goto out;
4544 	}
4545 
4546 	num_entries = 0;
4547 
4548 	if (num_space_runs == 0) {
4549 		entries[num_entries].objectid = bg->start;
4550 		entries[num_entries].type = BTRFS_IDENTITY_REMAP_KEY;
4551 		entries[num_entries].offset = bg->length;
4552 		num_entries++;
4553 	} else {
4554 		if (space_runs[0].start > bg->start) {
4555 			entries[num_entries].objectid = bg->start;
4556 			entries[num_entries].type = BTRFS_IDENTITY_REMAP_KEY;
4557 			entries[num_entries].offset = space_runs[0].start - bg->start;
4558 			num_entries++;
4559 		}
4560 
4561 		for (unsigned int i = 1; i < num_space_runs; i++) {
4562 			entries[num_entries].objectid = space_runs[i - 1].end;
4563 			entries[num_entries].type = BTRFS_IDENTITY_REMAP_KEY;
4564 			entries[num_entries].offset =
4565 				space_runs[i].start - space_runs[i - 1].end;
4566 			num_entries++;
4567 		}
4568 
4569 		if (space_runs[num_space_runs - 1].end < bg->start + bg->length) {
4570 			entries[num_entries].objectid =
4571 				space_runs[num_space_runs - 1].end;
4572 			entries[num_entries].type = BTRFS_IDENTITY_REMAP_KEY;
4573 			entries[num_entries].offset =
4574 				bg->start + bg->length - space_runs[num_space_runs - 1].end;
4575 			num_entries++;
4576 		}
4577 
4578 		if (num_entries == 0)
4579 			goto out;
4580 	}
4581 
4582 	bg->identity_remap_count = num_entries;
4583 
4584 	ret = add_remap_tree_entries(trans, path, entries, num_entries);
4585 
4586 out:
4587 	kfree(entries);
4588 	kfree(space_runs);
4589 
4590 	return ret;
4591 }
4592 
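/*
 * Return the start and length of the first BTRFS_IDENTITY_REMAP_KEY item at
 * or after @last_start that still starts before @bg_end, or -ENOENT if there
 * is none.
 */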
4593 static int find_next_identity_remap(struct btrfs_trans_handle *trans,
4594 				    struct btrfs_path *path, u64 bg_end,
4595 				    u64 last_start, u64 *start, u64 *length)
4596 {
4597 	int ret;
4598 	struct btrfs_key key, found_key;
4599 	struct btrfs_root *remap_root = trans->fs_info->remap_root;
4600 	struct extent_buffer *leaf;
4601 
4602 	key.objectid = last_start;
4603 	key.type = BTRFS_IDENTITY_REMAP_KEY;
4604 	key.offset = 0;
4605 
4606 	ret = btrfs_search_slot(trans, remap_root, &key, path, 0, 0);
4607 	if (ret < 0)
4608 		goto out;
4609 
4610 	leaf = path->nodes[0];
4611 	while (true) {
4612 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4613 			ret = btrfs_next_leaf(remap_root, path);
4614 
4615 			if (ret != 0) {
4616 				if (ret == 1)
4617 					ret = -ENOENT;
4618 				goto out;
4619 			}
4620 
4621 			leaf = path->nodes[0];
4622 		}
4623 
4624 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4625 
4626 		if (found_key.objectid >= bg_end) {
4627 			ret = -ENOENT;
4628 			goto out;
4629 		}
4630 
4631 		if (found_key.type == BTRFS_IDENTITY_REMAP_KEY) {
4632 			*start = found_key.objectid;
4633 			*length = found_key.offset;
4634 			ret = 0;
4635 			goto out;
4636 		}
4637 
4638 		path->slots[0]++;
4639 	}
4640 
4641 out:
4642 	btrfs_release_path(path);
4643 
4644 	return ret;
4645 }
4646 
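/*
 * Rewrite the chunk item at @chunk_map->start so that it carries zero
 * stripes, truncating away the stripe array.
 */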
4647 static int remove_chunk_stripes(struct btrfs_trans_handle *trans,
4648 				struct btrfs_chunk_map *chunk_map,
4649 				struct btrfs_path *path)
4650 {
4651 	struct btrfs_fs_info *fs_info = trans->fs_info;
4652 	struct btrfs_key key;
4653 	struct extent_buffer *leaf;
4654 	struct btrfs_chunk *chunk;
4655 	int ret;
4656 
4657 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4658 	key.type = BTRFS_CHUNK_ITEM_KEY;
4659 	key.offset = chunk_map->start;
4660 
4661 	btrfs_reserve_chunk_metadata(trans, false);
4662 
4663 	ret = btrfs_search_slot(trans, fs_info->chunk_root, &key, path, 0, 1);
4664 	if (ret) {
4665 		if (ret == 1) {
4666 			btrfs_release_path(path);
4667 			ret = -ENOENT;
4668 		}
4669 		btrfs_trans_release_chunk_metadata(trans);
4670 		return ret;
4671 	}
4672 
4673 	leaf = path->nodes[0];
4674 
4675 	chunk = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_chunk);
4676 	btrfs_set_chunk_num_stripes(leaf, chunk, 0);
4677 	btrfs_set_chunk_sub_stripes(leaf, chunk, 0);
4678 
4679 	btrfs_truncate_item(trans, path, offsetof(struct btrfs_chunk, stripe), 1);
4680 
4681 	btrfs_mark_buffer_dirty(trans, leaf);
4682 
4683 	btrfs_release_path(path);
4684 	btrfs_trans_release_chunk_metadata(trans);
4685 
4686 	return 0;
4687 }
4688 
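/*
 * Called once a remapped block group has lost its last identity remap: remove
 * its device extents, update the affected devices, clear CHUNK_ALLOCATED on
 * the underlying device ranges and strip the stripes from the chunk item.
 */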
4689 int btrfs_last_identity_remap_gone(struct btrfs_chunk_map *chunk_map,
4690 				   struct btrfs_block_group *bg)
4691 {
4692 	struct btrfs_fs_info *fs_info = bg->fs_info;
4693 	struct btrfs_trans_handle *trans;
4694 	int ret;
4695 	unsigned int num_items;
4696 	BTRFS_PATH_AUTO_FREE(path);
4697 
4698 	path = btrfs_alloc_path();
4699 	if (!path)
4700 		return -ENOMEM;
4701 
4702 	/*
4703 	 * One item for each entry we're removing in the dev extents tree, and
4704 	 * another for each device. DUP chunks are all on one device,
4705 	 * everything else has one device per stripe.
4706 	 */
4707 	if (bg->flags & BTRFS_BLOCK_GROUP_DUP)
4708 		num_items = chunk_map->num_stripes + 1;
4709 	else
4710 		num_items = 2 * chunk_map->num_stripes;
4711 
4712 	trans = btrfs_start_transaction_fallback_global_rsv(fs_info->tree_root, num_items);
4713 	if (IS_ERR(trans))
4714 		return PTR_ERR(trans);
4715 
4716 	ret = btrfs_remove_dev_extents(trans, chunk_map);
4717 	if (unlikely(ret)) {
4718 		btrfs_abort_transaction(trans, ret);
4719 		btrfs_end_transaction(trans);
4720 		return ret;
4721 	}
4722 
4723 	mutex_lock(&trans->fs_info->chunk_mutex);
4724 	for (unsigned int i = 0; i < chunk_map->num_stripes; i++) {
4725 		ret = btrfs_update_device(trans, chunk_map->stripes[i].dev);
4726 		if (unlikely(ret)) {
4727 			mutex_unlock(&trans->fs_info->chunk_mutex);
4728 			btrfs_abort_transaction(trans, ret);
4729 			btrfs_end_transaction(trans);
4730 			return ret;
4731 		}
4732 	}
4733 	mutex_unlock(&trans->fs_info->chunk_mutex);
4734 
4735 	write_lock(&trans->fs_info->mapping_tree_lock);
4736 	btrfs_chunk_map_device_clear_bits(chunk_map, CHUNK_ALLOCATED);
4737 	write_unlock(&trans->fs_info->mapping_tree_lock);
4738 
4739 	btrfs_remove_bg_from_sinfo(bg);
4740 
4741 	spin_lock(&bg->lock);
4742 	clear_bit(BLOCK_GROUP_FLAG_STRIPE_REMOVAL_PENDING, &bg->runtime_flags);
4743 	spin_unlock(&bg->lock);
4744 
4745 	ret = remove_chunk_stripes(trans, chunk_map, path);
4746 	if (unlikely(ret)) {
4747 		btrfs_abort_transaction(trans, ret);
4748 		btrfs_end_transaction(trans);
4749 		return ret;
4750 	}
4751 
4752 	ret = btrfs_commit_transaction(trans);
4753 	if (ret)
4754 		return ret;
4755 
4756 	return 0;
4757 }
4758 
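/*
 * Apply @delta to the block group's identity remap count, queue the block
 * group for write-out with the current transaction, and mark it fully
 * remapped once the count drops to zero.
 */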
4759 static void adjust_identity_remap_count(struct btrfs_trans_handle *trans,
4760 				        struct btrfs_block_group *bg, int delta)
4761 {
4762 	struct btrfs_fs_info *fs_info = trans->fs_info;
4763 	bool bg_already_dirty = true;
4764 	bool mark_fully_remapped = false;
4765 
4766 	WARN_ON(delta < 0 && -delta > bg->identity_remap_count);
4767 
4768 	spin_lock(&bg->lock);
4769 
4770 	bg->identity_remap_count += delta;
4771 
4772 	if (bg->identity_remap_count == 0 &&
4773 	    !test_bit(BLOCK_GROUP_FLAG_FULLY_REMAPPED, &bg->runtime_flags)) {
4774 		set_bit(BLOCK_GROUP_FLAG_FULLY_REMAPPED, &bg->runtime_flags);
4775 		mark_fully_remapped = true;
4776 	}
4777 
4778 	spin_unlock(&bg->lock);
4779 
4780 	spin_lock(&trans->transaction->dirty_bgs_lock);
4781 	if (list_empty(&bg->dirty_list)) {
4782 		list_add_tail(&bg->dirty_list, &trans->transaction->dirty_bgs);
4783 		bg_already_dirty = false;
4784 		btrfs_get_block_group(bg);
4785 	}
4786 	spin_unlock(&trans->transaction->dirty_bgs_lock);
4787 
4788 	/* Modified block groups are accounted for in the delayed_refs_rsv. */
4789 	if (!bg_already_dirty)
4790 		btrfs_inc_delayed_refs_rsv_bg_updates(fs_info);
4791 
4792 	if (mark_fully_remapped)
4793 		btrfs_mark_bg_fully_remapped(bg, trans);
4794 }
4795 
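/*
 * Turn part of an identity remap into a real remap: shrink or delete the
 * identity item covering @old_addr, insert a remap item (plus backref)
 * redirecting [@old_addr, @old_addr + @length) to @new_addr, and re-insert an
 * identity item for any remainder of the original range.
 */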
4796 static int add_remap_entry(struct btrfs_trans_handle *trans,
4797 			   struct btrfs_path *path,
4798 			   struct btrfs_block_group *src_bg, u64 old_addr,
4799 			   u64 new_addr, u64 length)
4800 {
4801 	struct btrfs_fs_info *fs_info = trans->fs_info;
4802 	struct btrfs_key key, new_key;
4803 	int ret;
4804 	int identity_count_delta = 0;
4805 
4806 	key.objectid = old_addr;
4807 	key.type = (u8)-1;
4808 	key.offset = (u64)-1;
4809 
4810 	ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, -1, 1);
4811 	if (ret < 0)
4812 		goto end;
4813 
4814 	if (path->slots[0] == 0) {
4815 		ret = -ENOENT;
4816 		goto end;
4817 	}
4818 
4819 	path->slots[0]--;
4820 
4821 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4822 
4823 	if (key.type != BTRFS_IDENTITY_REMAP_KEY ||
4824 	    key.objectid > old_addr ||
4825 	    key.objectid + key.offset <= old_addr) {
4826 		ret = -ENOENT;
4827 		goto end;
4828 	}
4829 
4830 	/* Shorten or delete identity mapping entry. */
4831 	if (key.objectid == old_addr) {
4832 		ret = btrfs_del_item(trans, fs_info->remap_root, path);
4833 		if (ret)
4834 			goto end;
4835 
4836 		identity_count_delta--;
4837 	} else {
4838 		new_key.objectid = key.objectid;
4839 		new_key.type = BTRFS_IDENTITY_REMAP_KEY;
4840 		new_key.offset = old_addr - key.objectid;
4841 
4842 		btrfs_set_item_key_safe(trans, path, &new_key);
4843 	}
4844 
4845 	btrfs_release_path(path);
4846 
4847 	/* Create new remap entry. */
4848 	ret = add_remap_item(trans, path, new_addr, length, old_addr);
4849 	if (ret)
4850 		goto end;
4851 
4852 	/* Add entry for remainder of identity mapping, if necessary. */
4853 	if (key.objectid + key.offset != old_addr + length) {
4854 		new_key.objectid = old_addr + length;
4855 		new_key.type = BTRFS_IDENTITY_REMAP_KEY;
4856 		new_key.offset = key.objectid + key.offset - old_addr - length;
4857 
4858 		ret = btrfs_insert_empty_item(trans, fs_info->remap_root,
4859 					      path, &new_key, 0);
4860 		if (ret)
4861 			goto end;
4862 
4863 		btrfs_release_path(path);
4864 
4865 		identity_count_delta++;
4866 	}
4867 
4868 	/* Add backref. */
4869 	ret = add_remap_backref_item(trans, path, new_addr, length, old_addr);
4870 	if (ret)
4871 		goto end;
4872 
4873 	if (identity_count_delta != 0)
4874 		adjust_identity_remap_count(trans, src_bg, identity_count_delta);
4875 
4876 end:
4877 	btrfs_release_path(path);
4878 
4879 	return ret;
4880 }
4881 
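/*
 * Set BTRFS_BLOCK_GROUP_REMAPPED on both the cached chunk map and the on-disk
 * chunk item of the chunk starting at @start.
 */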
4882 static int mark_chunk_remapped(struct btrfs_trans_handle *trans,
4883 			       struct btrfs_path *path, u64 start)
4884 {
4885 	struct btrfs_fs_info *fs_info = trans->fs_info;
4886 	struct btrfs_chunk_map *chunk_map;
4887 	struct btrfs_key key;
4888 	u64 type;
4889 	int ret;
4890 	struct extent_buffer *leaf;
4891 	struct btrfs_chunk *chunk;
4892 
4893 	read_lock(&fs_info->mapping_tree_lock);
4894 
4895 	chunk_map = btrfs_find_chunk_map_nolock(fs_info, start, 1);
4896 	if (!chunk_map) {
4897 		read_unlock(&fs_info->mapping_tree_lock);
4898 		return -ENOENT;
4899 	}
4900 
4901 	chunk_map->type |= BTRFS_BLOCK_GROUP_REMAPPED;
4902 	type = chunk_map->type;
4903 
4904 	read_unlock(&fs_info->mapping_tree_lock);
4905 
4906 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4907 	key.type = BTRFS_CHUNK_ITEM_KEY;
4908 	key.offset = start;
4909 
4910 	ret = btrfs_search_slot(trans, fs_info->chunk_root, &key, path, 0, 1);
4911 	if (ret == 1) {
4912 		ret = -ENOENT;
4913 		goto end;
4914 	} else if (ret < 0)
4915 		goto end;
4916 
4917 	leaf = path->nodes[0];
4918 
4919 	chunk = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_chunk);
4920 	btrfs_set_chunk_type(leaf, chunk, type);
4921 	btrfs_mark_buffer_dirty(trans, leaf);
4922 
4923 	ret = 0;
4924 end:
4925 	btrfs_free_chunk_map(chunk_map);
4926 	btrfs_release_path(path);
4927 
4928 	return ret;
4929 }
4930 
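/*
 * Process one identity remap of @src_bg in its own transaction: reserve a new
 * extent, copy the data over, convert the identity remap into a real remap
 * entry and update the space accounting.  Returns 1 once the block group has
 * no identity remaps left.
 */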
4931 static int do_remap_reloc_trans(struct btrfs_fs_info *fs_info,
4932 				struct btrfs_block_group *src_bg,
4933 				struct btrfs_path *path, u64 *last_start)
4934 {
4935 	struct btrfs_trans_handle *trans;
4936 	struct btrfs_root *extent_root;
4937 	struct btrfs_key ins;
4938 	struct btrfs_block_group *dest_bg = NULL;
4939 	u64 start = 0, remap_length = 0;
4940 	u64 length, new_addr, min_size;
4941 	int ret;
4942 	const bool is_data = (src_bg->flags & BTRFS_BLOCK_GROUP_DATA);
4943 	bool no_more = false;
4944 	bool made_reservation = false, bg_needs_free_space;
4945 	struct btrfs_space_info *sinfo = src_bg->space_info;
4946 
4947 	extent_root = btrfs_extent_root(fs_info, src_bg->start);
4948 	if (unlikely(!extent_root)) {
4949 		btrfs_err(fs_info,
4950 			  "missing extent root for block group at offset %llu",
4951 			  src_bg->start);
4952 		return -EUCLEAN;
4953 	}
4954 
4955 	trans = btrfs_start_transaction(extent_root, 0);
4956 	if (IS_ERR(trans))
4957 		return PTR_ERR(trans);
4958 
4959 	mutex_lock(&fs_info->remap_mutex);
4960 
4961 	ret = find_next_identity_remap(trans, path, src_bg->start + src_bg->length,
4962 				       *last_start, &start, &remap_length);
4963 	if (ret == -ENOENT) {
4964 		no_more = true;
4965 		goto next;
4966 	} else if (ret) {
4967 		mutex_unlock(&fs_info->remap_mutex);
4968 		btrfs_end_transaction(trans);
4969 		return ret;
4970 	}
4971 
4972 	/* Try to reserve enough space for the range being remapped. */
4973 	spin_lock(&sinfo->lock);
4974 	btrfs_space_info_update_bytes_may_use(sinfo, remap_length);
4975 	spin_unlock(&sinfo->lock);
4976 
4977 	if (is_data)
4978 		min_size = fs_info->sectorsize;
4979 	else
4980 		min_size = fs_info->nodesize;
4981 
4982 	/*
4983 	 * We're using btrfs_reserve_extent() to allocate a contiguous
4984 	 * logical address range, but this will become a remap item rather than
4985 	 * an extent in the extent tree.
4986 	 *
4987 	 * Short allocations are fine: it means that we chop off the beginning
4988 	 * of the identity remap that we're processing, and will tackle the
4989 	 * rest of it the next time round.
4990 	 */
4991 	ret = btrfs_reserve_extent(fs_info->fs_root, remap_length, remap_length,
4992 				   min_size, 0, 0, &ins, is_data, false);
4993 	if (ret) {
4994 		spin_lock(&sinfo->lock);
4995 		btrfs_space_info_update_bytes_may_use(sinfo, -remap_length);
4996 		spin_unlock(&sinfo->lock);
4997 
4998 		mutex_unlock(&fs_info->remap_mutex);
4999 		btrfs_end_transaction(trans);
5000 		return ret;
5001 	}
5002 
5003 	made_reservation = true;
5004 
5005 	new_addr = ins.objectid;
5006 	length = ins.offset;
5007 
5008 	if (!is_data && !IS_ALIGNED(length, fs_info->nodesize)) {
5009 		u64 new_length = ALIGN_DOWN(length, fs_info->nodesize);
5010 
5011 		btrfs_free_reserved_extent(fs_info, new_addr + new_length,
5012 					   length - new_length, 0);
5013 
5014 		length = new_length;
5015 	}
5016 
5017 	dest_bg = btrfs_lookup_block_group(fs_info, new_addr);
5018 
5019 	mutex_lock(&dest_bg->free_space_lock);
5020 	bg_needs_free_space = test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
5021 				       &dest_bg->runtime_flags);
5022 	mutex_unlock(&dest_bg->free_space_lock);
5023 
5024 	if (bg_needs_free_space) {
5025 		ret = btrfs_add_block_group_free_space(trans, dest_bg);
5026 		if (ret)
5027 			goto fail;
5028 	}
5029 
5030 	ret = copy_remapped_data(fs_info, start, new_addr, length);
5031 	if (ret)
5032 		goto fail;
5033 
5034 	ret = btrfs_remove_from_free_space_tree(trans, new_addr, length);
5035 	if (ret)
5036 		goto fail;
5037 
5038 	ret = add_remap_entry(trans, path, src_bg, start, new_addr, length);
5039 	if (ret) {
5040 		btrfs_add_to_free_space_tree(trans, new_addr, length);
5041 		goto fail;
5042 	}
5043 
5044 	adjust_block_group_remap_bytes(trans, dest_bg, length);
5045 	btrfs_free_reserved_bytes(dest_bg, length, 0);
5046 
5047 	spin_lock(&sinfo->lock);
5048 	sinfo->bytes_readonly += length;
5049 	spin_unlock(&sinfo->lock);
5050 
5051 next:
5052 	if (dest_bg)
5053 		btrfs_put_block_group(dest_bg);
5054 
5055 	if (made_reservation)
5056 		btrfs_dec_block_group_reservations(fs_info, new_addr);
5057 
5058 	mutex_unlock(&fs_info->remap_mutex);
5059 
5060 	if (src_bg->identity_remap_count == 0) {
5061 		bool mark_fully_remapped = false;
5062 
5063 		spin_lock(&src_bg->lock);
5064 		if (!test_bit(BLOCK_GROUP_FLAG_FULLY_REMAPPED, &src_bg->runtime_flags)) {
5065 			mark_fully_remapped = true;
5066 			set_bit(BLOCK_GROUP_FLAG_FULLY_REMAPPED, &src_bg->runtime_flags);
5067 		}
5068 		spin_unlock(&src_bg->lock);
5069 
5070 		if (mark_fully_remapped)
5071 			btrfs_mark_bg_fully_remapped(src_bg, trans);
5072 	}
5073 
5074 	ret = btrfs_end_transaction(trans);
5075 	if (ret)
5076 		return ret;
5077 
5078 	if (no_more)
5079 		return 1;
5080 
5081 	*last_start = start;
5082 
5083 	return 0;
5084 
5085 fail:
5086 	if (dest_bg)
5087 		btrfs_put_block_group(dest_bg);
5088 
5089 	btrfs_free_reserved_extent(fs_info, new_addr, length, 0);
5090 
5091 	mutex_unlock(&fs_info->remap_mutex);
5092 	btrfs_end_transaction(trans);
5093 
5094 	return ret;
5095 }
5096 
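/*
 * Relocate @bg via the remap tree by calling do_remap_reloc_trans() until it
 * reports that no identity remaps remain.
 */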
5097 static int do_remap_reloc(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
5098 			  struct btrfs_block_group *bg)
5099 {
5100 	u64 last_start = bg->start;
5101 	int ret;
5102 
5103 	while (true) {
5104 		ret = do_remap_reloc_trans(fs_info, bg, path, &last_start);
5105 		if (ret) {
5106 			if (ret == 1)
5107 				ret = 0;
5108 			break;
5109 		}
5110 	}
5111 
5112 	return ret;
5113 }
5114 
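/*
 * Translate a logical address through the remap tree.  On success @logical is
 * rewritten to the remapped address (left unchanged for identity remaps) and
 * @length is clamped so the range does not cross the end of the remap entry.
 */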
5115 int btrfs_translate_remap(struct btrfs_fs_info *fs_info, u64 *logical, u64 *length)
5116 {
5117 	int ret;
5118 	struct btrfs_key key, found_key;
5119 	struct extent_buffer *leaf;
5120 	struct btrfs_remap_item *remap;
5121 	BTRFS_PATH_AUTO_FREE(path);
5122 
5123 	path = btrfs_alloc_path();
5124 	if (!path)
5125 		return -ENOMEM;
5126 
5127 	key.objectid = *logical;
5128 	key.type = (u8)-1;
5129 	key.offset = (u64)-1;
5130 
5131 	ret = btrfs_search_slot(NULL, fs_info->remap_root, &key, path, 0, 0);
5132 	if (ret < 0)
5133 		return ret;
5134 
5135 	leaf = path->nodes[0];
5136 	if (path->slots[0] == 0)
5137 		return -ENOENT;
5138 
5139 	path->slots[0]--;
5140 
5141 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5142 
5143 	if (found_key.type != BTRFS_REMAP_KEY &&
5144 	    found_key.type != BTRFS_IDENTITY_REMAP_KEY) {
5145 		return -ENOENT;
5146 	}
5147 
5148 	if (found_key.objectid > *logical ||
5149 	    found_key.objectid + found_key.offset <= *logical) {
5150 		return -ENOENT;
5151 	}
5152 
5153 	if (*logical + *length > found_key.objectid + found_key.offset)
5154 		*length = found_key.objectid + found_key.offset - *logical;
5155 
5156 	if (found_key.type == BTRFS_IDENTITY_REMAP_KEY)
5157 		return 0;
5158 
5159 	remap = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item);
5160 	*logical += btrfs_remap_address(leaf, remap) - found_key.objectid;
5161 
5162 	return 0;
5163 }
5164 
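/*
 * Switch @bg over to remap based relocation: populate the remap tree with
 * identity entries, set BTRFS_BLOCK_GROUP_REMAPPED on the block group and its
 * chunk, and drop the block group's free space tree entries and cache.
 */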
5165 static int start_block_group_remapping(struct btrfs_fs_info *fs_info,
5166 				       struct btrfs_path *path,
5167 				       struct btrfs_block_group *bg)
5168 {
5169 	struct btrfs_trans_handle *trans;
5170 	bool bg_already_dirty = true;
5171 	int ret, ret2;
5172 
5173 	ret = btrfs_cache_block_group(bg, true);
5174 	if (ret)
5175 		return ret;
5176 
5177 	trans = btrfs_start_transaction(fs_info->remap_root, 0);
5178 	if (IS_ERR(trans))
5179 		return PTR_ERR(trans);
5180 
5181 	/* We need to run delayed refs to make sure the free space tree is up to date. */
5182 	ret = btrfs_run_delayed_refs(trans, U64_MAX);
5183 	if (ret) {
5184 		btrfs_end_transaction(trans);
5185 		return ret;
5186 	}
5187 
5188 	mutex_lock(&fs_info->remap_mutex);
5189 
5190 	if (bg->flags & BTRFS_BLOCK_GROUP_REMAPPED) {
5191 		ret = 0;
5192 		goto end;
5193 	}
5194 
5195 	ret = create_remap_tree_entries(trans, path, bg);
5196 	if (unlikely(ret)) {
5197 		btrfs_abort_transaction(trans, ret);
5198 		goto end;
5199 	}
5200 
5201 	spin_lock(&bg->lock);
5202 	bg->flags |= BTRFS_BLOCK_GROUP_REMAPPED;
5203 	spin_unlock(&bg->lock);
5204 
5205 	spin_lock(&trans->transaction->dirty_bgs_lock);
5206 	if (list_empty(&bg->dirty_list)) {
5207 		list_add_tail(&bg->dirty_list, &trans->transaction->dirty_bgs);
5208 		bg_already_dirty = false;
5209 		btrfs_get_block_group(bg);
5210 	}
5211 	spin_unlock(&trans->transaction->dirty_bgs_lock);
5212 
5213 	/* Modified block groups are accounted for in the delayed_refs_rsv. */
5214 	if (!bg_already_dirty)
5215 		btrfs_inc_delayed_refs_rsv_bg_updates(fs_info);
5216 
5217 	ret = mark_chunk_remapped(trans, path, bg->start);
5218 	if (unlikely(ret)) {
5219 		btrfs_abort_transaction(trans, ret);
5220 		goto end;
5221 	}
5222 
5223 	ret = btrfs_remove_block_group_free_space(trans, bg);
5224 	if (unlikely(ret)) {
5225 		btrfs_abort_transaction(trans, ret);
5226 		goto end;
5227 	}
5228 
5229 	btrfs_remove_free_space_cache(bg);
5230 
5231 end:
5232 	mutex_unlock(&fs_info->remap_mutex);
5233 
5234 	ret2 = btrfs_end_transaction(trans);
5235 	if (!ret)
5236 		ret = ret2;
5237 
5238 	return ret;
5239 }
5240 
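/*
 * Classic (non remap tree) relocation loop: keep calling
 * relocate_block_group() until no more extents are found in the target block
 * group.
 */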
5241 static int do_nonremap_reloc(struct btrfs_fs_info *fs_info, bool verbose,
5242 			     struct reloc_control *rc)
5243 {
5244 	int ret;
5245 
5246 	while (1) {
5247 		enum reloc_stage finishes_stage;
5248 
5249 		mutex_lock(&fs_info->cleaner_mutex);
5250 		ret = relocate_block_group(rc);
5251 		mutex_unlock(&fs_info->cleaner_mutex);
5252 
5253 		finishes_stage = rc->stage;
5254 		/*
5255 		 * We may have gotten ENOSPC after we already dirtied some
5256 		 * extents.  If writeout happens while we're relocating a
5257 		 * different block group we could end up hitting the
5258 		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
5259 		 * btrfs_reloc_cow_block.  Make sure we write everything out
5260 		 * properly so we don't trip over this problem, and then break
5261 		 * out of the loop if we hit an error.
5262 		 */
5263 		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
5264 			int wb_ret;
5265 
5266 			wb_ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode),
5267 							  0, (u64)-1);
5268 			if (wb_ret && ret == 0)
5269 				ret = wb_ret;
5270 			invalidate_mapping_pages(rc->data_inode->i_mapping, 0, -1);
5271 			rc->stage = UPDATE_DATA_PTRS;
5272 		}
5273 
5274 		if (ret < 0)
5275 			return ret;
5276 
5277 		if (rc->extents_found == 0)
5278 			break;
5279 
5280 		if (verbose)
5281 			btrfs_info(fs_info, "found %llu extents, stage: %s",
5282 				   rc->extents_found, stage_to_string(finishes_stage));
5283 	}
5284 
5285 	WARN_ON(rc->block_group->pinned > 0);
5286 	WARN_ON(rc->block_group->reserved > 0);
5287 	WARN_ON(rc->block_group->used > 0);
5288 
5289 	return 0;
5290 }
5291 
5292 /*
5293  * Relocate all extents in a block group.
5294  */
5295 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start,
5296 			       bool verbose)
5297 {
5298 	struct btrfs_block_group *bg;
5299 	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
5300 	struct reloc_control *rc;
5301 	struct inode *inode;
5302 	struct btrfs_path *path = NULL;
5303 	int ret;
5304 	bool bg_is_ro = false;
5305 
5306 	if (unlikely(!extent_root)) {
5307 		btrfs_err(fs_info,
5308 			  "missing extent root for block group at offset %llu",
5309 			  group_start);
5310 		return -EUCLEAN;
5311 	}
5312 
5313 	/*
5314 	 * This only gets set if we had a half-deleted snapshot on mount.  We
5315 	 * cannot allow relocation to start while we're still trying to clean up
5316 	 * these pending deletions.
5317 	 */
5318 	ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
5319 	if (ret)
5320 		return ret;
5321 
5322 	/* We may have been woken up by close_ctree, so bail if we're closing. */
5323 	if (btrfs_fs_closing(fs_info))
5324 		return -EINTR;
5325 
5326 	bg = btrfs_lookup_block_group(fs_info, group_start);
5327 	if (!bg)
5328 		return -ENOENT;
5329 
5330 	/*
5331 	 * Relocation of a data block group creates ordered extents.  Without
5332 	 * sb_start_write(), we can freeze the filesystem while unfinished
5333 	 * ordered extents are left. Such ordered extents can cause a deadlock
5334 	 * e.g. when syncfs() is waiting for their completion but they can't
5335 	 * finish because they block when joining a transaction, due to the
5336 	 * fact that the freeze locks are being held in write mode.
5337 	 */
5338 	if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
5339 		ASSERT(sb_write_started(fs_info->sb));
5340 
5341 	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
5342 		btrfs_put_block_group(bg);
5343 		return -ETXTBSY;
5344 	}
5345 
5346 	rc = alloc_reloc_control(fs_info);
5347 	if (!rc) {
5348 		btrfs_put_block_group(bg);
5349 		return -ENOMEM;
5350 	}
5351 
5352 	ret = reloc_chunk_start(fs_info);
5353 	if (ret < 0)
5354 		goto out_put_bg;
5355 
5356 	rc->extent_root = extent_root;
5357 	rc->block_group = bg;
5358 
5359 	ret = btrfs_inc_block_group_ro(rc->block_group, true);
5360 	if (ret)
5361 		goto out;
5362 	bg_is_ro = true;
5363 
5364 	path = btrfs_alloc_path();
5365 	if (!path) {
5366 		ret = -ENOMEM;
5367 		goto out;
5368 	}
5369 
5370 	inode = lookup_free_space_inode(rc->block_group, path);
5371 	btrfs_release_path(path);
5372 
5373 	if (!IS_ERR(inode))
5374 		ret = delete_block_group_cache(rc->block_group, inode, 0);
5375 	else
5376 		ret = PTR_ERR(inode);
5377 
5378 	if (ret && ret != -ENOENT)
5379 		goto out;
5380 
5381 	if (!btrfs_fs_incompat(fs_info, REMAP_TREE)) {
5382 		rc->data_inode = create_reloc_inode(rc->block_group);
5383 		if (IS_ERR(rc->data_inode)) {
5384 			ret = PTR_ERR(rc->data_inode);
5385 			rc->data_inode = NULL;
5386 			goto out;
5387 		}
5388 	}
5389 
5390 	if (verbose)
5391 		describe_relocation(rc->block_group);
5392 
5393 	btrfs_wait_block_group_reservations(rc->block_group);
5394 	btrfs_wait_nocow_writers(rc->block_group);
5395 	btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);
5396 
5397 	ret = btrfs_zone_finish(rc->block_group);
5398 	WARN_ON(ret && ret != -EAGAIN);
5399 
5400 	if (should_relocate_using_remap_tree(bg)) {
5401 		if (bg->remap_bytes != 0) {
5402 			ret = move_existing_remaps(fs_info, bg, path);
5403 			if (ret)
5404 				goto out;
5405 		}
5406 		ret = start_block_group_remapping(fs_info, path, bg);
5407 		if (ret)
5408 			goto out;
5409 
5410 		ret = do_remap_reloc(fs_info, path, rc->block_group);
5411 		if (ret)
5412 			goto out;
5413 
5414 		btrfs_delete_unused_bgs(fs_info);
5415 	} else {
5416 		ret = do_nonremap_reloc(fs_info, verbose, rc);
5417 	}
5418 
5419 out:
5420 	if (ret && bg_is_ro)
5421 		btrfs_dec_block_group_ro(rc->block_group);
5422 	if (!btrfs_fs_incompat(fs_info, REMAP_TREE))
5423 		iput(rc->data_inode);
5424 	btrfs_free_path(path);
5425 	reloc_chunk_end(fs_info);
5426 out_put_bg:
5427 	btrfs_put_block_group(bg);
5428 	free_reloc_control(rc);
5429 	return ret;
5430 }
5431 
5432 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
5433 {
5434 	struct btrfs_fs_info *fs_info = root->fs_info;
5435 	struct btrfs_trans_handle *trans;
5436 	int ret, err;
5437 
5438 	trans = btrfs_start_transaction(fs_info->tree_root, 0);
5439 	if (IS_ERR(trans))
5440 		return PTR_ERR(trans);
5441 
5442 	memset(&root->root_item.drop_progress, 0,
5443 		sizeof(root->root_item.drop_progress));
5444 	btrfs_set_root_drop_level(&root->root_item, 0);
5445 	btrfs_set_root_refs(&root->root_item, 0);
5446 	ret = btrfs_update_root(trans, fs_info->tree_root,
5447 				&root->root_key, &root->root_item);
5448 
5449 	err = btrfs_end_transaction(trans);
5450 	if (err)
5451 		return err;
5452 	return ret;
5453 }
5454 
5455 /*
5456  * Recover relocation interrupted by a system crash.
5457  *
5458  * This function resumes merging reloc trees with their corresponding fs trees,
5459  * which is important for preserving the sharing of tree blocks.
5460  */
5461 int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
5462 {
5463 	LIST_HEAD(reloc_roots);
5464 	struct btrfs_key key;
5465 	struct btrfs_root *fs_root;
5466 	struct btrfs_root *reloc_root;
5467 	struct btrfs_path *path;
5468 	struct extent_buffer *leaf;
5469 	struct reloc_control *rc = NULL;
5470 	struct btrfs_trans_handle *trans;
5471 	int ret2;
5472 	int ret = 0;
5473 
5474 	path = btrfs_alloc_path();
5475 	if (!path)
5476 		return -ENOMEM;
5477 	path->reada = READA_BACK;
5478 
5479 	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
5480 	key.type = BTRFS_ROOT_ITEM_KEY;
5481 	key.offset = (u64)-1;
5482 
5483 	while (1) {
5484 		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
5485 					path, 0, 0);
5486 		if (ret < 0)
5487 			goto out;
5488 		if (ret > 0) {
5489 			if (path->slots[0] == 0)
5490 				break;
5491 			path->slots[0]--;
5492 		}
5493 		ret = 0;
5494 		leaf = path->nodes[0];
5495 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5496 		btrfs_release_path(path);
5497 
5498 		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
5499 		    key.type != BTRFS_ROOT_ITEM_KEY)
5500 			break;
5501 
5502 		reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
5503 		if (IS_ERR(reloc_root)) {
5504 			ret = PTR_ERR(reloc_root);
5505 			goto out;
5506 		}
5507 
5508 		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
5509 		list_add(&reloc_root->root_list, &reloc_roots);
5510 
5511 		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
5512 			fs_root = btrfs_get_fs_root(fs_info,
5513 					reloc_root->root_key.offset, false);
5514 			if (IS_ERR(fs_root)) {
5515 				ret = PTR_ERR(fs_root);
5516 				if (ret != -ENOENT)
5517 					goto out;
5518 				ret = mark_garbage_root(reloc_root);
5519 				if (ret < 0)
5520 					goto out;
5521 				ret = 0;
5522 			} else {
5523 				btrfs_put_root(fs_root);
5524 			}
5525 		}
5526 
5527 		if (key.offset == 0)
5528 			break;
5529 
5530 		key.offset--;
5531 	}
5532 	btrfs_release_path(path);
5533 
5534 	if (list_empty(&reloc_roots))
5535 		goto out;
5536 
5537 	rc = alloc_reloc_control(fs_info);
5538 	if (!rc) {
5539 		ret = -ENOMEM;
5540 		goto out;
5541 	}
5542 
5543 	rc->extent_root = btrfs_extent_root(fs_info, 0);
5544 	if (unlikely(!rc->extent_root)) {
5545 		btrfs_err(fs_info, "missing extent root for extent at bytenr 0");
5546 		ret = -EUCLEAN;
5547 		goto out;
5548 	}
5549 
5550 	ret = reloc_chunk_start(fs_info);
5551 	if (ret < 0)
5552 		goto out_end;
5553 
5554 	set_reloc_control(rc);
5555 
5556 	trans = btrfs_join_transaction(rc->extent_root);
5557 	if (IS_ERR(trans)) {
5558 		ret = PTR_ERR(trans);
5559 		goto out_unset;
5560 	}
5561 
5562 	rc->merge_reloc_tree = true;
5563 
5564 	while (!list_empty(&reloc_roots)) {
5565 		reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);
5566 		list_del(&reloc_root->root_list);
5567 
5568 		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
5569 			list_add_tail(&reloc_root->root_list,
5570 				      &rc->reloc_roots);
5571 			continue;
5572 		}
5573 
5574 		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
5575 					    false);
5576 		if (IS_ERR(fs_root)) {
5577 			ret = PTR_ERR(fs_root);
5578 			list_add_tail(&reloc_root->root_list, &reloc_roots);
5579 			btrfs_end_transaction(trans);
5580 			goto out_unset;
5581 		}
5582 
5583 		ret = __add_reloc_root(reloc_root);
5584 		ASSERT(ret != -EEXIST);
5585 		if (ret) {
5586 			list_add_tail(&reloc_root->root_list, &reloc_roots);
5587 			btrfs_put_root(fs_root);
5588 			btrfs_end_transaction(trans);
5589 			goto out_unset;
5590 		}
5591 		fs_root->reloc_root = btrfs_grab_root(reloc_root);
5592 		btrfs_put_root(fs_root);
5593 	}
5594 
5595 	ret = btrfs_commit_transaction(trans);
5596 	if (ret)
5597 		goto out_unset;
5598 
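	/*
	 * The reloc roots are now reattached to their fs roots; merge them
	 * back, then commit once more to write out the merged result.
	 */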
5599 	merge_reloc_roots(rc);
5600 
5601 	unset_reloc_control(rc);
5602 
5603 	trans = btrfs_join_transaction(rc->extent_root);
5604 	if (IS_ERR(trans)) {
5605 		ret = PTR_ERR(trans);
5606 		goto out_clean;
5607 	}
5608 	ret = btrfs_commit_transaction(trans);
5609 out_clean:
5610 	ret2 = clean_dirty_subvols(rc);
5611 	if (ret2 < 0 && !ret)
5612 		ret = ret2;
5613 out_unset:
5614 	unset_reloc_control(rc);
5615 	reloc_chunk_end(fs_info);
5616 out_end:
5617 	free_reloc_control(rc);
5618 out:
5619 	free_reloc_roots(&reloc_roots);
5620 
5621 	btrfs_free_path(path);
5622 
5623 	if (ret == 0 && !btrfs_fs_incompat(fs_info, REMAP_TREE)) {
5624 		/* cleanup orphan inode in data relocation tree */
5625 		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
5626 		ASSERT(fs_root);
5627 		ret = btrfs_orphan_cleanup(fs_root);
5628 		btrfs_put_root(fs_root);
5629 	}
5630 	return ret;
5631 }
5632 
5633 /*
5634  * Helper to add ordered checksums for data relocation.
5635  *
5636  * Cloning the existing checksums properly handles nodatasum extents, and it
5637  * also saves the CPU time of recalculating the checksums.
5638  */
5639 int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
5640 {
5641 	struct btrfs_inode *inode = ordered->inode;
5642 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
5643 	u64 disk_bytenr = ordered->file_offset + inode->reloc_block_group_start;
5644 	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
5645 	LIST_HEAD(list);
5646 	int ret;
5647 
5648 	if (unlikely(!csum_root)) {
5649 		btrfs_mark_ordered_extent_error(ordered);
5650 		btrfs_err(fs_info,
5651 			  "missing csum root for extent at bytenr %llu",
5652 			  disk_bytenr);
5653 		return -EUCLEAN;
5654 	}
5655 
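	/*
	 * Look up the checksums at the pre-relocation disk bytenr; each sum
	 * is rebased onto the new ordered extent's disk_bytenr below before
	 * being attached to the ordered extent.
	 */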
5656 	ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
5657 				      disk_bytenr + ordered->num_bytes - 1,
5658 				      &list, false);
5659 	if (ret < 0) {
5660 		btrfs_mark_ordered_extent_error(ordered);
5661 		return ret;
5662 	}
5663 
5664 	while (!list_empty(&list)) {
5665 		struct btrfs_ordered_sum *sums =
5666 			list_first_entry(&list, struct btrfs_ordered_sum, list);
5667 
5668 		list_del_init(&sums->list);
5669 
5670 		/*
5671 		 * We need to offset the new_bytenr based on where the csum is.
5672 		 * We need to do this because we will read in entire prealloc
5673 		 * extents but we may have written to, say, the middle of the
5674 		 * prealloc extent, so we need to make sure the csum goes with
5675 		 * the right disk offset.
5676 		 *
5677 		 * We can do this because the data reloc inode refers strictly
5678 		 * to the on disk bytes, so we don't have to worry about
5679 		 * disk_len vs real len like with real inodes since it's all
5680 		 * disk length.
5681 		 */
5682 		sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr;
5683 		btrfs_add_ordered_sum(ordered, sums);
5684 	}
5685 
5686 	return 0;
5687 }
5688 
5689 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
5690 			  struct btrfs_root *root,
5691 			  const struct extent_buffer *buf,
5692 			  struct extent_buffer *cow)
5693 {
5694 	struct btrfs_fs_info *fs_info = root->fs_info;
5695 	struct reloc_control *rc;
5696 	struct btrfs_backref_node *node;
5697 	int first_cow = 0;
5698 	int level;
5699 	int ret = 0;
5700 
5701 	rc = fs_info->reloc_ctl;
5702 	if (!rc)
5703 		return 0;
5704 
5705 	BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
5706 
5707 	level = btrfs_header_level(buf);
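	/*
	 * A header generation at or below the root's last_snapshot means the
	 * block has not been COWed since then, so this COW is the first one.
	 */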
5708 	if (btrfs_header_generation(buf) <=
5709 	    btrfs_root_last_snapshot(&root->root_item))
5710 		first_cow = 1;
5711 
5712 	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID && rc->create_reloc_tree) {
5713 		WARN_ON(!first_cow && level == 0);
5714 
5715 		node = rc->backref_cache.path[level];
5716 
5717 		/*
5718 		 * If node->bytenr != buf->start and node->new_bytenr !=
5719 		 * buf->start then we've got the wrong backref node for what we
5720 		 * expected to see here and the cache is incorrect.
5721 		 */
5722 		if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
5723 			btrfs_err(fs_info,
5724 "bytenr %llu was found but our backref cache was expecting %llu or %llu",
5725 				  buf->start, node->bytenr, node->new_bytenr);
5726 			return -EUCLEAN;
5727 		}
5728 
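		/*
		 * Repoint the cached backref node at the newly COWed buffer
		 * so the rest of relocation follows the new copy.
		 */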
5729 		btrfs_backref_drop_node_buffer(node);
5730 		refcount_inc(&cow->refs);
5731 		node->eb = cow;
5732 		node->new_bytenr = cow->start;
5733 
5734 		if (!node->pending) {
5735 			list_move_tail(&node->list,
5736 				       &rc->backref_cache.pending[level]);
5737 			node->pending = 1;
5738 		}
5739 
5740 		if (first_cow)
5741 			mark_block_processed(rc, node);
5742 
5743 		if (first_cow && level > 0)
5744 			rc->nodes_relocated += buf->len;
5745 	}
5746 
5747 	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
5748 		ret = replace_file_extents(trans, rc, root, cow);
5749 	return ret;
5750 }
5751 
5752 /*
5753  * Called before creating a snapshot. It calculates the metadata reservation
5754  * required for relocating the tree blocks in the snapshot.
5755  */
5756 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
5757 			      u64 *bytes_to_reserve)
5758 {
5759 	struct btrfs_root *root = pending->root;
5760 	struct reloc_control *rc = root->fs_info->reloc_ctl;
5761 
5762 	if (!rc || !have_reloc_root(root))
5763 		return;
5764 
5765 	if (!rc->merge_reloc_tree)
5766 		return;
5767 
5768 	root = root->reloc_root;
5769 	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
5770 	/*
5771 	 * Relocation is in the stage of merging trees. The space used
5772 	 * by merging a reloc tree is twice the size of the relocated
5773 	 * tree nodes in the worst case: half for COWing the reloc tree
5774 	 * and half for COWing the fs tree. The space used by COWing
5775 	 * the reloc tree will be freed after the tree is dropped, but
5776 	 * if we create a snapshot, COWing the fs tree may use more
5777 	 * space than it frees, so we need to reserve extra space for
5778 	 * that case.
5779 	 */
5780 	*bytes_to_reserve += rc->nodes_relocated;
5781 }
5782 
5783 /*
5784  * Called after a snapshot is created. Migrate the block reservation
5785  * and create a reloc root for the newly created snapshot.
5786  *
5787  * This is similar to btrfs_init_reloc_root(): we come out of here with two
5788  * references held on the reloc_root, one for root->reloc_root and one for
5789  * rc->reloc_roots.
5790  */
5791 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
5792 			       struct btrfs_pending_snapshot *pending)
5793 {
5794 	struct btrfs_root *root = pending->root;
5795 	struct btrfs_root *reloc_root;
5796 	struct btrfs_root *new_root;
5797 	struct reloc_control *rc = root->fs_info->reloc_ctl;
5798 	int ret;
5799 
5800 	if (!rc || !have_reloc_root(root))
5801 		return 0;
5802 
5803 	rc = root->fs_info->reloc_ctl;
5804 	rc->merging_rsv_size += rc->nodes_relocated;
5805 
5806 	if (rc->merge_reloc_tree) {
5807 		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
5808 					      rc->block_rsv,
5809 					      rc->nodes_relocated, true);
5810 		if (ret)
5811 			return ret;
5812 	}
5813 
5814 	new_root = pending->snap;
5815 	reloc_root = create_reloc_root(trans, root->reloc_root, btrfs_root_id(new_root));
5816 	if (IS_ERR(reloc_root))
5817 		return PTR_ERR(reloc_root);
5818 
5819 	ret = __add_reloc_root(reloc_root);
5820 	ASSERT(ret != -EEXIST);
5821 	if (ret) {
5822 		/* Pairs with create_reloc_root */
5823 		btrfs_put_root(reloc_root);
5824 		return ret;
5825 	}
5826 	new_root->reloc_root = btrfs_grab_root(reloc_root);
5827 	return 0;
5828 }
5829 
5830 /*
5831  * Get the current bytenr for the block group which is being relocated.
5832  *
5833  * Return U64_MAX if no running relocation.
5834  */
5835 u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
5836 {
5837 	u64 logical = U64_MAX;
5838 
5839 	lockdep_assert_held(&fs_info->reloc_mutex);
5840 
5841 	if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group)
5842 		logical = fs_info->reloc_ctl->block_group->start;
5843 	return logical;
5844 }
5845 
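/*
 * Insert a remap tree item mapping [old_addr, old_addr + length) to new_addr.
 *
 * If old_addr == new_addr a single identity remap item is inserted.
 * Otherwise a remap item keyed on the old address and a backref item keyed
 * on the new address are inserted, so the mapping can be found from either
 * end.  For example, remapping [X, X + len) to Y results in:
 *
 *   (X, BTRFS_REMAP_KEY, len)          -> remap item with address Y
 *   (Y, BTRFS_REMAP_BACKREF_KEY, len)  -> remap item with address X
 */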
5846 static int insert_remap_item(struct btrfs_trans_handle *trans, struct btrfs_path *path,
5847 			     u64 old_addr, u64 length, u64 new_addr)
5848 {
5849 	int ret;
5850 	struct btrfs_fs_info *fs_info = trans->fs_info;
5851 	struct btrfs_key key;
5852 	struct btrfs_remap_item remap = { 0 };
5853 
5854 	if (old_addr == new_addr) {
5855 		/* Add new identity remap item. */
5856 		key.objectid = old_addr;
5857 		key.type = BTRFS_IDENTITY_REMAP_KEY;
5858 		key.offset = length;
5859 
5860 		ret = btrfs_insert_empty_item(trans, fs_info->remap_root, path,
5861 					      &key, 0);
5862 		if (ret)
5863 			return ret;
5864 	} else {
5865 		/* Add new remap item. */
5866 		key.objectid = old_addr;
5867 		key.type = BTRFS_REMAP_KEY;
5868 		key.offset = length;
5869 
5870 		ret = btrfs_insert_empty_item(trans, fs_info->remap_root,
5871 					      path, &key, sizeof(struct btrfs_remap_item));
5872 		if (ret)
5873 			return ret;
5874 
5875 		btrfs_set_stack_remap_address(&remap, new_addr);
5876 
5877 		write_extent_buffer(path->nodes[0], &remap,
5878 			btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
5879 			sizeof(struct btrfs_remap_item));
5880 
5881 		btrfs_release_path(path);
5882 
5883 		/* Add new backref item. */
5884 		key.objectid = new_addr;
5885 		key.type = BTRFS_REMAP_BACKREF_KEY;
5886 		key.offset = length;
5887 
5888 		ret = btrfs_insert_empty_item(trans, fs_info->remap_root,
5889 					      path, &key,
5890 					      sizeof(struct btrfs_remap_item));
5891 		if (ret)
5892 			return ret;
5893 
5894 		btrfs_set_stack_remap_address(&remap, old_addr);
5895 
5896 		write_extent_buffer(path->nodes[0], &remap,
5897 			btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
5898 			sizeof(struct btrfs_remap_item));
5899 	}
5900 
5901 	btrfs_release_path(path);
5902 
5903 	return 0;
5904 }
5905 
5906 /*
5907  * Punch a hole in the remap item or identity remap item pointed to by path,
5908  * for the range [hole_start, hole_start + hole_length).
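 *
 * Returns the number of bytes of the hole that were covered by this item
 * (the caller repeats for any remainder), or a negative errno on failure.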
5909  */
5910 static int remove_range_from_remap_tree(struct btrfs_trans_handle *trans,
5911 					struct btrfs_path *path,
5912 					struct btrfs_block_group *bg,
5913 					u64 hole_start, u64 hole_length)
5914 {
5915 	int ret;
5916 	struct btrfs_fs_info *fs_info = trans->fs_info;
5917 	struct extent_buffer *leaf = path->nodes[0];
5918 	struct btrfs_key key;
5919 	u64 hole_end, new_addr, remap_start, remap_length, remap_end;
5920 	u64 overlap_length;
5921 	bool is_identity_remap;
5922 	int identity_count_delta = 0;
5923 
5924 	hole_end = hole_start + hole_length;
5925 
5926 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5927 
5928 	is_identity_remap = (key.type == BTRFS_IDENTITY_REMAP_KEY);
5929 
5930 	remap_start = key.objectid;
5931 	remap_length = key.offset;
5932 	remap_end = remap_start + remap_length;
5933 
5934 	if (is_identity_remap) {
5935 		new_addr = remap_start;
5936 	} else {
5937 		struct btrfs_remap_item *remap_ptr;
5938 
5939 		remap_ptr = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item);
5940 		new_addr = btrfs_remap_address(leaf, remap_ptr);
5941 	}
5942 
5943 	/* Delete old item. */
5944 	ret = btrfs_del_item(trans, fs_info->remap_root, path);
5945 	btrfs_release_path(path);
5946 	if (ret)
5947 		return ret;
5948 
5949 	if (is_identity_remap) {
5950 		identity_count_delta = -1;
5951 	} else {
5952 		/* Remove backref. */
5953 		key.objectid = new_addr;
5954 		key.type = BTRFS_REMAP_BACKREF_KEY;
5955 		key.offset = remap_length;
5956 
5957 		ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, -1, 1);
5958 		if (ret) {
5959 			if (ret == 1) {
5960 				btrfs_release_path(path);
5961 				ret = -ENOENT;
5962 			}
5963 			return ret;
5964 		}
5965 
5966 		ret = btrfs_del_item(trans, fs_info->remap_root, path);
5967 
5968 		btrfs_release_path(path);
5969 
5970 		if (ret)
5971 			return ret;
5972 	}
5973 
5974 	/* If hole_start > remap_start, re-add the start of the remap item. */
5975 	if (hole_start > remap_start) {
5976 		ret = insert_remap_item(trans, path, remap_start,
5977 					hole_start - remap_start, new_addr);
5978 		if (ret)
5979 			return ret;
5980 
5981 		if (is_identity_remap)
5982 			identity_count_delta++;
5983 	}
5984 
5985 	/* If hole_end < remap_end, re-add the end of the remap item. */
5986 	if (hole_end < remap_end) {
5987 		ret = insert_remap_item(trans, path, hole_end,
5988 					remap_end - hole_end,
5989 					hole_end - remap_start + new_addr);
5990 		if (ret)
5991 			return ret;
5992 
5993 		if (is_identity_remap)
5994 			identity_count_delta++;
5995 	}
5996 
5997 	if (identity_count_delta != 0)
5998 		adjust_identity_remap_count(trans, bg, identity_count_delta);
5999 
6000 	overlap_length = min_t(u64, hole_end, remap_end) -
6001 			 max_t(u64, hole_start, remap_start);
6002 
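	/*
	 * For a real remap the removed range lives at the remapped
	 * destination address, so drop those bytes from the destination
	 * block group's remap accounting and give them back to the free
	 * space tree.
	 */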
6003 	if (!is_identity_remap) {
6004 		struct btrfs_block_group *dest_bg;
6005 
6006 		dest_bg = btrfs_lookup_block_group(fs_info, new_addr);
6007 		if (unlikely(!dest_bg))
6008 			return -EUCLEAN;
6009 
6010 		adjust_block_group_remap_bytes(trans, dest_bg, -overlap_length);
6011 		btrfs_put_block_group(dest_bg);
6012 		ret = btrfs_add_to_free_space_tree(trans,
6013 						   hole_start - remap_start + new_addr,
6014 						   overlap_length);
6015 		if (ret)
6016 			return ret;
6017 	}
6018 
6019 	ret = overlap_length;
6020 
6021 	return ret;
6022 }
6023 
6024 /*
6025  * Return 1 if remove_range_from_remap_tree() was called successfully,
6026  * 0 if the block group wasn't remapped, and a negative errno on error.
6027  */
6028 int btrfs_remove_extent_from_remap_tree(struct btrfs_trans_handle *trans,
6029 					struct btrfs_path *path,
6030 					u64 bytenr, u64 num_bytes)
6031 {
6032 	struct btrfs_fs_info *fs_info = trans->fs_info;
6033 	struct btrfs_key key, found_key;
6034 	struct extent_buffer *leaf;
6035 	struct btrfs_block_group *bg;
6036 	int ret, length;
6037 
6038 	if (!(btrfs_super_incompat_flags(fs_info->super_copy) &
6039 	      BTRFS_FEATURE_INCOMPAT_REMAP_TREE))
6040 		return 0;
6041 
6042 	bg = btrfs_lookup_block_group(fs_info, bytenr);
6043 	if (!bg)
6044 		return 0;
6045 
6046 	mutex_lock(&fs_info->remap_mutex);
6047 
6048 	if (!(bg->flags & BTRFS_BLOCK_GROUP_REMAPPED)) {
6049 		mutex_unlock(&fs_info->remap_mutex);
6050 		btrfs_put_block_group(bg);
6051 		return 0;
6052 	}
6053 
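	/*
	 * Find the remap or identity remap item covering bytenr: search for
	 * the largest key not greater than (bytenr, (u8)-1, (u64)-1) and
	 * step back one slot.  Repeat until the whole
	 * [bytenr, bytenr + num_bytes) range has been removed.
	 */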
6054 	do {
6055 		key.objectid = bytenr;
6056 		key.type = (u8)-1;
6057 		key.offset = (u64)-1;
6058 
6059 		ret = btrfs_search_slot(trans, fs_info->remap_root, &key, path, -1, 1);
6060 		if (ret < 0)
6061 			goto end;
6062 
6063 		leaf = path->nodes[0];
6064 		if (path->slots[0] == 0) {
6065 			ret = -ENOENT;
6066 			goto end;
6067 		}
6068 
6069 		path->slots[0]--;
6070 
6071 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6072 
6073 		if (found_key.type != BTRFS_IDENTITY_REMAP_KEY &&
6074 		    found_key.type != BTRFS_REMAP_KEY) {
6075 			ret = -ENOENT;
6076 			goto end;
6077 		}
6078 
6079 		if (bytenr < found_key.objectid ||
6080 		    bytenr >= found_key.objectid + found_key.offset) {
6081 			ret = -ENOENT;
6082 			goto end;
6083 		}
6084 
6085 		length = remove_range_from_remap_tree(trans, path, bg, bytenr, num_bytes);
6086 		if (length < 0) {
6087 			ret = length;
6088 			goto end;
6089 		}
6090 
6091 		bytenr += length;
6092 		num_bytes -= length;
6093 	} while (num_bytes > 0);
6094 
6095 	ret = 1;
6096 
6097 end:
6098 	mutex_unlock(&fs_info->remap_mutex);
6099 
6100 	btrfs_put_block_group(bg);
6101 	btrfs_release_path(path);
6102 
6103 	return ret;
6104 }
6105