1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/error-injection.h>
13 #include "ctree.h"
14 #include "disk-io.h"
15 #include "transaction.h"
16 #include "volumes.h"
17 #include "locking.h"
18 #include "btrfs_inode.h"
19 #include "async-thread.h"
20 #include "free-space-cache.h"
21 #include "qgroup.h"
22 #include "print-tree.h"
23 #include "delalloc-space.h"
24 #include "block-group.h"
25 #include "backref.h"
26 #include "misc.h"
27 #include "subpage.h"
28 #include "zoned.h"
29 #include "inode-item.h"
30 #include "space-info.h"
31 #include "fs.h"
32 #include "accessors.h"
33 #include "extent-tree.h"
34 #include "root-tree.h"
35 #include "file-item.h"
36 #include "relocation.h"
37 #include "super.h"
38 #include "tree-checker.h"
39 #include "raid-stripe-tree.h"
40
41 /*
42 * Relocation overview
43 *
44 * [What does relocation do]
45 *
46 * The objective of relocation is to relocate all extents of the target block
47 * group to other block groups.
48 * This is utilized by resize (shrink only), profile conversion, space
49 * compaction, or the balance routine to spread chunks over devices.
50 *
51 * Before | After
52 * ------------------------------------------------------------------
53 * BG A: 10 data extents | BG A: deleted
54 * BG B: 2 data extents | BG B: 10 data extents (2 old + 8 relocated)
55 * BG C: 1 data extent | BG C: 3 data extents (1 old + 2 relocated)
56 *
57 * [How does relocation work]
58 *
59 * 1. Mark the target block group read-only
60 * New extents won't be allocated from the target block group.
61 *
62 * 2.1 Record each extent in the target block group
63 * To build a proper map of extents to be relocated.
64 *
65 * 2.2 Build data reloc tree and reloc trees
66 * Data reloc tree will contain an inode, recording all newly relocated
67 * data extents.
68 * There will be only one data reloc tree for one data block group.
69 *
70 * Reloc tree will be a special snapshot of its source tree, containing
71 * relocated tree blocks.
72 * Each tree referring to a tree block in the target block group will get its
73 * reloc tree built.
74 *
75 * 2.3 Swap source tree with its corresponding reloc tree
76 * Each involved tree only refers to new extents after swap.
77 *
78 * 3. Cleanup reloc trees and data reloc tree.
79 * As old extents in the target block group are still referenced by reloc
80 * trees, we need to clean them up before really freeing the target block
81 * group.
82 *
83 * The main complexity is in steps 2.2 and 2.3.
84 *
85 * The entry point of relocation is the relocate_block_group() function.
86 */
87
88 #define RELOCATION_RESERVED_NODES 256
89 /*
90 * map address of tree root to tree
91 */
92 struct mapping_node {
93 struct {
94 struct rb_node rb_node;
95 u64 bytenr;
96 }; /* Use rb_simple_node for search/insert */
97 void *data;
98 };
99
100 struct mapping_tree {
101 struct rb_root rb_root;
102 spinlock_t lock;
103 };
104
105 /*
106 * Represent a tree block to process
107 */
108 struct tree_block {
109 struct {
110 struct rb_node rb_node;
111 u64 bytenr;
112 }; /* Use rb_simple_node for search/insert */
113 u64 owner;
114 struct btrfs_key key;
115 u8 level;
116 bool key_ready;
117 };
118
119 #define MAX_EXTENTS 128
120
121 struct file_extent_cluster {
122 u64 start;
123 u64 end;
124 u64 boundary[MAX_EXTENTS];
125 unsigned int nr;
126 u64 owning_root;
127 };
128
129 /* Stages of data relocation. */
130 enum reloc_stage {
131 MOVE_DATA_EXTENTS,
132 UPDATE_DATA_PTRS
133 };
134
135 struct reloc_control {
136 /* block group to relocate */
137 struct btrfs_block_group *block_group;
138 /* extent tree */
139 struct btrfs_root *extent_root;
140 /* inode for moving data */
141 struct inode *data_inode;
142
143 struct btrfs_block_rsv *block_rsv;
144
145 struct btrfs_backref_cache backref_cache;
146
147 struct file_extent_cluster cluster;
148 /* tree blocks that have been processed */
149 struct extent_io_tree processed_blocks;
150 /* map start of tree root to corresponding reloc tree */
151 struct mapping_tree reloc_root_tree;
152 /* list of reloc trees */
153 struct list_head reloc_roots;
154 /* list of subvolume trees that get relocated */
155 struct list_head dirty_subvol_roots;
156 /* size of metadata reservation for merging reloc trees */
157 u64 merging_rsv_size;
158 /* size of relocated tree nodes */
159 u64 nodes_relocated;
160 /* reserved size for block group relocation */
161 u64 reserved_bytes;
162
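/* logical address where the next search for extents to relocate will start */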
163 u64 search_start;
164 u64 extents_found;
165
166 enum reloc_stage stage;
167 bool create_reloc_tree;
168 bool merge_reloc_tree;
169 bool found_file_extent;
170 };
171
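/*
 * Mark the tree block represented by @node as processed.
 *
 * The block's range is recorded in rc->processed_blocks when it is a leaf or
 * lies inside the block group being relocated; the backref node itself is
 * always flagged as processed.
 */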
172 static void mark_block_processed(struct reloc_control *rc,
173 struct btrfs_backref_node *node)
174 {
175 u32 blocksize;
176
177 if (node->level == 0 ||
178 in_range(node->bytenr, rc->block_group->start,
179 rc->block_group->length)) {
180 blocksize = rc->extent_root->fs_info->nodesize;
181 set_extent_bit(&rc->processed_blocks, node->bytenr,
182 node->bytenr + blocksize - 1, EXTENT_DIRTY, NULL);
183 }
184 node->processed = 1;
185 }
186
187 /*
188 * walk up backref nodes until reaching the node that represents the tree root
189 */
190 static struct btrfs_backref_node *walk_up_backref(
191 struct btrfs_backref_node *node,
192 struct btrfs_backref_edge *edges[], int *index)
193 {
194 struct btrfs_backref_edge *edge;
195 int idx = *index;
196
197 while (!list_empty(&node->upper)) {
198 edge = list_entry(node->upper.next,
199 struct btrfs_backref_edge, list[LOWER]);
200 edges[idx++] = edge;
201 node = edge->node[UPPER];
202 }
203 BUG_ON(node->detached);
204 *index = idx;
205 return node;
206 }
207
208 /*
209 * walk down backref nodes to find start of next reference path
210 */
211 static struct btrfs_backref_node *walk_down_backref(
212 struct btrfs_backref_edge *edges[], int *index)
213 {
214 struct btrfs_backref_edge *edge;
215 struct btrfs_backref_node *lower;
216 int idx = *index;
217
218 while (idx > 0) {
219 edge = edges[idx - 1];
220 lower = edge->node[LOWER];
221 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
222 idx--;
223 continue;
224 }
225 edge = list_entry(edge->list[LOWER].next,
226 struct btrfs_backref_edge, list[LOWER]);
227 edges[idx - 1] = edge;
228 *index = idx;
229 return edge->node[UPPER];
230 }
231 *index = 0;
232 return NULL;
233 }
234
235 static bool reloc_root_is_dead(const struct btrfs_root *root)
236 {
237 /*
238 * Pair with set_bit/clear_bit in clean_dirty_subvols and
239 * btrfs_update_reloc_root. We need to see the updated bit before
240 * trying to access reloc_root
241 */
242 smp_rmb();
243 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
244 return true;
245 return false;
246 }
247
248 /*
249 * Check if this subvolume tree has a valid reloc tree.
250 *
251 * A reloc tree after swap is considered dead, thus not valid.
252 * This is enough for most callers, as they don't distinguish a dead reloc
253 * root from no reloc root. But btrfs_should_ignore_reloc_root() below is a
254 * special case.
255 */
256 static bool have_reloc_root(const struct btrfs_root *root)
257 {
258 if (reloc_root_is_dead(root))
259 return false;
260 if (!root->reloc_root)
261 return false;
262 return true;
263 }
264
265 bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
266 {
267 struct btrfs_root *reloc_root;
268
269 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
270 return false;
271
272 /* This root has been merged with its reloc tree, we can ignore it */
273 if (reloc_root_is_dead(root))
274 return true;
275
276 reloc_root = root->reloc_root;
277 if (!reloc_root)
278 return false;
279
280 if (btrfs_header_generation(reloc_root->commit_root) ==
281 root->fs_info->running_transaction->transid)
282 return false;
283 /*
284 * If there is a reloc tree and it was created in a previous transaction,
285 * backref lookup can find the reloc tree, so the backref node for the fs
286 * tree root is useless for relocation.
287 */
288 return true;
289 }
290
291 /*
292 * find reloc tree by address of tree root
293 */
294 struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
295 {
296 struct reloc_control *rc = fs_info->reloc_ctl;
297 struct rb_node *rb_node;
298 struct mapping_node *node;
299 struct btrfs_root *root = NULL;
300
301 ASSERT(rc);
302 spin_lock(&rc->reloc_root_tree.lock);
303 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
304 if (rb_node) {
305 node = rb_entry(rb_node, struct mapping_node, rb_node);
306 root = node->data;
307 }
308 spin_unlock(&rc->reloc_root_tree.lock);
309 return btrfs_grab_root(root);
310 }
311
312 /*
313 * For useless nodes, do two major clean ups:
314 *
315 * - Cleanup the children edges and nodes
316 * If a child node is also an orphan (has no parent) during cleanup, then the
317 * child node will also be cleaned up.
318 *
319 * - Freeing up leaves (level 0), keeping nodes detached
320 * For nodes, the node is still cached as "detached"
321 *
322 * Return false if @node is not in the @useless_nodes list.
323 * Return true if @node is in the @useless_nodes list.
324 */
325 static bool handle_useless_nodes(struct reloc_control *rc,
326 struct btrfs_backref_node *node)
327 {
328 struct btrfs_backref_cache *cache = &rc->backref_cache;
329 struct list_head *useless_node = &cache->useless_node;
330 bool ret = false;
331
332 while (!list_empty(useless_node)) {
333 struct btrfs_backref_node *cur;
334
335 cur = list_first_entry(useless_node, struct btrfs_backref_node,
336 list);
337 list_del_init(&cur->list);
338
339 /* Only tree root nodes can be added to @useless_nodes */
340 ASSERT(list_empty(&cur->upper));
341
342 if (cur == node)
343 ret = true;
344
345 /* Cleanup the lower edges */
346 while (!list_empty(&cur->lower)) {
347 struct btrfs_backref_edge *edge;
348 struct btrfs_backref_node *lower;
349
350 edge = list_entry(cur->lower.next,
351 struct btrfs_backref_edge, list[UPPER]);
352 list_del(&edge->list[UPPER]);
353 list_del(&edge->list[LOWER]);
354 lower = edge->node[LOWER];
355 btrfs_backref_free_edge(cache, edge);
356
357 /* Child node is also orphan, queue for cleanup */
358 if (list_empty(&lower->upper))
359 list_add(&lower->list, useless_node);
360 }
361 /* Mark this block processed for relocation */
362 mark_block_processed(rc, cur);
363
364 /*
365 * Backref nodes for tree leaves are deleted from the cache.
366 * Backref nodes for upper level tree blocks are left in the
367 * cache to avoid unnecessary backref lookup.
368 */
369 if (cur->level > 0) {
370 cur->detached = 1;
371 } else {
372 rb_erase(&cur->rb_node, &cache->rb_root);
373 btrfs_backref_free_node(cache, cur);
374 }
375 }
376 return ret;
377 }
378
379 /*
380 * Build backref tree for a given tree block. Root of the backref tree
381 * corresponds to the tree block, leaves of the backref tree correspond to roots of
382 * b-trees that reference the tree block.
383 *
384 * The basic idea of this function is to check backrefs of a given block to find
385 * upper level blocks that reference the block, and then check backrefs of
386 * these upper level blocks recursively. The recursion stops when a tree root
387 * is reached or the backrefs for the block are cached.
388 *
389 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
390 * all upper level blocks that directly/indirectly reference the block are also
391 * cached.
392 */
393 static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
394 struct btrfs_trans_handle *trans,
395 struct reloc_control *rc, struct btrfs_key *node_key,
396 int level, u64 bytenr)
397 {
398 struct btrfs_backref_iter *iter;
399 struct btrfs_backref_cache *cache = &rc->backref_cache;
400 /* For searching parent of TREE_BLOCK_REF */
401 struct btrfs_path *path;
402 struct btrfs_backref_node *cur;
403 struct btrfs_backref_node *node = NULL;
404 struct btrfs_backref_edge *edge;
405 int ret;
406
407 iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
408 if (!iter)
409 return ERR_PTR(-ENOMEM);
410 path = btrfs_alloc_path();
411 if (!path) {
412 ret = -ENOMEM;
413 goto out;
414 }
415
416 node = btrfs_backref_alloc_node(cache, bytenr, level);
417 if (!node) {
418 ret = -ENOMEM;
419 goto out;
420 }
421
422 cur = node;
423
424 /* Breadth-first search to build backref cache */
425 do {
426 ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
427 node_key, cur);
428 if (ret < 0)
429 goto out;
430
431 edge = list_first_entry_or_null(&cache->pending_edge,
432 struct btrfs_backref_edge, list[UPPER]);
433 /*
434 * The pending list isn't empty, take the first block to
435 * process
436 */
437 if (edge) {
438 list_del_init(&edge->list[UPPER]);
439 cur = edge->node[UPPER];
440 }
441 } while (edge);
442
443 /* Finish the upper linkage of newly added edges/nodes */
444 ret = btrfs_backref_finish_upper_links(cache, node);
445 if (ret < 0)
446 goto out;
447
448 if (handle_useless_nodes(rc, node))
449 node = NULL;
450 out:
451 btrfs_free_path(iter->path);
452 kfree(iter);
453 btrfs_free_path(path);
454 if (ret) {
455 btrfs_backref_error_cleanup(cache, node);
456 return ERR_PTR(ret);
457 }
458 ASSERT(!node || !node->detached);
459 ASSERT(list_empty(&cache->useless_node) &&
460 list_empty(&cache->pending_edge));
461 return node;
462 }
463
464 /*
465 * helper to add 'address of tree root -> reloc tree' mapping
466 */
467 static int __add_reloc_root(struct btrfs_root *root)
468 {
469 struct btrfs_fs_info *fs_info = root->fs_info;
470 struct rb_node *rb_node;
471 struct mapping_node *node;
472 struct reloc_control *rc = fs_info->reloc_ctl;
473
474 node = kmalloc(sizeof(*node), GFP_NOFS);
475 if (!node)
476 return -ENOMEM;
477
478 node->bytenr = root->commit_root->start;
479 node->data = root;
480
481 spin_lock(&rc->reloc_root_tree.lock);
482 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
483 node->bytenr, &node->rb_node);
484 spin_unlock(&rc->reloc_root_tree.lock);
485 if (rb_node) {
486 btrfs_err(fs_info,
487 "Duplicate root found for start=%llu while inserting into relocation tree",
488 node->bytenr);
489 return -EEXIST;
490 }
491
492 list_add_tail(&root->root_list, &rc->reloc_roots);
493 return 0;
494 }
495
496 /*
497 * helper to delete the 'address of tree root -> reloc tree'
498 * mapping
499 */
500 static void __del_reloc_root(struct btrfs_root *root)
501 {
502 struct btrfs_fs_info *fs_info = root->fs_info;
503 struct rb_node *rb_node;
504 struct mapping_node *node = NULL;
505 struct reloc_control *rc = fs_info->reloc_ctl;
506 bool put_ref = false;
507
508 if (rc && root->node) {
509 spin_lock(&rc->reloc_root_tree.lock);
510 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
511 root->commit_root->start);
512 if (rb_node) {
513 node = rb_entry(rb_node, struct mapping_node, rb_node);
514 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
515 RB_CLEAR_NODE(&node->rb_node);
516 }
517 spin_unlock(&rc->reloc_root_tree.lock);
518 ASSERT(!node || (struct btrfs_root *)node->data == root);
519 }
520
521 /*
522 * We only put the reloc root here if it's on the list. There are a lot
523 * of places where the pattern is to splice the rc->reloc_roots, process
524 * the reloc roots, and then add the reloc root back onto
525 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
526 * list we don't want the reference being dropped, because the guy
527 * messing with the list is in charge of the reference.
528 */
529 spin_lock(&fs_info->trans_lock);
530 if (!list_empty(&root->root_list)) {
531 put_ref = true;
532 list_del_init(&root->root_list);
533 }
534 spin_unlock(&fs_info->trans_lock);
535 if (put_ref)
536 btrfs_put_root(root);
537 kfree(node);
538 }
539
540 /*
541 * helper to update the 'address of tree root -> reloc tree'
542 * mapping
543 */
544 static int __update_reloc_root(struct btrfs_root *root)
545 {
546 struct btrfs_fs_info *fs_info = root->fs_info;
547 struct rb_node *rb_node;
548 struct mapping_node *node = NULL;
549 struct reloc_control *rc = fs_info->reloc_ctl;
550
551 spin_lock(&rc->reloc_root_tree.lock);
552 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
553 root->commit_root->start);
554 if (rb_node) {
555 node = rb_entry(rb_node, struct mapping_node, rb_node);
556 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
557 }
558 spin_unlock(&rc->reloc_root_tree.lock);
559
560 if (!node)
561 return 0;
562 BUG_ON((struct btrfs_root *)node->data != root);
563
564 spin_lock(&rc->reloc_root_tree.lock);
565 node->bytenr = root->node->start;
566 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
567 node->bytenr, &node->rb_node);
568 spin_unlock(&rc->reloc_root_tree.lock);
569 if (rb_node)
570 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
571 return 0;
572 }
573
574 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
575 struct btrfs_root *root, u64 objectid)
576 {
577 struct btrfs_fs_info *fs_info = root->fs_info;
578 struct btrfs_root *reloc_root;
579 struct extent_buffer *eb;
580 struct btrfs_root_item *root_item;
581 struct btrfs_key root_key;
582 int ret = 0;
583 bool must_abort = false;
584
585 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
586 if (!root_item)
587 return ERR_PTR(-ENOMEM);
588
589 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
590 root_key.type = BTRFS_ROOT_ITEM_KEY;
591 root_key.offset = objectid;
592
593 if (btrfs_root_id(root) == objectid) {
594 u64 commit_root_gen;
595
596 /* called by btrfs_init_reloc_root */
597 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
598 BTRFS_TREE_RELOC_OBJECTID);
599 if (ret)
600 goto fail;
601
602 /*
603 * Set the last_snapshot field to the generation of the commit
604 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
605 * correctly (returns true) when the relocation root is created
606 * either inside the critical section of a transaction commit
607 * (through transaction.c:qgroup_account_snapshot()) or when
608 * it's created before the transaction commit is started.
609 */
610 commit_root_gen = btrfs_header_generation(root->commit_root);
611 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
612 } else {
613 /*
614 * Called by btrfs_reloc_post_snapshot_hook.
615 * The source tree is a reloc tree, all tree blocks
616 * modified after it was created have the RELOC flag
617 * set in their headers, so it's OK not to update
618 * the 'last_snapshot'.
619 */
620 ret = btrfs_copy_root(trans, root, root->node, &eb,
621 BTRFS_TREE_RELOC_OBJECTID);
622 if (ret)
623 goto fail;
624 }
625
626 /*
627 * We have changed references at this point, we must abort the
628 * transaction if anything fails.
629 */
630 must_abort = true;
631
632 memcpy(root_item, &root->root_item, sizeof(*root_item));
633 btrfs_set_root_bytenr(root_item, eb->start);
634 btrfs_set_root_level(root_item, btrfs_header_level(eb));
635 btrfs_set_root_generation(root_item, trans->transid);
636
637 if (btrfs_root_id(root) == objectid) {
638 btrfs_set_root_refs(root_item, 0);
639 memset(&root_item->drop_progress, 0,
640 sizeof(struct btrfs_disk_key));
641 btrfs_set_root_drop_level(root_item, 0);
642 }
643
644 btrfs_tree_unlock(eb);
645 free_extent_buffer(eb);
646
647 ret = btrfs_insert_root(trans, fs_info->tree_root,
648 &root_key, root_item);
649 if (ret)
650 goto fail;
651
652 kfree(root_item);
653
654 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
655 if (IS_ERR(reloc_root)) {
656 ret = PTR_ERR(reloc_root);
657 goto abort;
658 }
659 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
660 btrfs_set_root_last_trans(reloc_root, trans->transid);
661 return reloc_root;
662 fail:
663 kfree(root_item);
664 abort:
665 if (must_abort)
666 btrfs_abort_transaction(trans, ret);
667 return ERR_PTR(ret);
668 }
669
670 /*
671 * create reloc tree for a given fs tree. The reloc tree is just a
672 * snapshot of the fs tree with a special root objectid.
673 *
674 * The reloc_root comes out of here with two references, one for
675 * root->reloc_root, and another for being on the rc->reloc_roots list.
676 */
677 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
678 struct btrfs_root *root)
679 {
680 struct btrfs_fs_info *fs_info = root->fs_info;
681 struct btrfs_root *reloc_root;
682 struct reloc_control *rc = fs_info->reloc_ctl;
683 struct btrfs_block_rsv *rsv;
684 int clear_rsv = 0;
685 int ret;
686
687 if (!rc)
688 return 0;
689
690 /*
691 * The subvolume has a reloc tree but the swap is finished, no need to
692 * create/update the dead reloc tree
693 */
694 if (reloc_root_is_dead(root))
695 return 0;
696
697 /*
698 * This is subtle but important. We do not do
699 * record_root_in_transaction for reloc roots, instead we record their
700 * corresponding fs root, and then here we update the last trans for the
701 * reloc root. This means that we have to do this for the entire life
702 * of the reloc root, regardless of which stage of the relocation we are
703 * in.
704 */
705 if (root->reloc_root) {
706 reloc_root = root->reloc_root;
707 btrfs_set_root_last_trans(reloc_root, trans->transid);
708 return 0;
709 }
710
711 /*
712 * We are merging reloc roots, we do not need new reloc trees. Also
713 * reloc trees never need their own reloc tree.
714 */
715 if (!rc->create_reloc_tree || btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
716 return 0;
717
718 if (!trans->reloc_reserved) {
719 rsv = trans->block_rsv;
720 trans->block_rsv = rc->block_rsv;
721 clear_rsv = 1;
722 }
723 reloc_root = create_reloc_root(trans, root, btrfs_root_id(root));
724 if (clear_rsv)
725 trans->block_rsv = rsv;
726 if (IS_ERR(reloc_root))
727 return PTR_ERR(reloc_root);
728
729 ret = __add_reloc_root(reloc_root);
730 ASSERT(ret != -EEXIST);
731 if (ret) {
732 /* Pairs with create_reloc_root */
733 btrfs_put_root(reloc_root);
734 return ret;
735 }
736 root->reloc_root = btrfs_grab_root(reloc_root);
737 return 0;
738 }
739
740 /*
741 * update root item of reloc tree
742 */
743 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
744 struct btrfs_root *root)
745 {
746 struct btrfs_fs_info *fs_info = root->fs_info;
747 struct btrfs_root *reloc_root;
748 struct btrfs_root_item *root_item;
749 int ret;
750
751 if (!have_reloc_root(root))
752 return 0;
753
754 reloc_root = root->reloc_root;
755 root_item = &reloc_root->root_item;
756
757 /*
758 * We are probably ok here, but __del_reloc_root() will drop its ref of
759 * the root. We have the ref for root->reloc_root, but just in case
760 * hold it while we update the reloc root.
761 */
762 btrfs_grab_root(reloc_root);
763
764 /* root->reloc_root will stay until current relocation finished */
765 if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree &&
766 btrfs_root_refs(root_item) == 0) {
767 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
768 /*
769 * Mark the tree as dead before we change reloc_root so
770 * have_reloc_root will not touch it from now on.
771 */
772 smp_wmb();
773 __del_reloc_root(reloc_root);
774 }
775
776 if (reloc_root->commit_root != reloc_root->node) {
777 __update_reloc_root(reloc_root);
778 btrfs_set_root_node(root_item, reloc_root->node);
779 free_extent_buffer(reloc_root->commit_root);
780 reloc_root->commit_root = btrfs_root_node(reloc_root);
781 }
782
783 ret = btrfs_update_root(trans, fs_info->tree_root,
784 &reloc_root->root_key, root_item);
785 btrfs_put_root(reloc_root);
786 return ret;
787 }
788
789 /*
790 * get new location of data
791 */
792 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
793 u64 bytenr, u64 num_bytes)
794 {
795 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
796 struct btrfs_path *path;
797 struct btrfs_file_extent_item *fi;
798 struct extent_buffer *leaf;
799 int ret;
800
801 path = btrfs_alloc_path();
802 if (!path)
803 return -ENOMEM;
804
805 bytenr -= BTRFS_I(reloc_inode)->reloc_block_group_start;
806 ret = btrfs_lookup_file_extent(NULL, root, path,
807 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
808 if (ret < 0)
809 goto out;
810 if (ret > 0) {
811 ret = -ENOENT;
812 goto out;
813 }
814
815 leaf = path->nodes[0];
816 fi = btrfs_item_ptr(leaf, path->slots[0],
817 struct btrfs_file_extent_item);
818
819 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
820 btrfs_file_extent_compression(leaf, fi) ||
821 btrfs_file_extent_encryption(leaf, fi) ||
822 btrfs_file_extent_other_encoding(leaf, fi));
823
824 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
825 ret = -EINVAL;
826 goto out;
827 }
828
829 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
830 ret = 0;
831 out:
832 btrfs_free_path(path);
833 return ret;
834 }
835
836 /*
837 * update file extent items in the tree leaf to point to
838 * the new locations.
839 */
840 static noinline_for_stack
841 int replace_file_extents(struct btrfs_trans_handle *trans,
842 struct reloc_control *rc,
843 struct btrfs_root *root,
844 struct extent_buffer *leaf)
845 {
846 struct btrfs_fs_info *fs_info = root->fs_info;
847 struct btrfs_key key;
848 struct btrfs_file_extent_item *fi;
849 struct btrfs_inode *inode = NULL;
850 u64 parent;
851 u64 bytenr;
852 u64 new_bytenr = 0;
853 u64 num_bytes;
854 u64 end;
855 u32 nritems;
856 u32 i;
857 int ret = 0;
858 int first = 1;
859
860 if (rc->stage != UPDATE_DATA_PTRS)
861 return 0;
862
863 /* reloc trees always use full backref */
864 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
865 parent = leaf->start;
866 else
867 parent = 0;
868
869 nritems = btrfs_header_nritems(leaf);
870 for (i = 0; i < nritems; i++) {
871 struct btrfs_ref ref = { 0 };
872
873 cond_resched();
874 btrfs_item_key_to_cpu(leaf, &key, i);
875 if (key.type != BTRFS_EXTENT_DATA_KEY)
876 continue;
877 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
878 if (btrfs_file_extent_type(leaf, fi) ==
879 BTRFS_FILE_EXTENT_INLINE)
880 continue;
881 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
882 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
883 if (bytenr == 0)
884 continue;
885 if (!in_range(bytenr, rc->block_group->start,
886 rc->block_group->length))
887 continue;
888
889 /*
890 * if we are modifying a block in the fs tree, wait for read_folio
891 * to complete and drop the extent cache
892 */
893 if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
894 if (first) {
895 inode = btrfs_find_first_inode(root, key.objectid);
896 first = 0;
897 } else if (inode && btrfs_ino(inode) < key.objectid) {
898 btrfs_add_delayed_iput(inode);
899 inode = btrfs_find_first_inode(root, key.objectid);
900 }
901 if (inode && btrfs_ino(inode) == key.objectid) {
902 struct extent_state *cached_state = NULL;
903
904 end = key.offset +
905 btrfs_file_extent_num_bytes(leaf, fi);
906 WARN_ON(!IS_ALIGNED(key.offset,
907 fs_info->sectorsize));
908 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
909 end--;
910 /* Take mmap lock to serialize with reflinks. */
911 if (!down_read_trylock(&inode->i_mmap_lock))
912 continue;
913 ret = try_lock_extent(&inode->io_tree, key.offset,
914 end, &cached_state);
915 if (!ret) {
916 up_read(&inode->i_mmap_lock);
917 continue;
918 }
919
920 btrfs_drop_extent_map_range(inode, key.offset, end, true);
921 unlock_extent(&inode->io_tree, key.offset, end,
922 &cached_state);
923 up_read(&inode->i_mmap_lock);
924 }
925 }
926
927 ret = get_new_location(rc->data_inode, &new_bytenr,
928 bytenr, num_bytes);
929 if (ret) {
930 /*
931 * Don't have to abort since we've not changed anything
932 * in the file extent yet.
933 */
934 break;
935 }
936
937 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
938
939 key.offset -= btrfs_file_extent_offset(leaf, fi);
940 ref.action = BTRFS_ADD_DELAYED_REF;
941 ref.bytenr = new_bytenr;
942 ref.num_bytes = num_bytes;
943 ref.parent = parent;
944 ref.owning_root = btrfs_root_id(root);
945 ref.ref_root = btrfs_header_owner(leaf);
946 btrfs_init_data_ref(&ref, key.objectid, key.offset,
947 btrfs_root_id(root), false);
948 ret = btrfs_inc_extent_ref(trans, &ref);
949 if (ret) {
950 btrfs_abort_transaction(trans, ret);
951 break;
952 }
953
954 ref.action = BTRFS_DROP_DELAYED_REF;
955 ref.bytenr = bytenr;
956 ref.num_bytes = num_bytes;
957 ref.parent = parent;
958 ref.owning_root = btrfs_root_id(root);
959 ref.ref_root = btrfs_header_owner(leaf);
960 btrfs_init_data_ref(&ref, key.objectid, key.offset,
961 btrfs_root_id(root), false);
962 ret = btrfs_free_extent(trans, &ref);
963 if (ret) {
964 btrfs_abort_transaction(trans, ret);
965 break;
966 }
967 }
968 if (inode)
969 btrfs_add_delayed_iput(inode);
970 return ret;
971 }
972
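/*
 * Compare the key at @slot of @eb with the key at the current slot of
 * @path at @level. Returns 0 if the keys are identical.
 */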
973 static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
974 int slot, const struct btrfs_path *path,
975 int level)
976 {
977 struct btrfs_disk_key key1;
978 struct btrfs_disk_key key2;
979 btrfs_node_key(eb, &key1, slot);
980 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
981 return memcmp(&key1, &key2, sizeof(key1));
982 }
983
984 /*
985 * try to replace tree blocks in the fs tree with the new blocks
986 * in the reloc tree. Tree blocks that haven't been modified since the
987 * reloc tree was created can be replaced.
988 *
989 * if a block was replaced, level of the block + 1 is returned.
990 * if no block got replaced, 0 is returned. if there are other
991 * errors, a negative error number is returned.
992 */
993 static noinline_for_stack
994 int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
995 struct btrfs_root *dest, struct btrfs_root *src,
996 struct btrfs_path *path, struct btrfs_key *next_key,
997 int lowest_level, int max_level)
998 {
999 struct btrfs_fs_info *fs_info = dest->fs_info;
1000 struct extent_buffer *eb;
1001 struct extent_buffer *parent;
1002 struct btrfs_ref ref = { 0 };
1003 struct btrfs_key key;
1004 u64 old_bytenr;
1005 u64 new_bytenr;
1006 u64 old_ptr_gen;
1007 u64 new_ptr_gen;
1008 u64 last_snapshot;
1009 u32 blocksize;
1010 int cow = 0;
1011 int level;
1012 int ret;
1013 int slot;
1014
1015 ASSERT(btrfs_root_id(src) == BTRFS_TREE_RELOC_OBJECTID);
1016 ASSERT(btrfs_root_id(dest) != BTRFS_TREE_RELOC_OBJECTID);
1017
1018 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1019 again:
1020 slot = path->slots[lowest_level];
1021 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1022
1023 eb = btrfs_lock_root_node(dest);
1024 level = btrfs_header_level(eb);
1025
1026 if (level < lowest_level) {
1027 btrfs_tree_unlock(eb);
1028 free_extent_buffer(eb);
1029 return 0;
1030 }
1031
1032 if (cow) {
1033 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
1034 BTRFS_NESTING_COW);
1035 if (ret) {
1036 btrfs_tree_unlock(eb);
1037 free_extent_buffer(eb);
1038 return ret;
1039 }
1040 }
1041
1042 if (next_key) {
1043 next_key->objectid = (u64)-1;
1044 next_key->type = (u8)-1;
1045 next_key->offset = (u64)-1;
1046 }
1047
1048 parent = eb;
1049 while (1) {
1050 level = btrfs_header_level(parent);
1051 ASSERT(level >= lowest_level);
1052
1053 ret = btrfs_bin_search(parent, 0, &key, &slot);
1054 if (ret < 0)
1055 break;
1056 if (ret && slot > 0)
1057 slot--;
1058
1059 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1060 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1061
1062 old_bytenr = btrfs_node_blockptr(parent, slot);
1063 blocksize = fs_info->nodesize;
1064 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1065
1066 if (level <= max_level) {
1067 eb = path->nodes[level];
1068 new_bytenr = btrfs_node_blockptr(eb,
1069 path->slots[level]);
1070 new_ptr_gen = btrfs_node_ptr_generation(eb,
1071 path->slots[level]);
1072 } else {
1073 new_bytenr = 0;
1074 new_ptr_gen = 0;
1075 }
1076
1077 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1078 ret = level;
1079 break;
1080 }
1081
1082 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1083 memcmp_node_keys(parent, slot, path, level)) {
1084 if (level <= lowest_level) {
1085 ret = 0;
1086 break;
1087 }
1088
1089 eb = btrfs_read_node_slot(parent, slot);
1090 if (IS_ERR(eb)) {
1091 ret = PTR_ERR(eb);
1092 break;
1093 }
1094 btrfs_tree_lock(eb);
1095 if (cow) {
1096 ret = btrfs_cow_block(trans, dest, eb, parent,
1097 slot, &eb,
1098 BTRFS_NESTING_COW);
1099 if (ret) {
1100 btrfs_tree_unlock(eb);
1101 free_extent_buffer(eb);
1102 break;
1103 }
1104 }
1105
1106 btrfs_tree_unlock(parent);
1107 free_extent_buffer(parent);
1108
1109 parent = eb;
1110 continue;
1111 }
1112
1113 if (!cow) {
1114 btrfs_tree_unlock(parent);
1115 free_extent_buffer(parent);
1116 cow = 1;
1117 goto again;
1118 }
1119
1120 btrfs_node_key_to_cpu(path->nodes[level], &key,
1121 path->slots[level]);
1122 btrfs_release_path(path);
1123
1124 path->lowest_level = level;
1125 set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1126 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1127 clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1128 path->lowest_level = 0;
1129 if (ret) {
1130 if (ret > 0)
1131 ret = -ENOENT;
1132 break;
1133 }
1134
1135 /*
1136 * Inform qgroup to trace both subtrees.
1137 *
1138 * We must trace both trees.
1139 * 1) Tree reloc subtree
1140 * If not traced, we will leak data numbers
1141 * 2) Fs subtree
1142 * If not traced, we will double count old data
1143 *
1144 * We don't scan the subtree right now, but only record
1145 * the swapped tree blocks.
1146 * The real subtree rescan is delayed until we have new
1147 * CoW on the subtree root node before transaction commit.
1148 */
1149 ret = btrfs_qgroup_add_swapped_blocks(dest,
1150 rc->block_group, parent, slot,
1151 path->nodes[level], path->slots[level],
1152 last_snapshot);
1153 if (ret < 0)
1154 break;
1155 /*
1156 * swap blocks in fs tree and reloc tree.
1157 */
1158 btrfs_set_node_blockptr(parent, slot, new_bytenr);
1159 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1160
1161 btrfs_set_node_blockptr(path->nodes[level],
1162 path->slots[level], old_bytenr);
1163 btrfs_set_node_ptr_generation(path->nodes[level],
1164 path->slots[level], old_ptr_gen);
1165
1166 ref.action = BTRFS_ADD_DELAYED_REF;
1167 ref.bytenr = old_bytenr;
1168 ref.num_bytes = blocksize;
1169 ref.parent = path->nodes[level]->start;
1170 ref.owning_root = btrfs_root_id(src);
1171 ref.ref_root = btrfs_root_id(src);
1172 btrfs_init_tree_ref(&ref, level - 1, 0, true);
1173 ret = btrfs_inc_extent_ref(trans, &ref);
1174 if (ret) {
1175 btrfs_abort_transaction(trans, ret);
1176 break;
1177 }
1178
1179 ref.action = BTRFS_ADD_DELAYED_REF;
1180 ref.bytenr = new_bytenr;
1181 ref.num_bytes = blocksize;
1182 ref.parent = 0;
1183 ref.owning_root = btrfs_root_id(dest);
1184 ref.ref_root = btrfs_root_id(dest);
1185 btrfs_init_tree_ref(&ref, level - 1, 0, true);
1186 ret = btrfs_inc_extent_ref(trans, &ref);
1187 if (ret) {
1188 btrfs_abort_transaction(trans, ret);
1189 break;
1190 }
1191
1192 /* We don't know the real owning_root, use 0. */
1193 ref.action = BTRFS_DROP_DELAYED_REF;
1194 ref.bytenr = new_bytenr;
1195 ref.num_bytes = blocksize;
1196 ref.parent = path->nodes[level]->start;
1197 ref.owning_root = 0;
1198 ref.ref_root = btrfs_root_id(src);
1199 btrfs_init_tree_ref(&ref, level - 1, 0, true);
1200 ret = btrfs_free_extent(trans, &ref);
1201 if (ret) {
1202 btrfs_abort_transaction(trans, ret);
1203 break;
1204 }
1205
1206 /* We don't know the real owning_root, use 0. */
1207 ref.action = BTRFS_DROP_DELAYED_REF;
1208 ref.bytenr = old_bytenr;
1209 ref.num_bytes = blocksize;
1210 ref.parent = 0;
1211 ref.owning_root = 0;
1212 ref.ref_root = btrfs_root_id(dest);
1213 btrfs_init_tree_ref(&ref, level - 1, 0, true);
1214 ret = btrfs_free_extent(trans, &ref);
1215 if (ret) {
1216 btrfs_abort_transaction(trans, ret);
1217 break;
1218 }
1219
1220 btrfs_unlock_up_safe(path, 0);
1221
1222 ret = level;
1223 break;
1224 }
1225 btrfs_tree_unlock(parent);
1226 free_extent_buffer(parent);
1227 return ret;
1228 }
1229
1230 /*
1231 * helper to find next relocated block in reloc tree
1232 */
1233 static noinline_for_stack
1234 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1235 int *level)
1236 {
1237 struct extent_buffer *eb;
1238 int i;
1239 u64 last_snapshot;
1240 u32 nritems;
1241
1242 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1243
1244 for (i = 0; i < *level; i++) {
1245 free_extent_buffer(path->nodes[i]);
1246 path->nodes[i] = NULL;
1247 }
1248
1249 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1250 eb = path->nodes[i];
1251 nritems = btrfs_header_nritems(eb);
1252 while (path->slots[i] + 1 < nritems) {
1253 path->slots[i]++;
1254 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1255 last_snapshot)
1256 continue;
1257
1258 *level = i;
1259 return 0;
1260 }
1261 free_extent_buffer(path->nodes[i]);
1262 path->nodes[i] = NULL;
1263 }
1264 return 1;
1265 }
1266
1267 /*
1268 * walk down reloc tree to find relocated block of lowest level
1269 */
1270 static noinline_for_stack
1271 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1272 int *level)
1273 {
1274 struct extent_buffer *eb = NULL;
1275 int i;
1276 u64 ptr_gen = 0;
1277 u64 last_snapshot;
1278 u32 nritems;
1279
1280 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1281
1282 for (i = *level; i > 0; i--) {
1283 eb = path->nodes[i];
1284 nritems = btrfs_header_nritems(eb);
1285 while (path->slots[i] < nritems) {
1286 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1287 if (ptr_gen > last_snapshot)
1288 break;
1289 path->slots[i]++;
1290 }
1291 if (path->slots[i] >= nritems) {
1292 if (i == *level)
1293 break;
1294 *level = i + 1;
1295 return 0;
1296 }
1297 if (i == 1) {
1298 *level = i;
1299 return 0;
1300 }
1301
1302 eb = btrfs_read_node_slot(eb, path->slots[i]);
1303 if (IS_ERR(eb))
1304 return PTR_ERR(eb);
1305 BUG_ON(btrfs_header_level(eb) != i - 1);
1306 path->nodes[i - 1] = eb;
1307 path->slots[i - 1] = 0;
1308 }
1309 return 1;
1310 }
1311
1312 /*
1313 * invalidate extent cache for file extents whose keys are in the range
1314 * [min_key, max_key)
1315 */
1316 static int invalidate_extent_cache(struct btrfs_root *root,
1317 const struct btrfs_key *min_key,
1318 const struct btrfs_key *max_key)
1319 {
1320 struct btrfs_fs_info *fs_info = root->fs_info;
1321 struct btrfs_inode *inode = NULL;
1322 u64 objectid;
1323 u64 start, end;
1324 u64 ino;
1325
1326 objectid = min_key->objectid;
1327 while (1) {
1328 struct extent_state *cached_state = NULL;
1329
1330 cond_resched();
1331 if (inode)
1332 iput(&inode->vfs_inode);
1333
1334 if (objectid > max_key->objectid)
1335 break;
1336
1337 inode = btrfs_find_first_inode(root, objectid);
1338 if (!inode)
1339 break;
1340 ino = btrfs_ino(inode);
1341
1342 if (ino > max_key->objectid) {
1343 iput(&inode->vfs_inode);
1344 break;
1345 }
1346
1347 objectid = ino + 1;
1348 if (!S_ISREG(inode->vfs_inode.i_mode))
1349 continue;
1350
1351 if (unlikely(min_key->objectid == ino)) {
1352 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1353 continue;
1354 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1355 start = 0;
1356 else {
1357 start = min_key->offset;
1358 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
1359 }
1360 } else {
1361 start = 0;
1362 }
1363
1364 if (unlikely(max_key->objectid == ino)) {
1365 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1366 continue;
1367 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1368 end = (u64)-1;
1369 } else {
1370 if (max_key->offset == 0)
1371 continue;
1372 end = max_key->offset;
1373 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1374 end--;
1375 }
1376 } else {
1377 end = (u64)-1;
1378 }
1379
1380 /* the lock_extent waits for read_folio to complete */
1381 lock_extent(&inode->io_tree, start, end, &cached_state);
1382 btrfs_drop_extent_map_range(inode, start, end, true);
1383 unlock_extent(&inode->io_tree, start, end, &cached_state);
1384 }
1385 return 0;
1386 }
1387
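/*
 * Find the next key above the current path position, walking up from @level.
 *
 * Returns 0 and fills @key with the first key greater than the current slot,
 * or 1 if the path is exhausted.
 */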
1388 static int find_next_key(struct btrfs_path *path, int level,
1389 struct btrfs_key *key)
1390
1391 {
1392 while (level < BTRFS_MAX_LEVEL) {
1393 if (!path->nodes[level])
1394 break;
1395 if (path->slots[level] + 1 <
1396 btrfs_header_nritems(path->nodes[level])) {
1397 btrfs_node_key_to_cpu(path->nodes[level], key,
1398 path->slots[level] + 1);
1399 return 0;
1400 }
1401 level++;
1402 }
1403 return 1;
1404 }
1405
1406 /*
1407 * Insert current subvolume into reloc_control::dirty_subvol_roots
1408 */
1409 static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
1410 struct reloc_control *rc,
1411 struct btrfs_root *root)
1412 {
1413 struct btrfs_root *reloc_root = root->reloc_root;
1414 struct btrfs_root_item *reloc_root_item;
1415 int ret;
1416
1417 /* @root must be a subvolume tree root with a valid reloc tree */
1418 ASSERT(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
1419 ASSERT(reloc_root);
1420
1421 reloc_root_item = &reloc_root->root_item;
1422 memset(&reloc_root_item->drop_progress, 0,
1423 sizeof(reloc_root_item->drop_progress));
1424 btrfs_set_root_drop_level(reloc_root_item, 0);
1425 btrfs_set_root_refs(reloc_root_item, 0);
1426 ret = btrfs_update_reloc_root(trans, root);
1427 if (ret)
1428 return ret;
1429
1430 if (list_empty(&root->reloc_dirty_list)) {
1431 btrfs_grab_root(root);
1432 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
1433 }
1434
1435 return 0;
1436 }
1437
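/*
 * Clean up every subvolume queued on rc->dirty_subvol_roots.
 *
 * For merged subvolumes the now detached reloc root is dropped, for orphan
 * reloc trees the tree itself is dropped. Returns the first error hit, if any.
 */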
1438 static int clean_dirty_subvols(struct reloc_control *rc)
1439 {
1440 struct btrfs_root *root;
1441 struct btrfs_root *next;
1442 int ret = 0;
1443 int ret2;
1444
1445 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
1446 reloc_dirty_list) {
1447 if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
1448 /* Merged subvolume, cleanup its reloc root */
1449 struct btrfs_root *reloc_root = root->reloc_root;
1450
1451 list_del_init(&root->reloc_dirty_list);
1452 root->reloc_root = NULL;
1453 /*
1454 * Need barrier to ensure clear_bit() only happens after
1455 * root->reloc_root = NULL. Pairs with have_reloc_root.
1456 */
1457 smp_wmb();
1458 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1459 if (reloc_root) {
1460 /*
1461 * btrfs_drop_snapshot drops our ref we hold for
1462 * ->reloc_root. If it fails however we must
1463 * drop the ref ourselves.
1464 */
1465 ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
1466 if (ret2 < 0) {
1467 btrfs_put_root(reloc_root);
1468 if (!ret)
1469 ret = ret2;
1470 }
1471 }
1472 btrfs_put_root(root);
1473 } else {
1474 /* Orphan reloc tree, just clean it up */
1475 ret2 = btrfs_drop_snapshot(root, 0, 1);
1476 if (ret2 < 0) {
1477 btrfs_put_root(root);
1478 if (!ret)
1479 ret = ret2;
1480 }
1481 }
1482 }
1483 return ret;
1484 }
1485
1486 /*
1487 * merge the relocated tree blocks in the reloc tree with the corresponding
1488 * fs tree.
1489 */
1490 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1491 struct btrfs_root *root)
1492 {
1493 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1494 struct btrfs_key key;
1495 struct btrfs_key next_key;
1496 struct btrfs_trans_handle *trans = NULL;
1497 struct btrfs_root *reloc_root;
1498 struct btrfs_root_item *root_item;
1499 struct btrfs_path *path;
1500 struct extent_buffer *leaf;
1501 int reserve_level;
1502 int level;
1503 int max_level;
1504 int replaced = 0;
1505 int ret = 0;
1506 u32 min_reserved;
1507
1508 path = btrfs_alloc_path();
1509 if (!path)
1510 return -ENOMEM;
1511 path->reada = READA_FORWARD;
1512
1513 reloc_root = root->reloc_root;
1514 root_item = &reloc_root->root_item;
1515
1516 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1517 level = btrfs_root_level(root_item);
1518 atomic_inc(&reloc_root->node->refs);
1519 path->nodes[level] = reloc_root->node;
1520 path->slots[level] = 0;
1521 } else {
1522 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1523
1524 level = btrfs_root_drop_level(root_item);
1525 BUG_ON(level == 0);
1526 path->lowest_level = level;
1527 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1528 path->lowest_level = 0;
1529 if (ret < 0) {
1530 btrfs_free_path(path);
1531 return ret;
1532 }
1533
1534 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
1535 path->slots[level]);
1536 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
1537
1538 btrfs_unlock_up_safe(path, 0);
1539 }
1540
1541 /*
1542 * In merge_reloc_root(), we modify the upper level pointer to swap the
1543 * tree blocks between reloc tree and subvolume tree. Thus for tree
1544 * block COW, we COW at most from level 1 to root level for each tree.
1545 *
1546 * Thus the needed metadata size is at most root_level * nodesize,
1547 * and * 2 since we have two trees to COW.
1548 */
1549 reserve_level = max_t(int, 1, btrfs_root_level(root_item));
1550 min_reserved = fs_info->nodesize * reserve_level * 2;
1551 memset(&next_key, 0, sizeof(next_key));
1552
1553 while (1) {
1554 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
1555 min_reserved,
1556 BTRFS_RESERVE_FLUSH_LIMIT);
1557 if (ret)
1558 goto out;
1559 trans = btrfs_start_transaction(root, 0);
1560 if (IS_ERR(trans)) {
1561 ret = PTR_ERR(trans);
1562 trans = NULL;
1563 goto out;
1564 }
1565
1566 /*
1567 * At this point we no longer have a reloc_control, so we can't
1568 * depend on btrfs_init_reloc_root to update our last_trans.
1569 *
1570 * But that's ok, we started the trans handle on our
1571 * corresponding fs_root, which means it's been added to the
1572 * dirty list. At commit time we'll still call
1573 * btrfs_update_reloc_root() and update our root item
1574 * appropriately.
1575 */
1576 btrfs_set_root_last_trans(reloc_root, trans->transid);
1577 trans->block_rsv = rc->block_rsv;
1578
1579 replaced = 0;
1580 max_level = level;
1581
1582 ret = walk_down_reloc_tree(reloc_root, path, &level);
1583 if (ret < 0)
1584 goto out;
1585 if (ret > 0)
1586 break;
1587
1588 if (!find_next_key(path, level, &key) &&
1589 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1590 ret = 0;
1591 } else {
1592 ret = replace_path(trans, rc, root, reloc_root, path,
1593 &next_key, level, max_level);
1594 }
1595 if (ret < 0)
1596 goto out;
1597 if (ret > 0) {
1598 level = ret;
1599 btrfs_node_key_to_cpu(path->nodes[level], &key,
1600 path->slots[level]);
1601 replaced = 1;
1602 }
1603
1604 ret = walk_up_reloc_tree(reloc_root, path, &level);
1605 if (ret > 0)
1606 break;
1607
1608 BUG_ON(level == 0);
1609 /*
1610 * save the merging progress in the drop_progress.
1611 * this is OK since root refs == 1 in this case.
1612 */
1613 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
1614 path->slots[level]);
1615 btrfs_set_root_drop_level(root_item, level);
1616
1617 btrfs_end_transaction_throttle(trans);
1618 trans = NULL;
1619
1620 btrfs_btree_balance_dirty(fs_info);
1621
1622 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1623 invalidate_extent_cache(root, &key, &next_key);
1624 }
1625
1626 /*
1627 * handle the case where only one block in the fs tree needs to be
1628 * relocated and the block is the tree root.
1629 */
1630 leaf = btrfs_lock_root_node(root);
1631 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
1632 BTRFS_NESTING_COW);
1633 btrfs_tree_unlock(leaf);
1634 free_extent_buffer(leaf);
1635 out:
1636 btrfs_free_path(path);
1637
1638 if (ret == 0) {
1639 ret = insert_dirty_subvol(trans, rc, root);
1640 if (ret)
1641 btrfs_abort_transaction(trans, ret);
1642 }
1643
1644 if (trans)
1645 btrfs_end_transaction_throttle(trans);
1646
1647 btrfs_btree_balance_dirty(fs_info);
1648
1649 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1650 invalidate_extent_cache(root, &key, &next_key);
1651
1652 return ret;
1653 }
1654
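/*
 * Prepare for merging reloc trees with their fs trees.
 *
 * Reserves metadata space for the merge, sets the root item refs of every
 * reloc tree to 1 (so btrfs_recover_relocation() knows to resume merging
 * after a crash), updates the reloc root items and commits the transaction.
 */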
1655 static noinline_for_stack
1656 int prepare_to_merge(struct reloc_control *rc, int err)
1657 {
1658 struct btrfs_root *root = rc->extent_root;
1659 struct btrfs_fs_info *fs_info = root->fs_info;
1660 struct btrfs_root *reloc_root;
1661 struct btrfs_trans_handle *trans;
1662 LIST_HEAD(reloc_roots);
1663 u64 num_bytes = 0;
1664 int ret;
1665
1666 mutex_lock(&fs_info->reloc_mutex);
1667 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1668 rc->merging_rsv_size += rc->nodes_relocated * 2;
1669 mutex_unlock(&fs_info->reloc_mutex);
1670
1671 again:
1672 if (!err) {
1673 num_bytes = rc->merging_rsv_size;
1674 ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
1675 BTRFS_RESERVE_FLUSH_ALL);
1676 if (ret)
1677 err = ret;
1678 }
1679
1680 trans = btrfs_join_transaction(rc->extent_root);
1681 if (IS_ERR(trans)) {
1682 if (!err)
1683 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1684 num_bytes, NULL);
1685 return PTR_ERR(trans);
1686 }
1687
1688 if (!err) {
1689 if (num_bytes != rc->merging_rsv_size) {
1690 btrfs_end_transaction(trans);
1691 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1692 num_bytes, NULL);
1693 goto again;
1694 }
1695 }
1696
1697 rc->merge_reloc_tree = true;
1698
1699 while (!list_empty(&rc->reloc_roots)) {
1700 reloc_root = list_entry(rc->reloc_roots.next,
1701 struct btrfs_root, root_list);
1702 list_del_init(&reloc_root->root_list);
1703
1704 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1705 false);
1706 if (IS_ERR(root)) {
1707 /*
1708 * Even if we have an error we need this reloc root
1709 * back on our list so we can clean up properly.
1710 */
1711 list_add(&reloc_root->root_list, &reloc_roots);
1712 btrfs_abort_transaction(trans, (int)PTR_ERR(root));
1713 if (!err)
1714 err = PTR_ERR(root);
1715 break;
1716 }
1717
1718 if (unlikely(root->reloc_root != reloc_root)) {
1719 if (root->reloc_root) {
1720 btrfs_err(fs_info,
1721 "reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
1722 btrfs_root_id(root),
1723 btrfs_root_id(root->reloc_root),
1724 root->reloc_root->root_key.type,
1725 root->reloc_root->root_key.offset,
1726 btrfs_root_generation(
1727 &root->reloc_root->root_item),
1728 btrfs_root_id(reloc_root),
1729 reloc_root->root_key.type,
1730 reloc_root->root_key.offset,
1731 btrfs_root_generation(
1732 &reloc_root->root_item));
1733 } else {
1734 btrfs_err(fs_info,
1735 "reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
1736 btrfs_root_id(root),
1737 btrfs_root_id(reloc_root),
1738 reloc_root->root_key.type,
1739 reloc_root->root_key.offset,
1740 btrfs_root_generation(
1741 &reloc_root->root_item));
1742 }
1743 list_add(&reloc_root->root_list, &reloc_roots);
1744 btrfs_put_root(root);
1745 btrfs_abort_transaction(trans, -EUCLEAN);
1746 if (!err)
1747 err = -EUCLEAN;
1748 break;
1749 }
1750
1751 /*
1752 * set reference count to 1, so btrfs_recover_relocation
1753 * knows it should resume merging
1754 */
1755 if (!err)
1756 btrfs_set_root_refs(&reloc_root->root_item, 1);
1757 ret = btrfs_update_reloc_root(trans, root);
1758
1759 /*
1760 * Even if we have an error we need this reloc root back on our
1761 * list so we can clean up properly.
1762 */
1763 list_add(&reloc_root->root_list, &reloc_roots);
1764 btrfs_put_root(root);
1765
1766 if (ret) {
1767 btrfs_abort_transaction(trans, ret);
1768 if (!err)
1769 err = ret;
1770 break;
1771 }
1772 }
1773
1774 list_splice(&reloc_roots, &rc->reloc_roots);
1775
1776 if (!err)
1777 err = btrfs_commit_transaction(trans);
1778 else
1779 btrfs_end_transaction(trans);
1780 return err;
1781 }
1782
1783 static noinline_for_stack
1784 void free_reloc_roots(struct list_head *list)
1785 {
1786 struct btrfs_root *reloc_root, *tmp;
1787
1788 list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1789 __del_reloc_root(reloc_root);
1790 }
1791
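/*
 * Merge all reloc trees on rc->reloc_roots back into their fs trees.
 *
 * Reloc roots whose root item refs dropped to zero are not merged, they are
 * only queued on rc->dirty_subvol_roots for later cleanup.
 */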
1792 static noinline_for_stack
1793 void merge_reloc_roots(struct reloc_control *rc)
1794 {
1795 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1796 struct btrfs_root *root;
1797 struct btrfs_root *reloc_root;
1798 LIST_HEAD(reloc_roots);
1799 int found = 0;
1800 int ret = 0;
1801 again:
1802 root = rc->extent_root;
1803
1804 /*
1805 * this serializes us with btrfs_record_root_in_transaction,
1806 * we have to make sure nobody is in the middle of
1807 * adding their roots to the list while we are
1808 * doing this splice
1809 */
1810 mutex_lock(&fs_info->reloc_mutex);
1811 list_splice_init(&rc->reloc_roots, &reloc_roots);
1812 mutex_unlock(&fs_info->reloc_mutex);
1813
1814 while (!list_empty(&reloc_roots)) {
1815 found = 1;
1816 reloc_root = list_entry(reloc_roots.next,
1817 struct btrfs_root, root_list);
1818
1819 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1820 false);
1821 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1822 if (WARN_ON(IS_ERR(root))) {
1823 /*
1824 * For recovery we read the fs roots on mount,
1825 * and if we didn't find the root then we marked
1826 * the reloc root as a garbage root. For normal
1827 * relocation obviously the root should exist in
1828 * memory. However there's no reason we can't
1829 * handle the error properly here just in case.
1830 */
1831 ret = PTR_ERR(root);
1832 goto out;
1833 }
1834 if (WARN_ON(root->reloc_root != reloc_root)) {
1835 /*
1836 * This can happen if on-disk metadata has some
1837 * corruption, e.g. bad reloc tree key offset.
1838 */
1839 ret = -EINVAL;
1840 goto out;
1841 }
1842 ret = merge_reloc_root(rc, root);
1843 btrfs_put_root(root);
1844 if (ret) {
1845 if (list_empty(&reloc_root->root_list))
1846 list_add_tail(&reloc_root->root_list,
1847 &reloc_roots);
1848 goto out;
1849 }
1850 } else {
1851 if (!IS_ERR(root)) {
1852 if (root->reloc_root == reloc_root) {
1853 root->reloc_root = NULL;
1854 btrfs_put_root(reloc_root);
1855 }
1856 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
1857 &root->state);
1858 btrfs_put_root(root);
1859 }
1860
1861 list_del_init(&reloc_root->root_list);
1862 /* Don't forget to queue this reloc root for cleanup */
1863 list_add_tail(&reloc_root->reloc_dirty_list,
1864 &rc->dirty_subvol_roots);
1865 }
1866 }
1867
1868 if (found) {
1869 found = 0;
1870 goto again;
1871 }
1872 out:
1873 if (ret) {
1874 btrfs_handle_fs_error(fs_info, ret, NULL);
1875 free_reloc_roots(&reloc_roots);
1876
1877 /* new reloc root may be added */
1878 mutex_lock(&fs_info->reloc_mutex);
1879 list_splice_init(&rc->reloc_roots, &reloc_roots);
1880 mutex_unlock(&fs_info->reloc_mutex);
1881 free_reloc_roots(&reloc_roots);
1882 }
1883
1884 /*
1885 * We used to have
1886 *
1887 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
1888 *
1889 * here, but it's wrong. If we fail to start the transaction in
1890 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
1891 * have actually been removed from the reloc_root_tree rb tree. This is
1892 * fine because we're bailing here, and we hold a reference on the root
1893 * for the list that holds it, so these roots will be cleaned up when we
1894 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
1895 * will be cleaned up on unmount.
1896 *
1897 * The remaining nodes will be cleaned up by free_reloc_control.
1898 */
1899 }
1900
1901 static void free_block_list(struct rb_root *blocks)
1902 {
1903 struct tree_block *block;
1904 struct rb_node *rb_node;
1905 while ((rb_node = rb_first(blocks))) {
1906 block = rb_entry(rb_node, struct tree_block, rb_node);
1907 rb_erase(rb_node, blocks);
1908 kfree(block);
1909 }
1910 }
1911
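/*
 * Make sure the subvolume tree that owns @reloc_root has been recorded in
 * the current transaction.  Returns 0 right away if the reloc root was
 * already recorded in this transaction, and an error if the owning root
 * cannot be found or does not match the reloc root (corruption).
 */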
1912 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
1913 struct btrfs_root *reloc_root)
1914 {
1915 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
1916 struct btrfs_root *root;
1917 int ret;
1918
1919 if (btrfs_get_root_last_trans(reloc_root) == trans->transid)
1920 return 0;
1921
1922 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
1923
1924 /*
1925 * This should succeed, since we can't have a reloc root without having
1926 * already looked up the actual root and created the reloc root for this
1927 * root.
1928 *
1929 * However if there's some sort of corruption where we have a ref to a
1930 * reloc root without a corresponding root this could return ENOENT.
1931 */
1932 if (IS_ERR(root)) {
1933 ASSERT(0);
1934 return PTR_ERR(root);
1935 }
1936 if (root->reloc_root != reloc_root) {
1937 ASSERT(0);
1938 btrfs_err(fs_info,
1939 "root %llu has two reloc roots associated with it",
1940 reloc_root->root_key.offset);
1941 btrfs_put_root(root);
1942 return -EUCLEAN;
1943 }
1944 ret = btrfs_record_root_in_trans(trans, root);
1945 btrfs_put_root(root);
1946
1947 return ret;
1948 }
1949
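/*
 * Walk up the backref cache from @node to the tree that owns it and return
 * that tree's reloc root, making sure the owning root is recorded in the
 * current transaction first.  The walked path is also stored in
 * rc->backref_cache.path[] for btrfs_reloc_cow_block().
 */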
1950 static noinline_for_stack
1951 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
1952 struct reloc_control *rc,
1953 struct btrfs_backref_node *node,
1954 struct btrfs_backref_edge *edges[])
1955 {
1956 struct btrfs_backref_node *next;
1957 struct btrfs_root *root;
1958 int index = 0;
1959 int ret;
1960
1961 next = walk_up_backref(node, edges, &index);
1962 root = next->root;
1963
1964 /*
1965 * If there is no root, then our references for this block are
1966 * incomplete, as we should be able to walk all the way up to a block
1967 * that is owned by a root.
1968 *
1969 * This path is only for SHAREABLE roots, so if we come upon a
1970 * non-SHAREABLE root then we have backrefs that resolve improperly.
1971 *
1972 * Both of these cases indicate file system corruption, or a bug in the
1973 * backref walking code.
1974 */
1975 if (unlikely(!root)) {
1976 btrfs_err(trans->fs_info,
1977 "bytenr %llu doesn't have a backref path ending in a root",
1978 node->bytenr);
1979 return ERR_PTR(-EUCLEAN);
1980 }
1981 if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
1982 btrfs_err(trans->fs_info,
1983 "bytenr %llu has multiple refs with one ending in a non-shareable root",
1984 node->bytenr);
1985 return ERR_PTR(-EUCLEAN);
1986 }
1987
1988 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
1989 ret = record_reloc_root_in_trans(trans, root);
1990 if (ret)
1991 return ERR_PTR(ret);
1992 goto found;
1993 }
1994
1995 ret = btrfs_record_root_in_trans(trans, root);
1996 if (ret)
1997 return ERR_PTR(ret);
1998 root = root->reloc_root;
1999
2000 /*
2001 * We could have raced with another thread which failed, so
2002 * root->reloc_root may not be set, return ENOENT in this case.
2003 */
2004 if (!root)
2005 return ERR_PTR(-ENOENT);
2006
2007 if (next->new_bytenr) {
2008 /*
2009 * We just created the reloc root, so we shouldn't have
2010 * ->new_bytenr set yet. If it is then we have multiple roots
2011 * pointing at the same bytenr which indicates corruption, or
2012 * we've made a mistake in the backref walking code.
2013 */
2014 ASSERT(next->new_bytenr == 0);
2015 btrfs_err(trans->fs_info,
2016 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
2017 node->bytenr, next->bytenr);
2018 return ERR_PTR(-EUCLEAN);
2019 }
2020
2021 next->new_bytenr = root->node->start;
2022 btrfs_put_root(next->root);
2023 next->root = btrfs_grab_root(root);
2024 ASSERT(next->root);
2025 mark_block_processed(rc, next);
2026 found:
2027 next = node;
2028 /* setup backref node path for btrfs_reloc_cow_block */
2029 while (1) {
2030 rc->backref_cache.path[next->level] = next;
2031 if (--index < 0)
2032 break;
2033 next = edges[index]->node[UPPER];
2034 }
2035 return root;
2036 }
2037
2038 /*
2039 * Select a tree root for relocation.
2040 *
2041 * Return NULL if the block is shareable but not the root block of its tree.
2042 * We should use do_relocation() in this case.
2043 *
2044 * Return a tree root pointer if the block is the root block of a tree.
2045 * Return -ENOENT if the block is the root of a reloc tree.
2046 */
2047 static noinline_for_stack
2048 struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2049 {
2050 struct btrfs_backref_node *next;
2051 struct btrfs_root *root;
2052 struct btrfs_root *fs_root = NULL;
2053 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2054 int index = 0;
2055
2056 next = node;
2057 while (1) {
2058 cond_resched();
2059 next = walk_up_backref(next, edges, &index);
2060 root = next->root;
2061
2062 /*
2063 * This can occur if we have incomplete extent refs leading all
2064 * the way up a particular path, in this case return -EUCLEAN.
2065 */
2066 if (!root)
2067 return ERR_PTR(-EUCLEAN);
2068
2069 /* No other choice for non-shareable tree */
2070 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2071 return root;
2072
2073 if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID)
2074 fs_root = root;
2075
2076 if (next != node)
2077 return NULL;
2078
2079 next = walk_down_backref(edges, &index);
2080 if (!next || next->level <= node->level)
2081 break;
2082 }
2083
2084 if (!fs_root)
2085 return ERR_PTR(-ENOENT);
2086 return fs_root;
2087 }
2088
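/*
 * Estimate how much metadata space relocating @node will need: one nodesize
 * for every not-yet-processed tree block reachable by walking up the backref
 * cache from @node (including @node itself).
 */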
2089 static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
2090 struct btrfs_backref_node *node)
2091 {
2092 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2093 struct btrfs_backref_node *next = node;
2094 struct btrfs_backref_edge *edge;
2095 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2096 u64 num_bytes = 0;
2097 int index = 0;
2098
2099 BUG_ON(node->processed);
2100
2101 while (next) {
2102 cond_resched();
2103 while (1) {
2104 if (next->processed)
2105 break;
2106
2107 num_bytes += fs_info->nodesize;
2108
2109 if (list_empty(&next->upper))
2110 break;
2111
2112 edge = list_entry(next->upper.next,
2113 struct btrfs_backref_edge, list[LOWER]);
2114 edges[index++] = edge;
2115 next = edge->node[UPPER];
2116 }
2117 next = walk_down_backref(edges, &index);
2118 }
2119 return num_bytes;
2120 }
2121
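/*
 * Top up rc->block_rsv by @num_bytes using only the limited flushing allowed
 * inside a running transaction.  On failure the reservation target is grown
 * and -EAGAIN is returned so the caller can end the transaction, flush, and
 * retry.
 */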
2122 static int refill_metadata_space(struct btrfs_trans_handle *trans,
2123 struct reloc_control *rc, u64 num_bytes)
2124 {
2125 struct btrfs_fs_info *fs_info = trans->fs_info;
2126 int ret;
2127
2128 trans->block_rsv = rc->block_rsv;
2129 rc->reserved_bytes += num_bytes;
2130
2131 /*
2132 * We are under a transaction here so we can only do limited flushing.
2133 * If we get an enospc just kick back -EAGAIN so we know to drop the
2134 * transaction and try to refill when we can flush all the things.
2135 */
2136 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2137 BTRFS_RESERVE_FLUSH_LIMIT);
2138 if (ret) {
2139 u64 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2140
2141 while (tmp <= rc->reserved_bytes)
2142 tmp <<= 1;
2143 /*
2144 * only one thread can access block_rsv at this point,
2145 * so we don't need to hold a lock to protect block_rsv.
2146 * we expand the reservation size here to allow enough
2147 * space for relocation and we will return early in the
2148 * ENOSPC case.
2149 */
2150 rc->block_rsv->size = tmp + fs_info->nodesize *
2151 RELOCATION_RESERVED_NODES;
2152 return -EAGAIN;
2153 }
2154
2155 return 0;
2156 }
2157
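/*
 * Reserve metadata space for relocating @node.  Twice the size estimated by
 * calcu_metadata_size() is reserved, presumably to cover COW of both the
 * source tree blocks and the corresponding reloc tree blocks.
 */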
2158 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2159 struct reloc_control *rc,
2160 struct btrfs_backref_node *node)
2161 {
2162 u64 num_bytes;
2163
2164 num_bytes = calcu_metadata_size(rc, node) * 2;
2165 return refill_metadata_space(trans, rc, num_bytes);
2166 }
2167
2168 /*
2169 * relocate a block tree, and then update pointers in upper level
2170 * blocks that reference the block to point to the new location.
2171 *
2172 * if called by link_to_upper, the block has already been relocated.
2173 * in that case this function just updates pointers.
2174 */
2175 static int do_relocation(struct btrfs_trans_handle *trans,
2176 struct reloc_control *rc,
2177 struct btrfs_backref_node *node,
2178 struct btrfs_key *key,
2179 struct btrfs_path *path, int lowest)
2180 {
2181 struct btrfs_backref_node *upper;
2182 struct btrfs_backref_edge *edge;
2183 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2184 struct btrfs_root *root;
2185 struct extent_buffer *eb;
2186 u32 blocksize;
2187 u64 bytenr;
2188 int slot;
2189 int ret = 0;
2190
2191 /*
2192 * If we are lowest then this is the first time we're processing this
2193 * block, and thus shouldn't have an eb associated with it yet.
2194 */
2195 ASSERT(!lowest || !node->eb);
2196
2197 path->lowest_level = node->level + 1;
2198 rc->backref_cache.path[node->level] = node;
2199 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2200 cond_resched();
2201
2202 upper = edge->node[UPPER];
2203 root = select_reloc_root(trans, rc, upper, edges);
2204 if (IS_ERR(root)) {
2205 ret = PTR_ERR(root);
2206 goto next;
2207 }
2208
2209 if (upper->eb && !upper->locked) {
2210 if (!lowest) {
2211 ret = btrfs_bin_search(upper->eb, 0, key, &slot);
2212 if (ret < 0)
2213 goto next;
2214 BUG_ON(ret);
2215 bytenr = btrfs_node_blockptr(upper->eb, slot);
2216 if (node->eb->start == bytenr)
2217 goto next;
2218 }
2219 btrfs_backref_drop_node_buffer(upper);
2220 }
2221
2222 if (!upper->eb) {
2223 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2224 if (ret) {
2225 if (ret > 0)
2226 ret = -ENOENT;
2227
2228 btrfs_release_path(path);
2229 break;
2230 }
2231
2232 if (!upper->eb) {
2233 upper->eb = path->nodes[upper->level];
2234 path->nodes[upper->level] = NULL;
2235 } else {
2236 BUG_ON(upper->eb != path->nodes[upper->level]);
2237 }
2238
2239 upper->locked = 1;
2240 path->locks[upper->level] = 0;
2241
2242 slot = path->slots[upper->level];
2243 btrfs_release_path(path);
2244 } else {
2245 ret = btrfs_bin_search(upper->eb, 0, key, &slot);
2246 if (ret < 0)
2247 goto next;
2248 BUG_ON(ret);
2249 }
2250
2251 bytenr = btrfs_node_blockptr(upper->eb, slot);
2252 if (lowest) {
2253 if (bytenr != node->bytenr) {
2254 btrfs_err(root->fs_info,
2255 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2256 bytenr, node->bytenr, slot,
2257 upper->eb->start);
2258 ret = -EIO;
2259 goto next;
2260 }
2261 } else {
2262 if (node->eb->start == bytenr)
2263 goto next;
2264 }
2265
2266 blocksize = root->fs_info->nodesize;
2267 eb = btrfs_read_node_slot(upper->eb, slot);
2268 if (IS_ERR(eb)) {
2269 ret = PTR_ERR(eb);
2270 goto next;
2271 }
2272 btrfs_tree_lock(eb);
2273
2274 if (!node->eb) {
2275 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2276 slot, &eb, BTRFS_NESTING_COW);
2277 btrfs_tree_unlock(eb);
2278 free_extent_buffer(eb);
2279 if (ret < 0)
2280 goto next;
2281 /*
2282 * We've just COWed this block, it should have updated
2283 * the correct backref node entry.
2284 */
2285 ASSERT(node->eb == eb);
2286 } else {
2287 struct btrfs_ref ref = {
2288 .action = BTRFS_ADD_DELAYED_REF,
2289 .bytenr = node->eb->start,
2290 .num_bytes = blocksize,
2291 .parent = upper->eb->start,
2292 .owning_root = btrfs_header_owner(upper->eb),
2293 .ref_root = btrfs_header_owner(upper->eb),
2294 };
2295
2296 btrfs_set_node_blockptr(upper->eb, slot,
2297 node->eb->start);
2298 btrfs_set_node_ptr_generation(upper->eb, slot,
2299 trans->transid);
2300 btrfs_mark_buffer_dirty(trans, upper->eb);
2301
2302 btrfs_init_tree_ref(&ref, node->level,
2303 btrfs_root_id(root), false);
2304 ret = btrfs_inc_extent_ref(trans, &ref);
2305 if (!ret)
2306 ret = btrfs_drop_subtree(trans, root, eb,
2307 upper->eb);
2308 if (ret)
2309 btrfs_abort_transaction(trans, ret);
2310 }
2311 next:
2312 if (!upper->pending)
2313 btrfs_backref_drop_node_buffer(upper);
2314 else
2315 btrfs_backref_unlock_node_buffer(upper);
2316 if (ret)
2317 break;
2318 }
2319
2320 if (!ret && node->pending) {
2321 btrfs_backref_drop_node_buffer(node);
2322 list_del_init(&node->list);
2323 node->pending = 0;
2324 }
2325
2326 path->lowest_level = 0;
2327
2328 /*
2329 * We should have allocated all of our space in the block rsv and thus
2330 * shouldn't ENOSPC.
2331 */
2332 ASSERT(ret != -ENOSPC);
2333 return ret;
2334 }
2335
2336 static int link_to_upper(struct btrfs_trans_handle *trans,
2337 struct reloc_control *rc,
2338 struct btrfs_backref_node *node,
2339 struct btrfs_path *path)
2340 {
2341 struct btrfs_key key;
2342
2343 btrfs_node_key_to_cpu(node->eb, &key, 0);
2344 return do_relocation(trans, rc, node, &key, path, 0);
2345 }
2346
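/*
 * Link all backref nodes still on the cache's pending lists to their upper
 * level blocks (pointer updates only, the blocks themselves were already
 * relocated).  @err is passed through; the first new error encountered is
 * returned instead.
 */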
2347 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2348 struct reloc_control *rc,
2349 struct btrfs_path *path, int err)
2350 {
2351 LIST_HEAD(list);
2352 struct btrfs_backref_cache *cache = &rc->backref_cache;
2353 struct btrfs_backref_node *node;
2354 int level;
2355 int ret;
2356
2357 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2358 while (!list_empty(&cache->pending[level])) {
2359 node = list_entry(cache->pending[level].next,
2360 struct btrfs_backref_node, list);
2361 list_move_tail(&node->list, &list);
2362 BUG_ON(!node->pending);
2363
2364 if (!err) {
2365 ret = link_to_upper(trans, rc, node, path);
2366 if (ret < 0)
2367 err = ret;
2368 }
2369 }
2370 list_splice_init(&list, &cache->pending[level]);
2371 }
2372 return err;
2373 }
2374
2375 /*
2376 * mark a block and all blocks directly/indirectly reference the block
2377 * as processed.
2378 */
2379 static void update_processed_blocks(struct reloc_control *rc,
2380 struct btrfs_backref_node *node)
2381 {
2382 struct btrfs_backref_node *next = node;
2383 struct btrfs_backref_edge *edge;
2384 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2385 int index = 0;
2386
2387 while (next) {
2388 cond_resched();
2389 while (1) {
2390 if (next->processed)
2391 break;
2392
2393 mark_block_processed(rc, next);
2394
2395 if (list_empty(&next->upper))
2396 break;
2397
2398 edge = list_entry(next->upper.next,
2399 struct btrfs_backref_edge, list[LOWER]);
2400 edges[index++] = edge;
2401 next = edge->node[UPPER];
2402 }
2403 next = walk_down_backref(edges, &index);
2404 }
2405 }
2406
2407 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2408 {
2409 u32 blocksize = rc->extent_root->fs_info->nodesize;
2410
2411 if (test_range_bit(&rc->processed_blocks, bytenr,
2412 bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
2413 return 1;
2414 return 0;
2415 }
2416
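/*
 * Read the tree block described by @block to fetch its first key, verifying
 * level, owner and transid via btrfs_tree_parent_check, and mark the key as
 * ready.
 */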
2417 static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2418 struct tree_block *block)
2419 {
2420 struct btrfs_tree_parent_check check = {
2421 .level = block->level,
2422 .owner_root = block->owner,
2423 .transid = block->key.offset
2424 };
2425 struct extent_buffer *eb;
2426
2427 eb = read_tree_block(fs_info, block->bytenr, &check);
2428 if (IS_ERR(eb))
2429 return PTR_ERR(eb);
2430 if (!extent_buffer_uptodate(eb)) {
2431 free_extent_buffer(eb);
2432 return -EIO;
2433 }
2434 if (block->level == 0)
2435 btrfs_item_key_to_cpu(eb, &block->key, 0);
2436 else
2437 btrfs_node_key_to_cpu(eb, &block->key, 0);
2438 free_extent_buffer(eb);
2439 block->key_ready = true;
2440 return 0;
2441 }
2442
2443 /*
2444 * helper function to relocate a tree block
2445 */
2446 static int relocate_tree_block(struct btrfs_trans_handle *trans,
2447 struct reloc_control *rc,
2448 struct btrfs_backref_node *node,
2449 struct btrfs_key *key,
2450 struct btrfs_path *path)
2451 {
2452 struct btrfs_root *root;
2453 int ret = 0;
2454
2455 if (!node)
2456 return 0;
2457
2458 /*
2459 * If we fail here we want to drop our backref_node because we are going
2460 * to start over and regenerate the tree for it.
2461 */
2462 ret = reserve_metadata_space(trans, rc, node);
2463 if (ret)
2464 goto out;
2465
2466 BUG_ON(node->processed);
2467 root = select_one_root(node);
2468 if (IS_ERR(root)) {
2469 ret = PTR_ERR(root);
2470
2471 /* See explanation in select_one_root for the -EUCLEAN case. */
2472 ASSERT(ret == -ENOENT);
2473 if (ret == -ENOENT) {
2474 ret = 0;
2475 update_processed_blocks(rc, node);
2476 }
2477 goto out;
2478 }
2479
2480 if (root) {
2481 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2482 /*
2483 * This block was the root block of a root, and this is
2484 * the first time we're processing the block and thus it
2485 * should not have had the ->new_bytenr modified.
2486 *
2487 * However in the case of corruption we could have
2488 * multiple refs pointing to the same block improperly,
2489 * and thus we would trip over these checks. ASSERT()
2490 * for the developer case, because it could indicate a
2491 * bug in the backref code, however error out for a
2492 * normal user in the case of corruption.
2493 */
2494 ASSERT(node->new_bytenr == 0);
2495 if (node->new_bytenr) {
2496 btrfs_err(root->fs_info,
2497 "bytenr %llu has improper references to it",
2498 node->bytenr);
2499 ret = -EUCLEAN;
2500 goto out;
2501 }
2502 ret = btrfs_record_root_in_trans(trans, root);
2503 if (ret)
2504 goto out;
2505 /*
2506 * Another thread could have failed, need to check if we
2507 * have reloc_root actually set.
2508 */
2509 if (!root->reloc_root) {
2510 ret = -ENOENT;
2511 goto out;
2512 }
2513 root = root->reloc_root;
2514 node->new_bytenr = root->node->start;
2515 btrfs_put_root(node->root);
2516 node->root = btrfs_grab_root(root);
2517 ASSERT(node->root);
2518 } else {
2519 btrfs_err(root->fs_info,
2520 "bytenr %llu resolved to a non-shareable root",
2521 node->bytenr);
2522 ret = -EUCLEAN;
2523 goto out;
2524 }
2525 if (!ret)
2526 update_processed_blocks(rc, node);
2527 } else {
2528 ret = do_relocation(trans, rc, node, key, path, 1);
2529 }
2530 out:
2531 if (ret || node->level == 0)
2532 btrfs_backref_cleanup_node(&rc->backref_cache, node);
2533 return ret;
2534 }
2535
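/*
 * Relocate a COW-only tree block (or a data reloc tree block): no backref
 * tree is needed, a btrfs_search_slot() with lowest_level set simply COWs
 * the path down to the block in its owning root.  The chunk root needs an
 * extra chunk metadata reservation around the search.
 */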
2536 static int relocate_cowonly_block(struct btrfs_trans_handle *trans,
2537 struct reloc_control *rc, struct tree_block *block,
2538 struct btrfs_path *path)
2539 {
2540 struct btrfs_fs_info *fs_info = trans->fs_info;
2541 struct btrfs_root *root;
2542 u64 num_bytes;
2543 int nr_levels;
2544 int ret;
2545
2546 root = btrfs_get_fs_root(fs_info, block->owner, true);
2547 if (IS_ERR(root))
2548 return PTR_ERR(root);
2549
2550 nr_levels = max(btrfs_header_level(root->node) - block->level, 0) + 1;
2551
2552 num_bytes = fs_info->nodesize * nr_levels;
2553 ret = refill_metadata_space(trans, rc, num_bytes);
2554 if (ret) {
2555 btrfs_put_root(root);
2556 return ret;
2557 }
2558 path->lowest_level = block->level;
2559 if (root == root->fs_info->chunk_root)
2560 btrfs_reserve_chunk_metadata(trans, false);
2561
2562 ret = btrfs_search_slot(trans, root, &block->key, path, 0, 1);
2563 path->lowest_level = 0;
2564 btrfs_release_path(path);
2565
2566 if (root == root->fs_info->chunk_root)
2567 btrfs_trans_release_chunk_metadata(trans);
2568 if (ret > 0)
2569 ret = 0;
2570 btrfs_put_root(root);
2571
2572 return ret;
2573 }
2574
2575 /*
2576 * relocate a list of blocks
2577 */
2578 static noinline_for_stack
2579 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2580 struct reloc_control *rc, struct rb_root *blocks)
2581 {
2582 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2583 struct btrfs_backref_node *node;
2584 struct btrfs_path *path;
2585 struct tree_block *block;
2586 struct tree_block *next;
2587 int ret = 0;
2588
2589 path = btrfs_alloc_path();
2590 if (!path) {
2591 ret = -ENOMEM;
2592 goto out_free_blocks;
2593 }
2594
2595 /* Kick in readahead for tree blocks with missing keys */
2596 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2597 if (!block->key_ready)
2598 btrfs_readahead_tree_block(fs_info, block->bytenr,
2599 block->owner, 0,
2600 block->level);
2601 }
2602
2603 /* Get first keys */
2604 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2605 if (!block->key_ready) {
2606 ret = get_tree_block_key(fs_info, block);
2607 if (ret)
2608 goto out_free_path;
2609 }
2610 }
2611
2612 /* Do tree relocation */
2613 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2614 /*
2615 * For COWonly blocks, or the data reloc tree, we only need to
2616 * COW down to the block; there's no need to generate a backref
2617 * tree.
2618 */
2619 if (block->owner &&
2620 (!is_fstree(block->owner) ||
2621 block->owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) {
2622 ret = relocate_cowonly_block(trans, rc, block, path);
2623 if (ret)
2624 break;
2625 continue;
2626 }
2627
2628 node = build_backref_tree(trans, rc, &block->key,
2629 block->level, block->bytenr);
2630 if (IS_ERR(node)) {
2631 ret = PTR_ERR(node);
2632 goto out;
2633 }
2634
2635 ret = relocate_tree_block(trans, rc, node, &block->key,
2636 path);
2637 if (ret < 0)
2638 break;
2639 }
2640 out:
2641 ret = finish_pending_nodes(trans, rc, path, ret);
2642
2643 out_free_path:
2644 btrfs_free_path(path);
2645 out_free_blocks:
2646 free_block_list(blocks);
2647 return ret;
2648 }
2649
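/*
 * Preallocate file extents in the data reloc inode for the current cluster,
 * one range per cluster boundary, so the relocated data is written into
 * extents of the same size as the originals.  For subpage filesystems the
 * tail of the previous i_size page is invalidated first to avoid writing
 * back padding zeros as data.
 */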
2650 static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control *rc)
2651 {
2652 const struct file_extent_cluster *cluster = &rc->cluster;
2653 struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
2654 u64 alloc_hint = 0;
2655 u64 start;
2656 u64 end;
2657 u64 offset = inode->reloc_block_group_start;
2658 u64 num_bytes;
2659 int nr;
2660 int ret = 0;
2661 u64 i_size = i_size_read(&inode->vfs_inode);
2662 u64 prealloc_start = cluster->start - offset;
2663 u64 prealloc_end = cluster->end - offset;
2664 u64 cur_offset = prealloc_start;
2665
2666 /*
2667 * For the subpage case, the previous i_size may not be aligned to PAGE_SIZE.
2668 * This means the range [i_size, PAGE_END + 1) is filled with zeros by the
2669 * btrfs_do_readpage() call for the previously relocated file cluster.
2670 *
2671 * If the current cluster starts in the above range, btrfs_do_readpage()
2672 * will skip the read, and relocate_one_folio() will later writeback
2673 * the padding zeros as new data, causing data corruption.
2674 *
2675 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
2676 */
2677 if (!PAGE_ALIGNED(i_size)) {
2678 struct address_space *mapping = inode->vfs_inode.i_mapping;
2679 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2680 const u32 sectorsize = fs_info->sectorsize;
2681 struct folio *folio;
2682
2683 ASSERT(sectorsize < PAGE_SIZE);
2684 ASSERT(IS_ALIGNED(i_size, sectorsize));
2685
2686 /*
2687 * Subpage can't handle page with DIRTY but without UPTODATE
2688 * bit as it can lead to the following deadlock:
2689 *
2690 * btrfs_read_folio()
2691 * | Page already *locked*
2692 * |- btrfs_lock_and_flush_ordered_range()
2693 * |- btrfs_start_ordered_extent()
2694 * |- extent_write_cache_pages()
2695 * |- lock_page()
2696 * We try to lock the page we already hold.
2697 *
2698 * Here we just write back the whole data reloc inode, so that
2699 * we are guaranteed to have no dirty range in the page, and
2700 * are safe to clear the uptodate bits.
2701 *
2702 * This shouldn't cause too much overhead, as we need to write
2703 * the data back anyway.
2704 */
2705 ret = filemap_write_and_wait(mapping);
2706 if (ret < 0)
2707 return ret;
2708
2709 clear_extent_bits(&inode->io_tree, i_size,
2710 round_up(i_size, PAGE_SIZE) - 1,
2711 EXTENT_UPTODATE);
2712 folio = filemap_lock_folio(mapping, i_size >> PAGE_SHIFT);
2713 /*
2714 * If page is freed we don't need to do anything then, as we
2715 * will re-read the whole page anyway.
2716 */
2717 if (!IS_ERR(folio)) {
2718 btrfs_subpage_clear_uptodate(fs_info, folio, i_size,
2719 round_up(i_size, PAGE_SIZE) - i_size);
2720 folio_unlock(folio);
2721 folio_put(folio);
2722 }
2723 }
2724
2725 BUG_ON(cluster->start != cluster->boundary[0]);
2726 ret = btrfs_alloc_data_chunk_ondemand(inode,
2727 prealloc_end + 1 - prealloc_start);
2728 if (ret)
2729 return ret;
2730
2731 btrfs_inode_lock(inode, 0);
2732 for (nr = 0; nr < cluster->nr; nr++) {
2733 struct extent_state *cached_state = NULL;
2734
2735 start = cluster->boundary[nr] - offset;
2736 if (nr + 1 < cluster->nr)
2737 end = cluster->boundary[nr + 1] - 1 - offset;
2738 else
2739 end = cluster->end - offset;
2740
2741 lock_extent(&inode->io_tree, start, end, &cached_state);
2742 num_bytes = end + 1 - start;
2743 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2744 num_bytes, num_bytes,
2745 end + 1, &alloc_hint);
2746 cur_offset = end + 1;
2747 unlock_extent(&inode->io_tree, start, end, &cached_state);
2748 if (ret)
2749 break;
2750 }
2751 btrfs_inode_unlock(inode, 0);
2752
2753 if (cur_offset < prealloc_end)
2754 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
2755 prealloc_end + 1 - cur_offset);
2756 return ret;
2757 }
2758
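/*
 * Insert a pinned extent map covering the whole cluster so that reads of
 * the data reloc inode in this range are served from the old data location
 * (rc->cluster.start) in the block group being relocated.
 */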
2759 static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_control *rc)
2760 {
2761 struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
2762 struct extent_map *em;
2763 struct extent_state *cached_state = NULL;
2764 u64 offset = inode->reloc_block_group_start;
2765 u64 start = rc->cluster.start - offset;
2766 u64 end = rc->cluster.end - offset;
2767 int ret = 0;
2768
2769 em = alloc_extent_map();
2770 if (!em)
2771 return -ENOMEM;
2772
2773 em->start = start;
2774 em->len = end + 1 - start;
2775 em->disk_bytenr = rc->cluster.start;
2776 em->disk_num_bytes = em->len;
2777 em->ram_bytes = em->len;
2778 em->flags |= EXTENT_FLAG_PINNED;
2779
2780 lock_extent(&inode->io_tree, start, end, &cached_state);
2781 ret = btrfs_replace_extent_map_range(inode, em, false);
2782 unlock_extent(&inode->io_tree, start, end, &cached_state);
2783 free_extent_map(em);
2784
2785 return ret;
2786 }
2787
2788 /*
2789 * Allow error injection to test balance/relocation cancellation
2790 */
2791 noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
2792 {
2793 return atomic_read(&fs_info->balance_cancel_req) ||
2794 atomic_read(&fs_info->reloc_cancel_req) ||
2795 fatal_signal_pending(current);
2796 }
2797 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2798
2799 static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
2800 int cluster_nr)
2801 {
2802 /* Last extent, use cluster end directly */
2803 if (cluster_nr >= cluster->nr - 1)
2804 return cluster->end;
2805
2806 /* Use one less than the next boundary's start */
2807 return cluster->boundary[cluster_nr + 1] - 1;
2808 }
2809
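/*
 * Relocate one folio of the data reloc inode: read it in (readahead is
 * skipped on RAID stripe tree filesystems), then mark the parts covered by
 * the current cluster delalloc and dirty, setting EXTENT_BOUNDARY at each
 * extent start so the relocated extents keep their original boundaries.
 */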
2810 static int relocate_one_folio(struct reloc_control *rc,
2811 struct file_ra_state *ra,
2812 int *cluster_nr, unsigned long index)
2813 {
2814 const struct file_extent_cluster *cluster = &rc->cluster;
2815 struct inode *inode = rc->data_inode;
2816 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2817 u64 offset = BTRFS_I(inode)->reloc_block_group_start;
2818 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
2819 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2820 struct folio *folio;
2821 u64 folio_start;
2822 u64 folio_end;
2823 u64 cur;
2824 int ret;
2825 const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
2826
2827 ASSERT(index <= last_index);
2828 again:
2829 folio = filemap_lock_folio(inode->i_mapping, index);
2830 if (IS_ERR(folio)) {
2831
2832 /*
2833 * On relocation we're doing readahead on the relocation inode,
2834 * but if the filesystem is backed by a RAID stripe tree we can
2835 * get ENOENT (e.g. due to preallocated extents not being
2836 * mapped in the RST) from the lookup.
2837 *
2838 * But readahead doesn't handle the error and submits invalid
2839 * reads to the device, causing assertion failures.
2840 */
2841 if (!use_rst)
2842 page_cache_sync_readahead(inode->i_mapping, ra, NULL,
2843 index, last_index + 1 - index);
2844 folio = __filemap_get_folio(inode->i_mapping, index,
2845 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
2846 mask);
2847 if (IS_ERR(folio))
2848 return PTR_ERR(folio);
2849 }
2850
2851 WARN_ON(folio_order(folio));
2852
2853 if (folio_test_readahead(folio) && !use_rst)
2854 page_cache_async_readahead(inode->i_mapping, ra, NULL,
2855 folio, last_index + 1 - index);
2856
2857 if (!folio_test_uptodate(folio)) {
2858 btrfs_read_folio(NULL, folio);
2859 folio_lock(folio);
2860 if (!folio_test_uptodate(folio)) {
2861 ret = -EIO;
2862 goto release_folio;
2863 }
2864 if (folio->mapping != inode->i_mapping) {
2865 folio_unlock(folio);
2866 folio_put(folio);
2867 goto again;
2868 }
2869 }
2870
2871 /*
2872 * We could have lost folio private when we dropped the lock to read the
2873 * folio above, so make sure we call set_folio_extent_mapped() here to have
2874 * the subpage blocksize state we need in place.
2875 */
2876 ret = set_folio_extent_mapped(folio);
2877 if (ret < 0)
2878 goto release_folio;
2879
2880 folio_start = folio_pos(folio);
2881 folio_end = folio_start + PAGE_SIZE - 1;
2882
2883 /*
2884 * Start from the cluster boundary, as in the subpage case the cluster can start
2885 * inside the folio.
2886 */
2887 cur = max(folio_start, cluster->boundary[*cluster_nr] - offset);
2888 while (cur <= folio_end) {
2889 struct extent_state *cached_state = NULL;
2890 u64 extent_start = cluster->boundary[*cluster_nr] - offset;
2891 u64 extent_end = get_cluster_boundary_end(cluster,
2892 *cluster_nr) - offset;
2893 u64 clamped_start = max(folio_start, extent_start);
2894 u64 clamped_end = min(folio_end, extent_end);
2895 u32 clamped_len = clamped_end + 1 - clamped_start;
2896
2897 /* Reserve metadata for this range */
2898 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
2899 clamped_len, clamped_len,
2900 false);
2901 if (ret)
2902 goto release_folio;
2903
2904 /* Mark the range delalloc and dirty for later writeback */
2905 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
2906 &cached_state);
2907 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
2908 clamped_end, 0, &cached_state);
2909 if (ret) {
2910 clear_extent_bit(&BTRFS_I(inode)->io_tree,
2911 clamped_start, clamped_end,
2912 EXTENT_LOCKED | EXTENT_BOUNDARY,
2913 &cached_state);
2914 btrfs_delalloc_release_metadata(BTRFS_I(inode),
2915 clamped_len, true);
2916 btrfs_delalloc_release_extents(BTRFS_I(inode),
2917 clamped_len);
2918 goto release_folio;
2919 }
2920 btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);
2921
2922 /*
2923 * Set the boundary if it's inside the folio.
2924 * Data relocation requires the destination extents to have the
2925 * same size as the source.
2926 * EXTENT_BOUNDARY bit prevents current extent from being merged
2927 * with previous extent.
2928 */
2929 if (in_range(cluster->boundary[*cluster_nr] - offset, folio_start, PAGE_SIZE)) {
2930 u64 boundary_start = cluster->boundary[*cluster_nr] -
2931 offset;
2932 u64 boundary_end = boundary_start +
2933 fs_info->sectorsize - 1;
2934
2935 set_extent_bit(&BTRFS_I(inode)->io_tree,
2936 boundary_start, boundary_end,
2937 EXTENT_BOUNDARY, NULL);
2938 }
2939 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
2940 &cached_state);
2941 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
2942 cur += clamped_len;
2943
2944 /* Crossed extent end, go to next extent */
2945 if (cur >= extent_end) {
2946 (*cluster_nr)++;
2947 /* Just finished the last extent of the cluster, exit. */
2948 if (*cluster_nr >= cluster->nr)
2949 break;
2950 }
2951 }
2952 folio_unlock(folio);
2953 folio_put(folio);
2954
2955 balance_dirty_pages_ratelimited(inode->i_mapping);
2956 btrfs_throttle(fs_info);
2957 if (btrfs_should_cancel_balance(fs_info))
2958 ret = -ECANCELED;
2959 return ret;
2960
2961 release_folio:
2962 folio_unlock(folio);
2963 folio_put(folio);
2964 return ret;
2965 }
2966
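/*
 * Relocate the current file extent cluster: preallocate the destination
 * space, map the cluster range to the old data location, then dirty every
 * folio in the range so that writeback copies the data into the new
 * extents.
 */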
2967 static int relocate_file_extent_cluster(struct reloc_control *rc)
2968 {
2969 struct inode *inode = rc->data_inode;
2970 const struct file_extent_cluster *cluster = &rc->cluster;
2971 u64 offset = BTRFS_I(inode)->reloc_block_group_start;
2972 unsigned long index;
2973 unsigned long last_index;
2974 struct file_ra_state *ra;
2975 int cluster_nr = 0;
2976 int ret = 0;
2977
2978 if (!cluster->nr)
2979 return 0;
2980
2981 ra = kzalloc(sizeof(*ra), GFP_NOFS);
2982 if (!ra)
2983 return -ENOMEM;
2984
2985 ret = prealloc_file_extent_cluster(rc);
2986 if (ret)
2987 goto out;
2988
2989 file_ra_state_init(ra, inode->i_mapping);
2990
2991 ret = setup_relocation_extent_mapping(rc);
2992 if (ret)
2993 goto out;
2994
2995 last_index = (cluster->end - offset) >> PAGE_SHIFT;
2996 for (index = (cluster->start - offset) >> PAGE_SHIFT;
2997 index <= last_index && !ret; index++)
2998 ret = relocate_one_folio(rc, ra, &cluster_nr, index);
2999 if (ret == 0)
3000 WARN_ON(cluster_nr != cluster->nr);
3001 out:
3002 kfree(ra);
3003 return ret;
3004 }
3005
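/*
 * Add a data extent to the current cluster.  The cluster is flushed via
 * relocate_file_extent_cluster() beforehand if the new extent is not
 * adjacent to the previous one or the owning root changed (simple quotas),
 * and afterwards once it holds MAX_EXTENTS extents.
 */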
3006 static noinline_for_stack int relocate_data_extent(struct reloc_control *rc,
3007 const struct btrfs_key *extent_key)
3008 {
3009 struct inode *inode = rc->data_inode;
3010 struct file_extent_cluster *cluster = &rc->cluster;
3011 int ret;
3012 struct btrfs_root *root = BTRFS_I(inode)->root;
3013
3014 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3015 ret = relocate_file_extent_cluster(rc);
3016 if (ret)
3017 return ret;
3018 cluster->nr = 0;
3019 }
3020
3021 /*
3022 * Under simple quotas, we set root->relocation_src_root when we find
3023 * the extent. If adjacent extents have different owners, we can't merge
3024 * them while relocating. Handle this by storing the owning root that
3025 * started a cluster and if we see an extent from a different root break
3026 * cluster formation (just like the above case of non-adjacent extents).
3027 *
3028 * Without simple quotas, relocation_src_root is always 0, so we should
3029 * never see a mismatch, and it should have no effect on relocation
3030 * clusters.
3031 */
3032 if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) {
3033 u64 tmp = root->relocation_src_root;
3034
3035 /*
3036 * root->relocation_src_root is the state that actually affects
3037 * the preallocation we do here, so set it to the root owning
3038 * the cluster we need to relocate.
3039 */
3040 root->relocation_src_root = cluster->owning_root;
3041 ret = relocate_file_extent_cluster(rc);
3042 if (ret)
3043 return ret;
3044 cluster->nr = 0;
3045 /* And reset it back for the current extent's owning root. */
3046 root->relocation_src_root = tmp;
3047 }
3048
3049 if (!cluster->nr) {
3050 cluster->start = extent_key->objectid;
3051 cluster->owning_root = root->relocation_src_root;
3052 } else {
3053 BUG_ON(cluster->nr >= MAX_EXTENTS);
3054 }
3055 cluster->end = extent_key->objectid + extent_key->offset - 1;
3056 cluster->boundary[cluster->nr] = extent_key->objectid;
3057 cluster->nr++;
3058
3059 if (cluster->nr >= MAX_EXTENTS) {
3060 ret = relocate_file_extent_cluster(rc);
3061 if (ret)
3062 return ret;
3063 cluster->nr = 0;
3064 }
3065 return 0;
3066 }
3067
3068 /*
3069 * helper to add a tree block to the list.
3070 * the major work is getting the generation and level of the block
3071 */
3072 static int add_tree_block(struct reloc_control *rc,
3073 const struct btrfs_key *extent_key,
3074 struct btrfs_path *path,
3075 struct rb_root *blocks)
3076 {
3077 struct extent_buffer *eb;
3078 struct btrfs_extent_item *ei;
3079 struct btrfs_tree_block_info *bi;
3080 struct tree_block *block;
3081 struct rb_node *rb_node;
3082 u32 item_size;
3083 int level = -1;
3084 u64 generation;
3085 u64 owner = 0;
3086
3087 eb = path->nodes[0];
3088 item_size = btrfs_item_size(eb, path->slots[0]);
3089
3090 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3091 item_size >= sizeof(*ei) + sizeof(*bi)) {
3092 unsigned long ptr = 0, end;
3093
3094 ei = btrfs_item_ptr(eb, path->slots[0],
3095 struct btrfs_extent_item);
3096 end = (unsigned long)ei + item_size;
3097 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3098 bi = (struct btrfs_tree_block_info *)(ei + 1);
3099 level = btrfs_tree_block_level(eb, bi);
3100 ptr = (unsigned long)(bi + 1);
3101 } else {
3102 level = (int)extent_key->offset;
3103 ptr = (unsigned long)(ei + 1);
3104 }
3105 generation = btrfs_extent_generation(eb, ei);
3106
3107 /*
3108 * We're reading random blocks without knowing their owner ahead
3109 * of time. This is ok most of the time, as all reloc roots and
3110 * fs roots have the same lock type. However normal trees do
3111 * not, and the only way to know ahead of time is to read the
3112 * inline ref offset. We know it's an fs root if
3113 *
3114 * 1. There's more than one ref.
3115 * 2. There's a SHARED_DATA_REF_KEY set.
3116 * 3. FULL_BACKREF is set on the flags.
3117 *
3118 * Otherwise it's safe to assume that the ref offset == the
3119 * owner of this block, so we can use that when calling
3120 * read_tree_block.
3121 */
3122 if (btrfs_extent_refs(eb, ei) == 1 &&
3123 !(btrfs_extent_flags(eb, ei) &
3124 BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3125 ptr < end) {
3126 struct btrfs_extent_inline_ref *iref;
3127 int type;
3128
3129 iref = (struct btrfs_extent_inline_ref *)ptr;
3130 type = btrfs_get_extent_inline_ref_type(eb, iref,
3131 BTRFS_REF_TYPE_BLOCK);
3132 if (type == BTRFS_REF_TYPE_INVALID)
3133 return -EINVAL;
3134 if (type == BTRFS_TREE_BLOCK_REF_KEY)
3135 owner = btrfs_extent_inline_ref_offset(eb, iref);
3136 }
3137 } else {
3138 btrfs_print_leaf(eb);
3139 btrfs_err(rc->block_group->fs_info,
3140 "unrecognized tree backref at tree block %llu slot %u",
3141 eb->start, path->slots[0]);
3142 btrfs_release_path(path);
3143 return -EUCLEAN;
3144 }
3145
3146 btrfs_release_path(path);
3147
3148 BUG_ON(level == -1);
3149
3150 block = kmalloc(sizeof(*block), GFP_NOFS);
3151 if (!block)
3152 return -ENOMEM;
3153
3154 block->bytenr = extent_key->objectid;
3155 block->key.objectid = rc->extent_root->fs_info->nodesize;
3156 block->key.offset = generation;
3157 block->level = level;
3158 block->key_ready = false;
3159 block->owner = owner;
3160
3161 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3162 if (rb_node)
3163 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3164 -EEXIST);
3165
3166 return 0;
3167 }
3168
3169 /*
3170 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3171 */
3172 static int __add_tree_block(struct reloc_control *rc,
3173 u64 bytenr, u32 blocksize,
3174 struct rb_root *blocks)
3175 {
3176 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3177 struct btrfs_path *path;
3178 struct btrfs_key key;
3179 int ret;
3180 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3181
3182 if (tree_block_processed(bytenr, rc))
3183 return 0;
3184
3185 if (rb_simple_search(blocks, bytenr))
3186 return 0;
3187
3188 path = btrfs_alloc_path();
3189 if (!path)
3190 return -ENOMEM;
3191 again:
3192 key.objectid = bytenr;
3193 if (skinny) {
3194 key.type = BTRFS_METADATA_ITEM_KEY;
3195 key.offset = (u64)-1;
3196 } else {
3197 key.type = BTRFS_EXTENT_ITEM_KEY;
3198 key.offset = blocksize;
3199 }
3200
3201 path->search_commit_root = 1;
3202 path->skip_locking = 1;
3203 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3204 if (ret < 0)
3205 goto out;
3206
3207 if (ret > 0 && skinny) {
3208 if (path->slots[0]) {
3209 path->slots[0]--;
3210 btrfs_item_key_to_cpu(path->nodes[0], &key,
3211 path->slots[0]);
3212 if (key.objectid == bytenr &&
3213 (key.type == BTRFS_METADATA_ITEM_KEY ||
3214 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3215 key.offset == blocksize)))
3216 ret = 0;
3217 }
3218
3219 if (ret) {
3220 skinny = false;
3221 btrfs_release_path(path);
3222 goto again;
3223 }
3224 }
3225 if (ret) {
3226 ASSERT(ret == 1);
3227 btrfs_print_leaf(path->nodes[0]);
3228 btrfs_err(fs_info,
3229 "tree block extent item (%llu) is not found in extent tree",
3230 bytenr);
3231 WARN_ON(1);
3232 ret = -EINVAL;
3233 goto out;
3234 }
3235
3236 ret = add_tree_block(rc, &key, path, blocks);
3237 out:
3238 btrfs_free_path(path);
3239 return ret;
3240 }
3241
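/*
 * Truncate the v1 free space cache inode of @block_group (looked up by @ino
 * when @inode is NULL) so that its data extent no longer gets in the way of
 * relocating the block group.
 */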
3242 static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3243 struct btrfs_block_group *block_group,
3244 struct inode *inode,
3245 u64 ino)
3246 {
3247 struct btrfs_root *root = fs_info->tree_root;
3248 struct btrfs_trans_handle *trans;
3249 int ret = 0;
3250
3251 if (inode)
3252 goto truncate;
3253
3254 inode = btrfs_iget(ino, root);
3255 if (IS_ERR(inode))
3256 return -ENOENT;
3257
3258 truncate:
3259 ret = btrfs_check_trunc_cache_free_space(fs_info,
3260 &fs_info->global_block_rsv);
3261 if (ret)
3262 goto out;
3263
3264 trans = btrfs_join_transaction(root);
3265 if (IS_ERR(trans)) {
3266 ret = PTR_ERR(trans);
3267 goto out;
3268 }
3269
3270 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3271
3272 btrfs_end_transaction(trans);
3273 btrfs_btree_balance_dirty(fs_info);
3274 out:
3275 iput(inode);
3276 return ret;
3277 }
3278
3279 /*
3280 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the
3281 * cache inode, to keep the free space cache data extent from blocking data relocation.
3282 */
3283 static int delete_v1_space_cache(struct extent_buffer *leaf,
3284 struct btrfs_block_group *block_group,
3285 u64 data_bytenr)
3286 {
3287 u64 space_cache_ino;
3288 struct btrfs_file_extent_item *ei;
3289 struct btrfs_key key;
3290 bool found = false;
3291 int i;
3292 int ret;
3293
3294 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3295 return 0;
3296
3297 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3298 u8 type;
3299
3300 btrfs_item_key_to_cpu(leaf, &key, i);
3301 if (key.type != BTRFS_EXTENT_DATA_KEY)
3302 continue;
3303 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3304 type = btrfs_file_extent_type(leaf, ei);
3305
3306 if ((type == BTRFS_FILE_EXTENT_REG ||
3307 type == BTRFS_FILE_EXTENT_PREALLOC) &&
3308 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3309 found = true;
3310 space_cache_ino = key.objectid;
3311 break;
3312 }
3313 }
3314 if (!found)
3315 return -ENOENT;
3316 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3317 space_cache_ino);
3318 return ret;
3319 }
3320
3321 /*
3322 * helper to find all tree blocks that reference a given data extent
3323 */
3324 static noinline_for_stack int add_data_references(struct reloc_control *rc,
3325 const struct btrfs_key *extent_key,
3326 struct btrfs_path *path,
3327 struct rb_root *blocks)
3328 {
3329 struct btrfs_backref_walk_ctx ctx = { 0 };
3330 struct ulist_iterator leaf_uiter;
3331 struct ulist_node *ref_node = NULL;
3332 const u32 blocksize = rc->extent_root->fs_info->nodesize;
3333 int ret = 0;
3334
3335 btrfs_release_path(path);
3336
3337 ctx.bytenr = extent_key->objectid;
3338 ctx.skip_inode_ref_list = true;
3339 ctx.fs_info = rc->extent_root->fs_info;
3340
3341 ret = btrfs_find_all_leafs(&ctx);
3342 if (ret < 0)
3343 return ret;
3344
3345 ULIST_ITER_INIT(&leaf_uiter);
3346 while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
3347 struct btrfs_tree_parent_check check = { 0 };
3348 struct extent_buffer *eb;
3349
3350 eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
3351 if (IS_ERR(eb)) {
3352 ret = PTR_ERR(eb);
3353 break;
3354 }
3355 ret = delete_v1_space_cache(eb, rc->block_group,
3356 extent_key->objectid);
3357 free_extent_buffer(eb);
3358 if (ret < 0)
3359 break;
3360 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3361 if (ret < 0)
3362 break;
3363 }
3364 if (ret < 0)
3365 free_block_list(blocks);
3366 ulist_free(ctx.refs);
3367 return ret;
3368 }
3369
3370 /*
3371 * helper to find next unprocessed extent
3372 */
3373 static noinline_for_stack
3374 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3375 struct btrfs_key *extent_key)
3376 {
3377 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3378 struct btrfs_key key;
3379 struct extent_buffer *leaf;
3380 u64 start, end, last;
3381 int ret;
3382
3383 last = rc->block_group->start + rc->block_group->length;
3384 while (1) {
3385 bool block_found;
3386
3387 cond_resched();
3388 if (rc->search_start >= last) {
3389 ret = 1;
3390 break;
3391 }
3392
3393 key.objectid = rc->search_start;
3394 key.type = BTRFS_EXTENT_ITEM_KEY;
3395 key.offset = 0;
3396
3397 path->search_commit_root = 1;
3398 path->skip_locking = 1;
3399 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3400 0, 0);
3401 if (ret < 0)
3402 break;
3403 next:
3404 leaf = path->nodes[0];
3405 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3406 ret = btrfs_next_leaf(rc->extent_root, path);
3407 if (ret != 0)
3408 break;
3409 leaf = path->nodes[0];
3410 }
3411
3412 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3413 if (key.objectid >= last) {
3414 ret = 1;
3415 break;
3416 }
3417
3418 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3419 key.type != BTRFS_METADATA_ITEM_KEY) {
3420 path->slots[0]++;
3421 goto next;
3422 }
3423
3424 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3425 key.objectid + key.offset <= rc->search_start) {
3426 path->slots[0]++;
3427 goto next;
3428 }
3429
3430 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3431 key.objectid + fs_info->nodesize <=
3432 rc->search_start) {
3433 path->slots[0]++;
3434 goto next;
3435 }
3436
3437 block_found = find_first_extent_bit(&rc->processed_blocks,
3438 key.objectid, &start, &end,
3439 EXTENT_DIRTY, NULL);
3440
3441 if (block_found && start <= key.objectid) {
3442 btrfs_release_path(path);
3443 rc->search_start = end + 1;
3444 } else {
3445 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3446 rc->search_start = key.objectid + key.offset;
3447 else
3448 rc->search_start = key.objectid +
3449 fs_info->nodesize;
3450 memcpy(extent_key, &key, sizeof(key));
3451 return 0;
3452 }
3453 }
3454 btrfs_release_path(path);
3455 return ret;
3456 }
3457
3458 static void set_reloc_control(struct reloc_control *rc)
3459 {
3460 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3461
3462 mutex_lock(&fs_info->reloc_mutex);
3463 fs_info->reloc_ctl = rc;
3464 mutex_unlock(&fs_info->reloc_mutex);
3465 }
3466
3467 static void unset_reloc_control(struct reloc_control *rc)
3468 {
3469 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3470
3471 mutex_lock(&fs_info->reloc_mutex);
3472 fs_info->reloc_ctl = NULL;
3473 mutex_unlock(&fs_info->reloc_mutex);
3474 }
3475
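/*
 * Set up relocation: allocate and fill the block reservation used for tree
 * block relocation, install rc as the fs-wide reloc control and commit a
 * transaction before the block group is processed.
 */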
3476 static noinline_for_stack
3477 int prepare_to_relocate(struct reloc_control *rc)
3478 {
3479 struct btrfs_trans_handle *trans;
3480 int ret;
3481
3482 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3483 BTRFS_BLOCK_RSV_TEMP);
3484 if (!rc->block_rsv)
3485 return -ENOMEM;
3486
3487 memset(&rc->cluster, 0, sizeof(rc->cluster));
3488 rc->search_start = rc->block_group->start;
3489 rc->extents_found = 0;
3490 rc->nodes_relocated = 0;
3491 rc->merging_rsv_size = 0;
3492 rc->reserved_bytes = 0;
3493 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3494 RELOCATION_RESERVED_NODES;
3495 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3496 rc->block_rsv, rc->block_rsv->size,
3497 BTRFS_RESERVE_FLUSH_ALL);
3498 if (ret)
3499 return ret;
3500
3501 rc->create_reloc_tree = true;
3502 set_reloc_control(rc);
3503
3504 trans = btrfs_join_transaction(rc->extent_root);
3505 if (IS_ERR(trans)) {
3506 unset_reloc_control(rc);
3507 /*
3508 * The extent tree is not a ref_cow tree and has no reloc_root to
3509 * clean up. And callers are responsible for freeing the above
3510 * block rsv.
3511 */
3512 return PTR_ERR(trans);
3513 }
3514
3515 ret = btrfs_commit_transaction(trans);
3516 if (ret)
3517 unset_reloc_control(rc);
3518
3519 return ret;
3520 }
3521
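/*
 * Main loop of relocation (see the "Relocation overview" comment at the top
 * of this file): repeatedly find the next unprocessed extent in the block
 * group, relocate tree blocks and queue data extents into clusters, then
 * merge the reloc trees back via prepare_to_merge() and merge_reloc_roots().
 */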
3522 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3523 {
3524 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3525 struct rb_root blocks = RB_ROOT;
3526 struct btrfs_key key;
3527 struct btrfs_trans_handle *trans = NULL;
3528 struct btrfs_path *path;
3529 struct btrfs_extent_item *ei;
3530 u64 flags;
3531 int ret;
3532 int err = 0;
3533 int progress = 0;
3534
3535 path = btrfs_alloc_path();
3536 if (!path)
3537 return -ENOMEM;
3538 path->reada = READA_FORWARD;
3539
3540 ret = prepare_to_relocate(rc);
3541 if (ret) {
3542 err = ret;
3543 goto out_free;
3544 }
3545
3546 while (1) {
3547 rc->reserved_bytes = 0;
3548 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
3549 rc->block_rsv->size,
3550 BTRFS_RESERVE_FLUSH_ALL);
3551 if (ret) {
3552 err = ret;
3553 break;
3554 }
3555 progress++;
3556 trans = btrfs_start_transaction(rc->extent_root, 0);
3557 if (IS_ERR(trans)) {
3558 err = PTR_ERR(trans);
3559 trans = NULL;
3560 break;
3561 }
3562 restart:
3563 if (rc->backref_cache.last_trans != trans->transid)
3564 btrfs_backref_release_cache(&rc->backref_cache);
3565 rc->backref_cache.last_trans = trans->transid;
3566
3567 ret = find_next_extent(rc, path, &key);
3568 if (ret < 0)
3569 err = ret;
3570 if (ret != 0)
3571 break;
3572
3573 rc->extents_found++;
3574
3575 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3576 struct btrfs_extent_item);
3577 flags = btrfs_extent_flags(path->nodes[0], ei);
3578
3579 /*
3580 * If we are relocating a simple quota owned extent item, we
3581 * need to note the owner on the reloc data root so that when
3582 * we allocate the replacement item, we can attribute it to the
3583 * correct eventual owner (rather than the reloc data root).
3584 */
3585 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3586 struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
3587 u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
3588 path->nodes[0],
3589 path->slots[0]);
3590
3591 root->relocation_src_root = owning_root_id;
3592 }
3593
3594 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3595 ret = add_tree_block(rc, &key, path, &blocks);
3596 } else if (rc->stage == UPDATE_DATA_PTRS &&
3597 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3598 ret = add_data_references(rc, &key, path, &blocks);
3599 } else {
3600 btrfs_release_path(path);
3601 ret = 0;
3602 }
3603 if (ret < 0) {
3604 err = ret;
3605 break;
3606 }
3607
3608 if (!RB_EMPTY_ROOT(&blocks)) {
3609 ret = relocate_tree_blocks(trans, rc, &blocks);
3610 if (ret < 0) {
3611 if (ret != -EAGAIN) {
3612 err = ret;
3613 break;
3614 }
3615 rc->extents_found--;
3616 rc->search_start = key.objectid;
3617 }
3618 }
3619
3620 btrfs_end_transaction_throttle(trans);
3621 btrfs_btree_balance_dirty(fs_info);
3622 trans = NULL;
3623
3624 if (rc->stage == MOVE_DATA_EXTENTS &&
3625 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3626 rc->found_file_extent = true;
3627 ret = relocate_data_extent(rc, &key);
3628 if (ret < 0) {
3629 err = ret;
3630 break;
3631 }
3632 }
3633 if (btrfs_should_cancel_balance(fs_info)) {
3634 err = -ECANCELED;
3635 break;
3636 }
3637 }
3638 if (trans && progress && err == -ENOSPC) {
3639 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3640 if (ret == 1) {
3641 err = 0;
3642 progress = 0;
3643 goto restart;
3644 }
3645 }
3646
3647 btrfs_release_path(path);
3648 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3649
3650 if (trans) {
3651 btrfs_end_transaction_throttle(trans);
3652 btrfs_btree_balance_dirty(fs_info);
3653 }
3654
3655 if (!err) {
3656 ret = relocate_file_extent_cluster(rc);
3657 if (ret < 0)
3658 err = ret;
3659 }
3660
3661 rc->create_reloc_tree = false;
3662 set_reloc_control(rc);
3663
3664 btrfs_backref_release_cache(&rc->backref_cache);
3665 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3666
3667 /*
3668 * Even in the case when the relocation is cancelled, we should still go
3669 * through prepare_to_merge() and merge_reloc_roots().
3670 *
3671 * For error (including cancelled balance), prepare_to_merge() will
3672 * mark all reloc trees orphan, then queue them for cleanup in
3673 * merge_reloc_roots()
3674 */
3675 err = prepare_to_merge(rc, err);
3676
3677 merge_reloc_roots(rc);
3678
3679 rc->merge_reloc_tree = false;
3680 unset_reloc_control(rc);
3681 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3682
3683 /* get rid of pinned extents */
3684 trans = btrfs_join_transaction(rc->extent_root);
3685 if (IS_ERR(trans)) {
3686 err = PTR_ERR(trans);
3687 goto out_free;
3688 }
3689 ret = btrfs_commit_transaction(trans);
3690 if (ret && !err)
3691 err = ret;
3692 out_free:
3693 ret = clean_dirty_subvols(rc);
3694 if (ret < 0 && !err)
3695 err = ret;
3696 btrfs_free_block_rsv(fs_info, rc->block_rsv);
3697 btrfs_free_path(path);
3698 return err;
3699 }
3700
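/*
 * Insert a minimal inode item for the data relocation inode: a regular file
 * with zero size and link count, flagged NOCOMPRESS | PREALLOC.  The orphan
 * item itself is added later by the caller via btrfs_orphan_add().
 */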
3701 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3702 struct btrfs_root *root, u64 objectid)
3703 {
3704 struct btrfs_path *path;
3705 struct btrfs_inode_item *item;
3706 struct extent_buffer *leaf;
3707 int ret;
3708
3709 path = btrfs_alloc_path();
3710 if (!path)
3711 return -ENOMEM;
3712
3713 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3714 if (ret)
3715 goto out;
3716
3717 leaf = path->nodes[0];
3718 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3719 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3720 btrfs_set_inode_generation(leaf, item, 1);
3721 btrfs_set_inode_size(leaf, item, 0);
3722 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3723 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3724 BTRFS_INODE_PREALLOC);
3725 out:
3726 btrfs_free_path(path);
3727 return ret;
3728 }
3729
3730 static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3731 struct btrfs_root *root, u64 objectid)
3732 {
3733 struct btrfs_path *path;
3734 struct btrfs_key key;
3735 int ret = 0;
3736
3737 path = btrfs_alloc_path();
3738 if (!path) {
3739 ret = -ENOMEM;
3740 goto out;
3741 }
3742
3743 key.objectid = objectid;
3744 key.type = BTRFS_INODE_ITEM_KEY;
3745 key.offset = 0;
3746 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3747 if (ret) {
3748 if (ret > 0)
3749 ret = -ENOENT;
3750 goto out;
3751 }
3752 ret = btrfs_del_item(trans, root, path);
3753 out:
3754 if (ret)
3755 btrfs_abort_transaction(trans, ret);
3756 btrfs_free_path(path);
3757 }
3758
3759 /*
3760 * Helper to create the inode used for data relocation.
3761 * The inode lives in the data relocation tree and its link count is 0.
3762 */
3763 static noinline_for_stack struct inode *create_reloc_inode(
3764 struct btrfs_fs_info *fs_info,
3765 const struct btrfs_block_group *group)
3766 {
3767 struct inode *inode = NULL;
3768 struct btrfs_trans_handle *trans;
3769 struct btrfs_root *root;
3770 u64 objectid;
3771 int ret = 0;
3772
3773 root = btrfs_grab_root(fs_info->data_reloc_root);
3774 trans = btrfs_start_transaction(root, 6);
3775 if (IS_ERR(trans)) {
3776 btrfs_put_root(root);
3777 return ERR_CAST(trans);
3778 }
3779
3780 ret = btrfs_get_free_objectid(root, &objectid);
3781 if (ret)
3782 goto out;
3783
3784 ret = __insert_orphan_inode(trans, root, objectid);
3785 if (ret)
3786 goto out;
3787
3788 inode = btrfs_iget(objectid, root);
3789 if (IS_ERR(inode)) {
3790 delete_orphan_inode(trans, root, objectid);
3791 ret = PTR_ERR(inode);
3792 inode = NULL;
3793 goto out;
3794 }
3795 BTRFS_I(inode)->reloc_block_group_start = group->start;
3796
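/*
 * Add an orphan item so the inode and its remaining extents are cleaned
 * up after a crash; btrfs_recover_relocation() runs btrfs_orphan_cleanup()
 * on the data reloc root for exactly this purpose.
 */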
3797 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
3798 out:
3799 btrfs_put_root(root);
3800 btrfs_end_transaction(trans);
3801 btrfs_btree_balance_dirty(fs_info);
3802 if (ret) {
3803 iput(inode);
3804 inode = ERR_PTR(ret);
3805 }
3806 return inode;
3807 }
3808
3809 /*
3810 * Mark start of chunk relocation that is cancellable. Check if the cancellation
3811 * has been requested meanwhile and don't start in that case.
3812 *
3813 * Return:
3814 * 0 success
3815 * -EINPROGRESS operation is already in progress, that's probably a bug
3816 * -ECANCELED cancellation request was set before the operation started
3817 */
3818 static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3819 {
3820 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3821 /* This should not happen */
3822 btrfs_err(fs_info, "reloc already running, cannot start");
3823 return -EINPROGRESS;
3824 }
3825
3826 if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3827 btrfs_info(fs_info, "chunk relocation canceled on start");
3828 /*
3829 * On cancel, clear all requests but let the caller mark
3830 * the end after cleanup operations.
3831 */
3832 atomic_set(&fs_info->reloc_cancel_req, 0);
3833 return -ECANCELED;
3834 }
3835 return 0;
3836 }
3837
3838 /*
3839 * Mark end of chunk relocation that is cancellable and wake any waiters.
3840 */
3841 static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3842 {
3843 /* Requested after start, clear bit first so any waiters can continue */
3844 if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3845 btrfs_info(fs_info, "chunk relocation canceled during operation");
3846 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3847 atomic_set(&fs_info->reloc_cancel_req, 0);
3848 }
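
/*
 * The two helpers above always run as a pair: reloc_chunk_start() before any
 * relocation work, reloc_chunk_end() after cleanup, even when the start was
 * cancelled.  A minimal sketch of the pattern used by the callers below:
 *
 *	ret = reloc_chunk_start(fs_info);
 *	if (ret < 0)
 *		goto cleanup;
 *	... relocate or recover ...
 * cleanup:
 *	reloc_chunk_end(fs_info);
 */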
3849
3850 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3851 {
3852 struct reloc_control *rc;
3853
3854 rc = kzalloc(sizeof(*rc), GFP_NOFS);
3855 if (!rc)
3856 return NULL;
3857
3858 INIT_LIST_HEAD(&rc->reloc_roots);
3859 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3860 btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
3861 rc->reloc_root_tree.rb_root = RB_ROOT;
3862 spin_lock_init(&rc->reloc_root_tree.lock);
3863 extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
3864 return rc;
3865 }
3866
3867 static void free_reloc_control(struct reloc_control *rc)
3868 {
3869 struct mapping_node *node, *tmp;
3870
3871 free_reloc_roots(&rc->reloc_roots);
3872 rbtree_postorder_for_each_entry_safe(node, tmp,
3873 &rc->reloc_root_tree.rb_root, rb_node)
3874 kfree(node);
3875
3876 kfree(rc);
3877 }
3878
3879 /*
3880 * Print the block group being relocated
3881 */
3882 static void describe_relocation(struct btrfs_block_group *block_group)
3883 {
3884 char buf[128] = {'\0'};
3885
3886 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3887
3888 btrfs_info(block_group->fs_info, "relocating block group %llu flags %s",
3889 block_group->start, buf);
3890 }
3891
3892 static const char *stage_to_string(enum reloc_stage stage)
3893 {
3894 if (stage == MOVE_DATA_EXTENTS)
3895 return "move data extents";
3896 if (stage == UPDATE_DATA_PTRS)
3897 return "update data pointers";
3898 return "unknown";
3899 }
3900
3901 /*
3902 * function to relocate all extents in a block group.
3903 */
3904 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3905 {
3906 struct btrfs_block_group *bg;
3907 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
3908 struct reloc_control *rc;
3909 struct inode *inode;
3910 struct btrfs_path *path;
3911 int ret;
3912 int rw = 0;
3913 int err = 0;
3914
3915 /*
3916 * This only gets set if we had a half-deleted snapshot on mount. We
3917 * cannot allow relocation to start while we're still trying to clean up
3918 * these pending deletions.
3919 */
3920 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
3921 if (ret)
3922 return ret;
3923
3924 /* We may have been woken up by close_ctree, so bail if we're closing. */
3925 if (btrfs_fs_closing(fs_info))
3926 return -EINTR;
3927
3928 bg = btrfs_lookup_block_group(fs_info, group_start);
3929 if (!bg)
3930 return -ENOENT;
3931
3932 /*
3933 * Relocation of a data block group creates ordered extents. Without
3934 * sb_start_write(), we can freeze the filesystem while unfinished
3935 * ordered extents are left. Such ordered extents can cause a deadlock
3936 * e.g. when syncfs() is waiting for their completion but they can't
3937 * finish because they block when joining a transaction, due to the
3938 * fact that the freeze locks are being held in write mode.
3939 */
3940 if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
3941 ASSERT(sb_write_started(fs_info->sb));
3942
3943 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
3944 btrfs_put_block_group(bg);
3945 return -ETXTBSY;
3946 }
3947
3948 rc = alloc_reloc_control(fs_info);
3949 if (!rc) {
3950 btrfs_put_block_group(bg);
3951 return -ENOMEM;
3952 }
3953
3954 ret = reloc_chunk_start(fs_info);
3955 if (ret < 0) {
3956 err = ret;
3957 goto out_put_bg;
3958 }
3959
3960 rc->extent_root = extent_root;
3961 rc->block_group = bg;
3962
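/*
 * Switch the block group to read-only; remember it in 'rw' so the error
 * paths below can switch it back if relocation fails.
 */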
3963 ret = btrfs_inc_block_group_ro(rc->block_group, true);
3964 if (ret) {
3965 err = ret;
3966 goto out;
3967 }
3968 rw = 1;
3969
3970 path = btrfs_alloc_path();
3971 if (!path) {
3972 err = -ENOMEM;
3973 goto out;
3974 }
3975
3976 inode = lookup_free_space_inode(rc->block_group, path);
3977 btrfs_free_path(path);
3978
3979 if (!IS_ERR(inode))
3980 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
3981 else
3982 ret = PTR_ERR(inode);
3983
3984 if (ret && ret != -ENOENT) {
3985 err = ret;
3986 goto out;
3987 }
3988
3989 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
3990 if (IS_ERR(rc->data_inode)) {
3991 err = PTR_ERR(rc->data_inode);
3992 rc->data_inode = NULL;
3993 goto out;
3994 }
3995
3996 describe_relocation(rc->block_group);
3997
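/*
 * Quiesce existing users of the block group: wait for outstanding
 * reservations, nocow writers and ordered extents before moving data.
 */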
3998 btrfs_wait_block_group_reservations(rc->block_group);
3999 btrfs_wait_nocow_writers(rc->block_group);
4000 btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);
4001
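/*
 * On zoned filesystems, try to finish the zone backing this block group;
 * -EAGAIN is tolerated and only other errors trigger the warning below.
 */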
4002 ret = btrfs_zone_finish(rc->block_group);
4003 WARN_ON(ret && ret != -EAGAIN);
4004
4005 while (1) {
4006 enum reloc_stage finishes_stage;
4007
4008 mutex_lock(&fs_info->cleaner_mutex);
4009 ret = relocate_block_group(rc);
4010 mutex_unlock(&fs_info->cleaner_mutex);
4011 if (ret < 0)
4012 err = ret;
4013
4014 finishes_stage = rc->stage;
4015 /*
4016 * We may have gotten ENOSPC after we already dirtied some
4017 * extents. If writeout happens while we're relocating a
4018 * different block group we could end up hitting the
4019 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4020 * btrfs_reloc_cow_block. Make sure we write everything out
4021 * properly so we don't trip over this problem, and then break
4022 * out of the loop if we hit an error.
4023 */
4024 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4025 ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode), 0,
4026 (u64)-1);
4027 if (ret)
4028 err = ret;
4029 invalidate_mapping_pages(rc->data_inode->i_mapping,
4030 0, -1);
4031 rc->stage = UPDATE_DATA_PTRS;
4032 }
4033
4034 if (err < 0)
4035 goto out;
4036
4037 if (rc->extents_found == 0)
4038 break;
4039
4040 btrfs_info(fs_info, "found %llu extents, stage: %s",
4041 rc->extents_found, stage_to_string(finishes_stage));
4042 }
4043
4044 WARN_ON(rc->block_group->pinned > 0);
4045 WARN_ON(rc->block_group->reserved > 0);
4046 WARN_ON(rc->block_group->used > 0);
4047 out:
4048 if (err && rw)
4049 btrfs_dec_block_group_ro(rc->block_group);
4050 iput(rc->data_inode);
4051 out_put_bg:
4052 btrfs_put_block_group(bg);
4053 reloc_chunk_end(fs_info);
4054 free_reloc_control(rc);
4055 return err;
4056 }
4057
4058 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4059 {
4060 struct btrfs_fs_info *fs_info = root->fs_info;
4061 struct btrfs_trans_handle *trans;
4062 int ret, err;
4063
4064 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4065 if (IS_ERR(trans))
4066 return PTR_ERR(trans);
4067
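/*
 * Reset the drop progress and zero the ref count in the root item so this
 * stale reloc root is later queued for deletion rather than merged.
 */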
4068 memset(&root->root_item.drop_progress, 0,
4069 sizeof(root->root_item.drop_progress));
4070 btrfs_set_root_drop_level(&root->root_item, 0);
4071 btrfs_set_root_refs(&root->root_item, 0);
4072 ret = btrfs_update_root(trans, fs_info->tree_root,
4073 &root->root_key, &root->root_item);
4074
4075 err = btrfs_end_transaction(trans);
4076 if (err)
4077 return err;
4078 return ret;
4079 }
4080
4081 /*
4082 * Recover relocation interrupted by a system crash.
4083 *
4084 * This function resumes merging reloc trees with their corresponding fs
4085 * trees, which is important for preserving the sharing of tree blocks.
4086 */
4087 int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
4088 {
4089 LIST_HEAD(reloc_roots);
4090 struct btrfs_key key;
4091 struct btrfs_root *fs_root;
4092 struct btrfs_root *reloc_root;
4093 struct btrfs_path *path;
4094 struct extent_buffer *leaf;
4095 struct reloc_control *rc = NULL;
4096 struct btrfs_trans_handle *trans;
4097 int ret2;
4098 int ret = 0;
4099
4100 path = btrfs_alloc_path();
4101 if (!path)
4102 return -ENOMEM;
4103 path->reada = READA_BACK;
4104
4105 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4106 key.type = BTRFS_ROOT_ITEM_KEY;
4107 key.offset = (u64)-1;
4108
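/*
 * Walk all reloc root items (objectid BTRFS_TREE_RELOC_OBJECTID) in the
 * tree root, from the highest key offset (the id of the tree the reloc
 * root was created for) downwards.
 */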
4109 while (1) {
4110 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4111 path, 0, 0);
4112 if (ret < 0)
4113 goto out;
4114 if (ret > 0) {
4115 if (path->slots[0] == 0)
4116 break;
4117 path->slots[0]--;
4118 }
4119 ret = 0;
4120 leaf = path->nodes[0];
4121 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4122 btrfs_release_path(path);
4123
4124 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4125 key.type != BTRFS_ROOT_ITEM_KEY)
4126 break;
4127
4128 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
4129 if (IS_ERR(reloc_root)) {
4130 ret = PTR_ERR(reloc_root);
4131 goto out;
4132 }
4133
4134 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
4135 list_add(&reloc_root->root_list, &reloc_roots);
4136
4137 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4138 fs_root = btrfs_get_fs_root(fs_info,
4139 reloc_root->root_key.offset, false);
4140 if (IS_ERR(fs_root)) {
4141 ret = PTR_ERR(fs_root);
4142 if (ret != -ENOENT)
4143 goto out;
4144 ret = mark_garbage_root(reloc_root);
4145 if (ret < 0)
4146 goto out;
4147 ret = 0;
4148 } else {
4149 btrfs_put_root(fs_root);
4150 }
4151 }
4152
4153 if (key.offset == 0)
4154 break;
4155
4156 key.offset--;
4157 }
4158 btrfs_release_path(path);
4159
4160 if (list_empty(&reloc_roots))
4161 goto out;
4162
4163 rc = alloc_reloc_control(fs_info);
4164 if (!rc) {
4165 ret = -ENOMEM;
4166 goto out;
4167 }
4168
4169 ret = reloc_chunk_start(fs_info);
4170 if (ret < 0)
4171 goto out_end;
4172
4173 rc->extent_root = btrfs_extent_root(fs_info, 0);
4174
4175 set_reloc_control(rc);
4176
4177 trans = btrfs_join_transaction(rc->extent_root);
4178 if (IS_ERR(trans)) {
4179 ret = PTR_ERR(trans);
4180 goto out_unset;
4181 }
4182
4183 rc->merge_reloc_tree = true;
4184
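/*
 * Re-attach each reloc root that still has a live fs root so the
 * interrupted merge can resume; roots with zero refs are only queued for
 * cleanup.
 */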
4185 while (!list_empty(&reloc_roots)) {
4186 reloc_root = list_entry(reloc_roots.next,
4187 struct btrfs_root, root_list);
4188 list_del(&reloc_root->root_list);
4189
4190 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4191 list_add_tail(&reloc_root->root_list,
4192 &rc->reloc_roots);
4193 continue;
4194 }
4195
4196 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
4197 false);
4198 if (IS_ERR(fs_root)) {
4199 ret = PTR_ERR(fs_root);
4200 list_add_tail(&reloc_root->root_list, &reloc_roots);
4201 btrfs_end_transaction(trans);
4202 goto out_unset;
4203 }
4204
4205 ret = __add_reloc_root(reloc_root);
4206 ASSERT(ret != -EEXIST);
4207 if (ret) {
4208 list_add_tail(&reloc_root->root_list, &reloc_roots);
4209 btrfs_put_root(fs_root);
4210 btrfs_end_transaction(trans);
4211 goto out_unset;
4212 }
4213 fs_root->reloc_root = btrfs_grab_root(reloc_root);
4214 btrfs_put_root(fs_root);
4215 }
4216
4217 ret = btrfs_commit_transaction(trans);
4218 if (ret)
4219 goto out_unset;
4220
4221 merge_reloc_roots(rc);
4222
4223 unset_reloc_control(rc);
4224
4225 trans = btrfs_join_transaction(rc->extent_root);
4226 if (IS_ERR(trans)) {
4227 ret = PTR_ERR(trans);
4228 goto out_clean;
4229 }
4230 ret = btrfs_commit_transaction(trans);
4231 out_clean:
4232 ret2 = clean_dirty_subvols(rc);
4233 if (ret2 < 0 && !ret)
4234 ret = ret2;
4235 out_unset:
4236 unset_reloc_control(rc);
4237 out_end:
4238 reloc_chunk_end(fs_info);
4239 free_reloc_control(rc);
4240 out:
4241 free_reloc_roots(&reloc_roots);
4242
4243 btrfs_free_path(path);
4244
4245 if (ret == 0) {
4246 /* Clean up the orphan inode in the data relocation tree. */
4247 fs_root = btrfs_grab_root(fs_info->data_reloc_root);
4248 ASSERT(fs_root);
4249 ret = btrfs_orphan_cleanup(fs_root);
4250 btrfs_put_root(fs_root);
4251 }
4252 return ret;
4253 }
4254
4255 /*
4256 * Helper to add ordered checksums for data relocation.
4257 *
4258 * Cloning the existing checksums correctly handles nodatasum extents and
4259 * avoids the CPU cost of recalculating them.
4260 */
4261 int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
4262 {
4263 struct btrfs_inode *inode = ordered->inode;
4264 struct btrfs_fs_info *fs_info = inode->root->fs_info;
4265 u64 disk_bytenr = ordered->file_offset + inode->reloc_block_group_start;
4266 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
4267 LIST_HEAD(list);
4268 int ret;
4269
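/*
 * The data reloc inode's file offsets mirror the logical addresses inside
 * the block group being relocated, so file_offset plus the block group
 * start yields the old disk bytenr whose csums we clone.
 */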
4270 ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
4271 disk_bytenr + ordered->num_bytes - 1,
4272 &list, false);
4273 if (ret < 0) {
4274 btrfs_mark_ordered_extent_error(ordered);
4275 return ret;
4276 }
4277
4278 while (!list_empty(&list)) {
4279 struct btrfs_ordered_sum *sums =
4280 list_entry(list.next, struct btrfs_ordered_sum, list);
4281
4282 list_del_init(&sums->list);
4283
4284 /*
4285 * We need to offset the new_bytenr based on where the csum is.
4286 * We need to do this because we will read in entire prealloc
4287 * extents but we may have written to say the middle of the
4288 * prealloc extent, so we need to make sure the csum goes with
4289 * the right disk offset.
4290 *
4291 * We can do this because the data reloc inode refers strictly
4292 * to the on disk bytes, so we don't have to worry about
4293 * disk_len vs real len like with real inodes since it's all
4294 * disk length.
4295 */
4296 sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr;
4297 btrfs_add_ordered_sum(ordered, sums);
4298 }
4299
4300 return 0;
4301 }
4302
4303 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4304 struct btrfs_root *root,
4305 const struct extent_buffer *buf,
4306 struct extent_buffer *cow)
4307 {
4308 struct btrfs_fs_info *fs_info = root->fs_info;
4309 struct reloc_control *rc;
4310 struct btrfs_backref_node *node;
4311 int first_cow = 0;
4312 int level;
4313 int ret = 0;
4314
4315 rc = fs_info->reloc_ctl;
4316 if (!rc)
4317 return 0;
4318
4319 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
4320
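/*
 * The buffer is being COWed for the first time since the root's
 * last_snapshot was set if its generation is not newer than that value.
 */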
4321 level = btrfs_header_level(buf);
4322 if (btrfs_header_generation(buf) <=
4323 btrfs_root_last_snapshot(&root->root_item))
4324 first_cow = 1;
4325
4326 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID && rc->create_reloc_tree) {
4327 WARN_ON(!first_cow && level == 0);
4328
4329 node = rc->backref_cache.path[level];
4330
4331 /*
4332 * If node->bytenr != buf->start and node->new_bytenr !=
4333 * buf->start then we've got the wrong backref node for what we
4334 * expected to see here and the cache is incorrect.
4335 */
4336 if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
4337 btrfs_err(fs_info,
4338 "bytenr %llu was found but our backref cache was expecting %llu or %llu",
4339 buf->start, node->bytenr, node->new_bytenr);
4340 return -EUCLEAN;
4341 }
4342
4343 btrfs_backref_drop_node_buffer(node);
4344 atomic_inc(&cow->refs);
4345 node->eb = cow;
4346 node->new_bytenr = cow->start;
4347
4348 if (!node->pending) {
4349 list_move_tail(&node->list,
4350 &rc->backref_cache.pending[level]);
4351 node->pending = 1;
4352 }
4353
4354 if (first_cow)
4355 mark_block_processed(rc, node);
4356
4357 if (first_cow && level > 0)
4358 rc->nodes_relocated += buf->len;
4359 }
4360
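/*
 * In the UPDATE_DATA_PTRS stage, rewrite the file extent items in the
 * freshly COWed leaf so they point at the relocated data extents.
 */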
4361 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4362 ret = replace_file_extents(trans, rc, root, cow);
4363 return ret;
4364 }
4365
4366 /*
4367 * Called before creating a snapshot. It calculates the metadata reservation
4368 * required for relocating tree blocks in the snapshot.
4369 */
4370 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4371 u64 *bytes_to_reserve)
4372 {
4373 struct btrfs_root *root = pending->root;
4374 struct reloc_control *rc = root->fs_info->reloc_ctl;
4375
4376 if (!rc || !have_reloc_root(root))
4377 return;
4378
4379 if (!rc->merge_reloc_tree)
4380 return;
4381
4382 root = root->reloc_root;
4383 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4384 /*
4385 * Relocation is in the stage of merging trees. In the worst case the
4386 * space used by merging a reloc tree is twice the size of the relocated
4387 * tree nodes: half for COWing the reloc tree, half for COWing the fs
4388 * tree. The space used by COWing the reloc tree is freed after the tree
4389 * is dropped, but if we create a snapshot, COWing the fs tree may use
4390 * more space than it frees, so we need to reserve extra space.
4393 */
4394 *bytes_to_reserve += rc->nodes_relocated;
4395 }
4396
4397 /*
4398 * called after snapshot is created. migrate block reservation
4399 * and create reloc root for the newly created snapshot
4400 *
4401 * This is similar to btrfs_init_reloc_root(), we come out of here with two
4402 * references held on the reloc_root, one for root->reloc_root and one for
4403 * rc->reloc_roots.
4404 */
4405 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4406 struct btrfs_pending_snapshot *pending)
4407 {
4408 struct btrfs_root *root = pending->root;
4409 struct btrfs_root *reloc_root;
4410 struct btrfs_root *new_root;
4411 struct reloc_control *rc = root->fs_info->reloc_ctl;
4412 int ret;
4413
4414 if (!rc || !have_reloc_root(root))
4415 return 0;
4416
4417 rc = root->fs_info->reloc_ctl;
4418 rc->merging_rsv_size += rc->nodes_relocated;
4419
4420 if (rc->merge_reloc_tree) {
4421 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4422 rc->block_rsv,
4423 rc->nodes_relocated, true);
4424 if (ret)
4425 return ret;
4426 }
4427
4428 new_root = pending->snap;
4429 reloc_root = create_reloc_root(trans, root->reloc_root, btrfs_root_id(new_root));
4430 if (IS_ERR(reloc_root))
4431 return PTR_ERR(reloc_root);
4432
4433 ret = __add_reloc_root(reloc_root);
4434 ASSERT(ret != -EEXIST);
4435 if (ret) {
4436 /* Pairs with create_reloc_root */
4437 btrfs_put_root(reloc_root);
4438 return ret;
4439 }
4440 new_root->reloc_root = btrfs_grab_root(reloc_root);
4441 return 0;
4442 }
4443
4444 /*
4445 * Get the current bytenr for the block group which is being relocated.
4446 *
4447 * Return U64_MAX if no running relocation.
4448 */
4449 u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
4450 {
4451 u64 logical = U64_MAX;
4452
4453 lockdep_assert_held(&fs_info->reloc_mutex);
4454
4455 if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group)
4456 logical = fs_info->reloc_ctl->block_group->start;
4457 return logical;
4458 }
4459