1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
12 #include <linux/error-injection.h>
13 #include "ctree.h"
14 #include "disk-io.h"
15 #include "transaction.h"
16 #include "volumes.h"
17 #include "locking.h"
18 #include "btrfs_inode.h"
19 #include "async-thread.h"
20 #include "free-space-cache.h"
21 #include "qgroup.h"
22 #include "print-tree.h"
23 #include "delalloc-space.h"
24 #include "block-group.h"
25 #include "backref.h"
26 #include "misc.h"
27 #include "subpage.h"
28 #include "zoned.h"
29 #include "inode-item.h"
30 #include "space-info.h"
31 #include "fs.h"
32 #include "accessors.h"
33 #include "extent-tree.h"
34 #include "root-tree.h"
35 #include "file-item.h"
36 #include "relocation.h"
37 #include "super.h"
38 #include "tree-checker.h"
39 #include "raid-stripe-tree.h"
40
41 /*
42 * Relocation overview
43 *
44 * [What does relocation do]
45 *
46 * The objective of relocation is to relocate all extents of the target block
47 * group to other block groups.
48 * This is used by resize (shrink only), profile conversion, space compaction,
49 * and the balance routine to spread chunks over devices.
50 *
51 * Before | After
52 * ------------------------------------------------------------------
53 * BG A: 10 data extents | BG A: deleted
54 * BG B: 2 data extents | BG B: 10 data extents (2 old + 8 relocated)
55 * BG C: 1 data extent | BG C: 3 data extents (1 old + 2 relocated)
56 *
57 * [How does relocation work]
58 *
59 * 1. Mark the target block group read-only
60 * New extents won't be allocated from the target block group.
61 *
62 * 2.1 Record each extent in the target block group
63 * To build a proper map of extents to be relocated.
64 *
65 * 2.2 Build data reloc tree and reloc trees
66 * Data reloc tree will contain an inode, recording all newly relocated
67 * data extents.
68 * There will be only one data reloc tree for one data block group.
69 *
70 * Reloc tree will be a special snapshot of its source tree, containing
71 * relocated tree blocks.
72 * Each tree referring to a tree block in target block group will get its
73 * reloc tree built.
74 *
75 * 2.3 Swap source tree with its corresponding reloc tree
76 * Each involved tree only refers to new extents after swap.
77 *
78 * 3. Cleanup reloc trees and data reloc tree.
79 * As old extents in the target block group are still referenced by reloc
80 * trees, we need to clean them up before really freeing the target block
81 * group.
82 *
83 * The main complexity is in steps 2.2 and 2.3.
84 *
85 * The entry point of relocation is relocate_block_group() function.
86 */
87
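/* Number of tree blocks worth of metadata reservation kept in rc->block_rsv while relocating tree blocks. */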
88 #define RELOCATION_RESERVED_NODES 256
89 /*
90 * map address of tree root to tree
91 */
92 struct mapping_node {
93 struct {
94 struct rb_node rb_node;
95 u64 bytenr;
96 }; /* Use rb_simple_node for search/insert */
97 void *data;
98 };
99
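/* Rbtree of mapping_node entries, protected by @lock. */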
100 struct mapping_tree {
101 struct rb_root rb_root;
102 spinlock_t lock;
103 };
104
105 /*
106 * Represents a tree block to process.
107 */
108 struct tree_block {
109 struct {
110 struct rb_node rb_node;
111 u64 bytenr;
112 }; /* Use rb_simple_node for search/insert */
113 u64 owner;
114 struct btrfs_key key;
115 u8 level;
116 bool key_ready;
117 };
118
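/* Maximum number of data extents batched into one file extent cluster. */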
119 #define MAX_EXTENTS 128
120
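/*
 * A batch of data extents that get relocated together. @start and @end bound
 * the batch and @boundary records the start of each extent in it.
 */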
121 struct file_extent_cluster {
122 u64 start;
123 u64 end;
124 u64 boundary[MAX_EXTENTS];
125 unsigned int nr;
126 u64 owning_root;
127 };
128
129 /* Stages of data relocation. */
130 enum reloc_stage {
131 MOVE_DATA_EXTENTS,
132 UPDATE_DATA_PTRS
133 };
134
135 struct reloc_control {
136 /* block group to relocate */
137 struct btrfs_block_group *block_group;
138 /* extent tree */
139 struct btrfs_root *extent_root;
140 /* inode for moving data */
141 struct inode *data_inode;
142
143 struct btrfs_block_rsv *block_rsv;
144
145 struct btrfs_backref_cache backref_cache;
146
147 struct file_extent_cluster cluster;
148 /* tree blocks have been processed */
149 struct extent_io_tree processed_blocks;
150 /* map start of tree root to corresponding reloc tree */
151 struct mapping_tree reloc_root_tree;
152 /* list of reloc trees */
153 struct list_head reloc_roots;
154 /* list of subvolume trees that get relocated */
155 struct list_head dirty_subvol_roots;
156 /* size of metadata reservation for merging reloc trees */
157 u64 merging_rsv_size;
158 /* size of relocated tree nodes */
159 u64 nodes_relocated;
160 /* reserved size for block group relocation */
161 u64 reserved_bytes;
162
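/* Logical address to continue the extent search from, and number of extents found so far. */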
163 u64 search_start;
164 u64 extents_found;
165
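/* Current relocation stage and state flags. */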
166 enum reloc_stage stage;
167 bool create_reloc_tree;
168 bool merge_reloc_tree;
169 bool found_file_extent;
170 };
171
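/*
 * Mark the tree block behind @node as processed. Leaves and blocks inside the
 * target block group are also recorded in rc->processed_blocks.
 */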
172 static void mark_block_processed(struct reloc_control *rc,
173 struct btrfs_backref_node *node)
174 {
175 u32 blocksize;
176
177 if (node->level == 0 ||
178 in_range(node->bytenr, rc->block_group->start,
179 rc->block_group->length)) {
180 blocksize = rc->extent_root->fs_info->nodesize;
181 btrfs_set_extent_bit(&rc->processed_blocks, node->bytenr,
182 node->bytenr + blocksize - 1, EXTENT_DIRTY,
183 NULL);
184 }
185 node->processed = 1;
186 }
187
188 /*
189 * Walk up backref nodes until we reach a node that represents a tree root.
190 */
191 static struct btrfs_backref_node *walk_up_backref(
192 struct btrfs_backref_node *node,
193 struct btrfs_backref_edge *edges[], int *index)
194 {
195 struct btrfs_backref_edge *edge;
196 int idx = *index;
197
198 while (!list_empty(&node->upper)) {
199 edge = list_first_entry(&node->upper, struct btrfs_backref_edge,
200 list[LOWER]);
201 edges[idx++] = edge;
202 node = edge->node[UPPER];
203 }
204 BUG_ON(node->detached);
205 *index = idx;
206 return node;
207 }
208
209 /*
210 * walk down backref nodes to find start of next reference path
211 */
212 static struct btrfs_backref_node *walk_down_backref(
213 struct btrfs_backref_edge *edges[], int *index)
214 {
215 struct btrfs_backref_edge *edge;
216 struct btrfs_backref_node *lower;
217 int idx = *index;
218
219 while (idx > 0) {
220 edge = edges[idx - 1];
221 lower = edge->node[LOWER];
222 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
223 idx--;
224 continue;
225 }
226 edge = list_first_entry(&edge->list[LOWER], struct btrfs_backref_edge,
227 list[LOWER]);
228 edges[idx - 1] = edge;
229 *index = idx;
230 return edge->node[UPPER];
231 }
232 *index = 0;
233 return NULL;
234 }
235
236 static bool reloc_root_is_dead(const struct btrfs_root *root)
237 {
238 /*
239 * Pair with set_bit/clear_bit in clean_dirty_subvols and
240 * btrfs_update_reloc_root. We need to see the updated bit before
241 * trying to access reloc_root
242 */
243 smp_rmb();
244 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
245 return true;
246 return false;
247 }
248
249 /*
250 * Check if this subvolume tree has a valid reloc tree.
251 *
252 * A reloc tree after swap is considered dead, thus not considered valid.
253 * This is enough for most callers, as they don't distinguish dead reloc root
254 * from no reloc root. But btrfs_should_ignore_reloc_root() below is a
255 * special case.
256 */
257 static bool have_reloc_root(const struct btrfs_root *root)
258 {
259 if (reloc_root_is_dead(root))
260 return false;
261 if (!root->reloc_root)
262 return false;
263 return true;
264 }
265
266 bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
267 {
268 struct btrfs_root *reloc_root;
269
270 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
271 return false;
272
273 /* This root has been merged with its reloc tree, we can ignore it */
274 if (reloc_root_is_dead(root))
275 return true;
276
277 reloc_root = root->reloc_root;
278 if (!reloc_root)
279 return false;
280
281 if (btrfs_header_generation(reloc_root->commit_root) ==
282 root->fs_info->running_transaction->transid)
283 return false;
284 /*
285 * If there is a reloc tree and it was created in a previous transaction,
286 * backref lookup can find the reloc tree, so the backref node for the fs
287 * tree root is useless for relocation.
288 */
289 return true;
290 }
291
292 /*
293 * find reloc tree by address of tree root
294 */
295 struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
296 {
297 struct reloc_control *rc = fs_info->reloc_ctl;
298 struct rb_node *rb_node;
299 struct mapping_node *node;
300 struct btrfs_root *root = NULL;
301
302 ASSERT(rc);
303 spin_lock(&rc->reloc_root_tree.lock);
304 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
305 if (rb_node) {
306 node = rb_entry(rb_node, struct mapping_node, rb_node);
307 root = node->data;
308 }
309 spin_unlock(&rc->reloc_root_tree.lock);
310 return btrfs_grab_root(root);
311 }
312
313 /*
314 * For useless nodes, do two major clean ups:
315 *
316 * - Clean up the child edges and nodes
317 * If a child node becomes an orphan (no parent) during cleanup, that child
318 * node is cleaned up as well.
319 *
320 * - Free leaves (level 0) but keep higher level nodes detached
321 * Non-leaf nodes stay in the cache, marked as "detached".
322 *
323 * Return false if @node is not in the @useless_nodes list.
324 * Return true if @node is in the @useless_nodes list.
325 */
326 static bool handle_useless_nodes(struct reloc_control *rc,
327 struct btrfs_backref_node *node)
328 {
329 struct btrfs_backref_cache *cache = &rc->backref_cache;
330 struct list_head *useless_node = &cache->useless_node;
331 bool ret = false;
332
333 while (!list_empty(useless_node)) {
334 struct btrfs_backref_node *cur;
335
336 cur = list_first_entry(useless_node, struct btrfs_backref_node,
337 list);
338 list_del_init(&cur->list);
339
340 /* Only tree root nodes can be added to @useless_nodes */
341 ASSERT(list_empty(&cur->upper));
342
343 if (cur == node)
344 ret = true;
345
346 /* Cleanup the lower edges */
347 while (!list_empty(&cur->lower)) {
348 struct btrfs_backref_edge *edge;
349 struct btrfs_backref_node *lower;
350
351 edge = list_first_entry(&cur->lower, struct btrfs_backref_edge,
352 list[UPPER]);
353 list_del(&edge->list[UPPER]);
354 list_del(&edge->list[LOWER]);
355 lower = edge->node[LOWER];
356 btrfs_backref_free_edge(cache, edge);
357
358 /* Child node is also orphan, queue for cleanup */
359 if (list_empty(&lower->upper))
360 list_add(&lower->list, useless_node);
361 }
362 /* Mark this block processed for relocation */
363 mark_block_processed(rc, cur);
364
365 /*
366 * Backref nodes for tree leaves are deleted from the cache.
367 * Backref nodes for upper level tree blocks are left in the
368 * cache to avoid unnecessary backref lookup.
369 */
370 if (cur->level > 0) {
371 cur->detached = 1;
372 } else {
373 rb_erase(&cur->rb_node, &cache->rb_root);
374 btrfs_backref_free_node(cache, cur);
375 }
376 }
377 return ret;
378 }
379
380 /*
381 * Build backref tree for a given tree block. Root of the backref tree
382 * corresponds to the tree block, leaves of the backref tree correspond to roots of
383 * b-trees that reference the tree block.
384 *
385 * The basic idea of this function is to check backrefs of a given block to
386 * find upper level blocks that reference the block, and then check backrefs
387 * of these upper level blocks recursively. The recursion stops when a tree
388 * root is reached or backrefs for the block are cached.
389 *
390 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
391 * all upper level blocks that directly/indirectly reference the block are also
392 * cached.
393 */
394 static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
395 struct btrfs_trans_handle *trans,
396 struct reloc_control *rc, struct btrfs_key *node_key,
397 int level, u64 bytenr)
398 {
399 struct btrfs_backref_iter *iter;
400 struct btrfs_backref_cache *cache = &rc->backref_cache;
401 /* For searching parent of TREE_BLOCK_REF */
402 struct btrfs_path *path;
403 struct btrfs_backref_node *cur;
404 struct btrfs_backref_node *node = NULL;
405 struct btrfs_backref_edge *edge;
406 int ret;
407
408 iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
409 if (!iter)
410 return ERR_PTR(-ENOMEM);
411 path = btrfs_alloc_path();
412 if (!path) {
413 ret = -ENOMEM;
414 goto out;
415 }
416
417 node = btrfs_backref_alloc_node(cache, bytenr, level);
418 if (!node) {
419 ret = -ENOMEM;
420 goto out;
421 }
422
423 cur = node;
424
425 /* Breadth-first search to build backref cache */
426 do {
427 ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
428 node_key, cur);
429 if (ret < 0)
430 goto out;
431
432 edge = list_first_entry_or_null(&cache->pending_edge,
433 struct btrfs_backref_edge, list[UPPER]);
434 /*
435 * The pending list isn't empty, take the first block to
436 * process
437 */
438 if (edge) {
439 list_del_init(&edge->list[UPPER]);
440 cur = edge->node[UPPER];
441 }
442 } while (edge);
443
444 /* Finish the upper linkage of newly added edges/nodes */
445 ret = btrfs_backref_finish_upper_links(cache, node);
446 if (ret < 0)
447 goto out;
448
449 if (handle_useless_nodes(rc, node))
450 node = NULL;
451 out:
452 btrfs_free_path(iter->path);
453 kfree(iter);
454 btrfs_free_path(path);
455 if (ret) {
456 btrfs_backref_error_cleanup(cache, node);
457 return ERR_PTR(ret);
458 }
459 ASSERT(!node || !node->detached);
460 ASSERT(list_empty(&cache->useless_node) &&
461 list_empty(&cache->pending_edge));
462 return node;
463 }
464
465 /*
466 * helper to add 'address of tree root -> reloc tree' mapping
467 */
468 static int __add_reloc_root(struct btrfs_root *root)
469 {
470 struct btrfs_fs_info *fs_info = root->fs_info;
471 struct rb_node *rb_node;
472 struct mapping_node *node;
473 struct reloc_control *rc = fs_info->reloc_ctl;
474
475 node = kmalloc(sizeof(*node), GFP_NOFS);
476 if (!node)
477 return -ENOMEM;
478
479 node->bytenr = root->commit_root->start;
480 node->data = root;
481
482 spin_lock(&rc->reloc_root_tree.lock);
483 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
484 node->bytenr, &node->rb_node);
485 spin_unlock(&rc->reloc_root_tree.lock);
486 if (rb_node) {
487 btrfs_err(fs_info,
488 "Duplicate root found for start=%llu while inserting into relocation tree",
489 node->bytenr);
490 return -EEXIST;
491 }
492
493 list_add_tail(&root->root_list, &rc->reloc_roots);
494 return 0;
495 }
496
497 /*
498 * helper to delete the 'address of tree root -> reloc tree'
499 * mapping
500 */
501 static void __del_reloc_root(struct btrfs_root *root)
502 {
503 struct btrfs_fs_info *fs_info = root->fs_info;
504 struct rb_node *rb_node;
505 struct mapping_node *node = NULL;
506 struct reloc_control *rc = fs_info->reloc_ctl;
507 bool put_ref = false;
508
509 if (rc && root->node) {
510 spin_lock(&rc->reloc_root_tree.lock);
511 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
512 root->commit_root->start);
513 if (rb_node) {
514 node = rb_entry(rb_node, struct mapping_node, rb_node);
515 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
516 RB_CLEAR_NODE(&node->rb_node);
517 }
518 spin_unlock(&rc->reloc_root_tree.lock);
519 ASSERT(!node || (struct btrfs_root *)node->data == root);
520 }
521
522 /*
523 * We only put the reloc root here if it's on the list. There's a lot
524 * of places where the pattern is to splice the rc->reloc_roots, process
525 * the reloc roots, and then add the reloc root back onto
526 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
527 * list we don't want the reference being dropped, because the guy
528 * messing with the list is in charge of the reference.
529 */
530 spin_lock(&fs_info->trans_lock);
531 if (!list_empty(&root->root_list)) {
532 put_ref = true;
533 list_del_init(&root->root_list);
534 }
535 spin_unlock(&fs_info->trans_lock);
536 if (put_ref)
537 btrfs_put_root(root);
538 kfree(node);
539 }
540
541 /*
542 * helper to update the 'address of tree root -> reloc tree'
543 * mapping
544 */
545 static int __update_reloc_root(struct btrfs_root *root)
546 {
547 struct btrfs_fs_info *fs_info = root->fs_info;
548 struct rb_node *rb_node;
549 struct mapping_node *node = NULL;
550 struct reloc_control *rc = fs_info->reloc_ctl;
551
552 spin_lock(&rc->reloc_root_tree.lock);
553 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
554 root->commit_root->start);
555 if (rb_node) {
556 node = rb_entry(rb_node, struct mapping_node, rb_node);
557 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
558 }
559 spin_unlock(&rc->reloc_root_tree.lock);
560
561 if (!node)
562 return 0;
563 BUG_ON((struct btrfs_root *)node->data != root);
564
565 spin_lock(&rc->reloc_root_tree.lock);
566 node->bytenr = root->node->start;
567 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
568 node->bytenr, &node->rb_node);
569 spin_unlock(&rc->reloc_root_tree.lock);
570 if (rb_node)
571 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
572 return 0;
573 }
574
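/*
 * Create the reloc tree root for @root: copy the root node, insert a new root
 * item with objectid BTRFS_TREE_RELOC_OBJECTID into the tree root, and read
 * the resulting reloc root back.
 */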
575 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
576 struct btrfs_root *root, u64 objectid)
577 {
578 struct btrfs_fs_info *fs_info = root->fs_info;
579 struct btrfs_root *reloc_root;
580 struct extent_buffer *eb;
581 struct btrfs_root_item *root_item;
582 struct btrfs_key root_key;
583 int ret = 0;
584 bool must_abort = false;
585
586 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
587 if (!root_item)
588 return ERR_PTR(-ENOMEM);
589
590 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
591 root_key.type = BTRFS_ROOT_ITEM_KEY;
592 root_key.offset = objectid;
593
594 if (btrfs_root_id(root) == objectid) {
595 u64 commit_root_gen;
596
597 /* called by btrfs_init_reloc_root */
598 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
599 BTRFS_TREE_RELOC_OBJECTID);
600 if (ret)
601 goto fail;
602
603 /*
604 * Set the last_snapshot field to the generation of the commit
605 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
606 * correctly (returns true) when the relocation root is created
607 * either inside the critical section of a transaction commit
608 * (through transaction.c:qgroup_account_snapshot()) and when
609 * it's created before the transaction commit is started.
610 */
611 commit_root_gen = btrfs_header_generation(root->commit_root);
612 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
613 } else {
614 /*
615 * called by btrfs_reloc_post_snapshot_hook.
616 * the source tree is a reloc tree, all tree blocks
617 * modified after it was created have RELOC flag
618 * set in their headers. so it's OK to not update
619 * the 'last_snapshot'.
620 */
621 ret = btrfs_copy_root(trans, root, root->node, &eb,
622 BTRFS_TREE_RELOC_OBJECTID);
623 if (ret)
624 goto fail;
625 }
626
627 /*
628 * We have changed references at this point, we must abort the
629 * transaction if anything fails.
630 */
631 must_abort = true;
632
633 memcpy(root_item, &root->root_item, sizeof(*root_item));
634 btrfs_set_root_bytenr(root_item, eb->start);
635 btrfs_set_root_level(root_item, btrfs_header_level(eb));
636 btrfs_set_root_generation(root_item, trans->transid);
637
638 if (btrfs_root_id(root) == objectid) {
639 btrfs_set_root_refs(root_item, 0);
640 memset(&root_item->drop_progress, 0,
641 sizeof(struct btrfs_disk_key));
642 btrfs_set_root_drop_level(root_item, 0);
643 }
644
645 btrfs_tree_unlock(eb);
646 free_extent_buffer(eb);
647
648 ret = btrfs_insert_root(trans, fs_info->tree_root,
649 &root_key, root_item);
650 if (ret)
651 goto fail;
652
653 kfree(root_item);
654
655 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
656 if (IS_ERR(reloc_root)) {
657 ret = PTR_ERR(reloc_root);
658 goto abort;
659 }
660 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
661 btrfs_set_root_last_trans(reloc_root, trans->transid);
662 return reloc_root;
663 fail:
664 kfree(root_item);
665 abort:
666 if (must_abort)
667 btrfs_abort_transaction(trans, ret);
668 return ERR_PTR(ret);
669 }
670
671 /*
672 * Create a reloc tree for a given fs tree. The reloc tree is just a snapshot
673 * of the fs tree with a special root objectid.
674 *
675 * The reloc_root comes out of here with two references, one for
676 * root->reloc_root, and another for being on the rc->reloc_roots list.
677 */
678 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
679 struct btrfs_root *root)
680 {
681 struct btrfs_fs_info *fs_info = root->fs_info;
682 struct btrfs_root *reloc_root;
683 struct reloc_control *rc = fs_info->reloc_ctl;
684 struct btrfs_block_rsv *rsv;
685 int clear_rsv = 0;
686 int ret;
687
688 if (!rc)
689 return 0;
690
691 /*
692 * The subvolume has reloc tree but the swap is finished, no need to
693 * create/update the dead reloc tree
694 */
695 if (reloc_root_is_dead(root))
696 return 0;
697
698 /*
699 * This is subtle but important. We do not do
700 * record_root_in_transaction for reloc roots, instead we record their
701 * corresponding fs root, and then here we update the last trans for the
702 * reloc root. This means that we have to do this for the entire life
703 * of the reloc root, regardless of which stage of the relocation we are
704 * in.
705 */
706 if (root->reloc_root) {
707 reloc_root = root->reloc_root;
708 btrfs_set_root_last_trans(reloc_root, trans->transid);
709 return 0;
710 }
711
712 /*
713 * We are merging reloc roots, we do not need new reloc trees. Also
714 * reloc trees never need their own reloc tree.
715 */
716 if (!rc->create_reloc_tree || btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
717 return 0;
718
719 if (!trans->reloc_reserved) {
720 rsv = trans->block_rsv;
721 trans->block_rsv = rc->block_rsv;
722 clear_rsv = 1;
723 }
724 reloc_root = create_reloc_root(trans, root, btrfs_root_id(root));
725 if (clear_rsv)
726 trans->block_rsv = rsv;
727 if (IS_ERR(reloc_root))
728 return PTR_ERR(reloc_root);
729
730 ret = __add_reloc_root(reloc_root);
731 ASSERT(ret != -EEXIST);
732 if (ret) {
733 /* Pairs with create_reloc_root */
734 btrfs_put_root(reloc_root);
735 return ret;
736 }
737 root->reloc_root = btrfs_grab_root(reloc_root);
738 return 0;
739 }
740
741 /*
742 * update root item of reloc tree
743 */
744 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
745 struct btrfs_root *root)
746 {
747 struct btrfs_fs_info *fs_info = root->fs_info;
748 struct btrfs_root *reloc_root;
749 struct btrfs_root_item *root_item;
750 int ret;
751
752 if (!have_reloc_root(root))
753 return 0;
754
755 reloc_root = root->reloc_root;
756 root_item = &reloc_root->root_item;
757
758 /*
759 * We are probably ok here, but __del_reloc_root() will drop its ref of
760 * the root. We have the ref for root->reloc_root, but just in case
761 * hold it while we update the reloc root.
762 */
763 btrfs_grab_root(reloc_root);
764
765 /* root->reloc_root will stay until current relocation finished */
766 if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree &&
767 btrfs_root_refs(root_item) == 0) {
768 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
769 /*
770 * Mark the tree as dead before we change reloc_root so
771 * have_reloc_root will not touch it from now on.
772 */
773 smp_wmb();
774 __del_reloc_root(reloc_root);
775 }
776
777 if (reloc_root->commit_root != reloc_root->node) {
778 __update_reloc_root(reloc_root);
779 btrfs_set_root_node(root_item, reloc_root->node);
780 free_extent_buffer(reloc_root->commit_root);
781 reloc_root->commit_root = btrfs_root_node(reloc_root);
782 }
783
784 ret = btrfs_update_root(trans, fs_info->tree_root,
785 &reloc_root->root_key, root_item);
786 btrfs_put_root(reloc_root);
787 return ret;
788 }
789
790 /*
791 * get new location of data
792 */
793 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
794 u64 bytenr, u64 num_bytes)
795 {
796 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
797 struct btrfs_path *path;
798 struct btrfs_file_extent_item *fi;
799 struct extent_buffer *leaf;
800 int ret;
801
802 path = btrfs_alloc_path();
803 if (!path)
804 return -ENOMEM;
805
806 bytenr -= BTRFS_I(reloc_inode)->reloc_block_group_start;
807 ret = btrfs_lookup_file_extent(NULL, root, path,
808 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
809 if (ret < 0)
810 goto out;
811 if (ret > 0) {
812 ret = -ENOENT;
813 goto out;
814 }
815
816 leaf = path->nodes[0];
817 fi = btrfs_item_ptr(leaf, path->slots[0],
818 struct btrfs_file_extent_item);
819
820 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
821 btrfs_file_extent_compression(leaf, fi) ||
822 btrfs_file_extent_encryption(leaf, fi) ||
823 btrfs_file_extent_other_encoding(leaf, fi));
824
825 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
826 ret = -EINVAL;
827 goto out;
828 }
829
830 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
831 ret = 0;
832 out:
833 btrfs_free_path(path);
834 return ret;
835 }
836
837 /*
838 * update file extent items in the tree leaf to point to
839 * the new locations.
840 */
841 static noinline_for_stack
842 int replace_file_extents(struct btrfs_trans_handle *trans,
843 struct reloc_control *rc,
844 struct btrfs_root *root,
845 struct extent_buffer *leaf)
846 {
847 struct btrfs_fs_info *fs_info = root->fs_info;
848 struct btrfs_key key;
849 struct btrfs_file_extent_item *fi;
850 struct btrfs_inode *inode = NULL;
851 u64 parent;
852 u64 bytenr;
853 u64 new_bytenr = 0;
854 u64 num_bytes;
855 u64 end;
856 u32 nritems;
857 u32 i;
858 int ret = 0;
859 int first = 1;
860
861 if (rc->stage != UPDATE_DATA_PTRS)
862 return 0;
863
864 /* reloc trees always use full backref */
865 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
866 parent = leaf->start;
867 else
868 parent = 0;
869
870 nritems = btrfs_header_nritems(leaf);
871 for (i = 0; i < nritems; i++) {
872 struct btrfs_ref ref = { 0 };
873
874 cond_resched();
875 btrfs_item_key_to_cpu(leaf, &key, i);
876 if (key.type != BTRFS_EXTENT_DATA_KEY)
877 continue;
878 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
879 if (btrfs_file_extent_type(leaf, fi) ==
880 BTRFS_FILE_EXTENT_INLINE)
881 continue;
882 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
883 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
884 if (bytenr == 0)
885 continue;
886 if (!in_range(bytenr, rc->block_group->start,
887 rc->block_group->length))
888 continue;
889
890 /*
891 * if we are modifying block in fs tree, wait for read_folio
892 * to complete and drop the extent cache
893 */
894 if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
895 if (first) {
896 inode = btrfs_find_first_inode(root, key.objectid);
897 first = 0;
898 } else if (inode && btrfs_ino(inode) < key.objectid) {
899 btrfs_add_delayed_iput(inode);
900 inode = btrfs_find_first_inode(root, key.objectid);
901 }
902 if (inode && btrfs_ino(inode) == key.objectid) {
903 struct extent_state *cached_state = NULL;
904
905 end = key.offset +
906 btrfs_file_extent_num_bytes(leaf, fi);
907 WARN_ON(!IS_ALIGNED(key.offset,
908 fs_info->sectorsize));
909 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
910 end--;
911 /* Take mmap lock to serialize with reflinks. */
912 if (!down_read_trylock(&inode->i_mmap_lock))
913 continue;
914 ret = btrfs_try_lock_extent(&inode->io_tree, key.offset,
915 end, &cached_state);
916 if (!ret) {
917 up_read(&inode->i_mmap_lock);
918 continue;
919 }
920
921 btrfs_drop_extent_map_range(inode, key.offset, end, true);
922 btrfs_unlock_extent(&inode->io_tree, key.offset, end,
923 &cached_state);
924 up_read(&inode->i_mmap_lock);
925 }
926 }
927
928 ret = get_new_location(rc->data_inode, &new_bytenr,
929 bytenr, num_bytes);
930 if (ret) {
931 /*
932 * Don't have to abort since we've not changed anything
933 * in the file extent yet.
934 */
935 break;
936 }
937
938 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
939
940 key.offset -= btrfs_file_extent_offset(leaf, fi);
941 ref.action = BTRFS_ADD_DELAYED_REF;
942 ref.bytenr = new_bytenr;
943 ref.num_bytes = num_bytes;
944 ref.parent = parent;
945 ref.owning_root = btrfs_root_id(root);
946 ref.ref_root = btrfs_header_owner(leaf);
947 btrfs_init_data_ref(&ref, key.objectid, key.offset,
948 btrfs_root_id(root), false);
949 ret = btrfs_inc_extent_ref(trans, &ref);
950 if (ret) {
951 btrfs_abort_transaction(trans, ret);
952 break;
953 }
954
955 ref.action = BTRFS_DROP_DELAYED_REF;
956 ref.bytenr = bytenr;
957 ref.num_bytes = num_bytes;
958 ref.parent = parent;
959 ref.owning_root = btrfs_root_id(root);
960 ref.ref_root = btrfs_header_owner(leaf);
961 btrfs_init_data_ref(&ref, key.objectid, key.offset,
962 btrfs_root_id(root), false);
963 ret = btrfs_free_extent(trans, &ref);
964 if (ret) {
965 btrfs_abort_transaction(trans, ret);
966 break;
967 }
968 }
969 if (inode)
970 btrfs_add_delayed_iput(inode);
971 return ret;
972 }
973
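/*
 * Compare the key at @slot of @eb with the key at the current slot of @path
 * at the given @level.
 */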
974 static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
975 int slot, const struct btrfs_path *path,
976 int level)
977 {
978 struct btrfs_disk_key key1;
979 struct btrfs_disk_key key2;
980 btrfs_node_key(eb, &key1, slot);
981 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
982 return memcmp(&key1, &key2, sizeof(key1));
983 }
984
985 /*
986 * Try to replace tree blocks in fs tree with the new blocks
987 * in reloc tree. Tree blocks that haven't been modified since the
988 * reloc tree was created can be replaced.
989 *
990 * if a block was replaced, level of the block + 1 is returned.
991 * if no block got replaced, 0 is returned. if there are other
992 * errors, a negative error number is returned.
993 */
994 static noinline_for_stack
995 int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
996 struct btrfs_root *dest, struct btrfs_root *src,
997 struct btrfs_path *path, struct btrfs_key *next_key,
998 int lowest_level, int max_level)
999 {
1000 struct btrfs_fs_info *fs_info = dest->fs_info;
1001 struct extent_buffer *eb;
1002 struct extent_buffer *parent;
1003 struct btrfs_ref ref = { 0 };
1004 struct btrfs_key key;
1005 u64 old_bytenr;
1006 u64 new_bytenr;
1007 u64 old_ptr_gen;
1008 u64 new_ptr_gen;
1009 u64 last_snapshot;
1010 u32 blocksize;
1011 int cow = 0;
1012 int level;
1013 int ret;
1014 int slot;
1015
1016 ASSERT(btrfs_root_id(src) == BTRFS_TREE_RELOC_OBJECTID);
1017 ASSERT(btrfs_root_id(dest) != BTRFS_TREE_RELOC_OBJECTID);
1018
1019 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1020 again:
1021 slot = path->slots[lowest_level];
1022 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1023
1024 eb = btrfs_lock_root_node(dest);
1025 level = btrfs_header_level(eb);
1026
1027 if (level < lowest_level) {
1028 btrfs_tree_unlock(eb);
1029 free_extent_buffer(eb);
1030 return 0;
1031 }
1032
1033 if (cow) {
1034 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
1035 BTRFS_NESTING_COW);
1036 if (ret) {
1037 btrfs_tree_unlock(eb);
1038 free_extent_buffer(eb);
1039 return ret;
1040 }
1041 }
1042
1043 if (next_key) {
1044 next_key->objectid = (u64)-1;
1045 next_key->type = (u8)-1;
1046 next_key->offset = (u64)-1;
1047 }
1048
1049 parent = eb;
1050 while (1) {
1051 level = btrfs_header_level(parent);
1052 ASSERT(level >= lowest_level);
1053
1054 ret = btrfs_bin_search(parent, 0, &key, &slot);
1055 if (ret < 0)
1056 break;
1057 if (ret && slot > 0)
1058 slot--;
1059
1060 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1061 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1062
1063 old_bytenr = btrfs_node_blockptr(parent, slot);
1064 blocksize = fs_info->nodesize;
1065 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1066
1067 if (level <= max_level) {
1068 eb = path->nodes[level];
1069 new_bytenr = btrfs_node_blockptr(eb,
1070 path->slots[level]);
1071 new_ptr_gen = btrfs_node_ptr_generation(eb,
1072 path->slots[level]);
1073 } else {
1074 new_bytenr = 0;
1075 new_ptr_gen = 0;
1076 }
1077
1078 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1079 ret = level;
1080 break;
1081 }
1082
1083 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1084 memcmp_node_keys(parent, slot, path, level)) {
1085 if (level <= lowest_level) {
1086 ret = 0;
1087 break;
1088 }
1089
1090 eb = btrfs_read_node_slot(parent, slot);
1091 if (IS_ERR(eb)) {
1092 ret = PTR_ERR(eb);
1093 break;
1094 }
1095 btrfs_tree_lock(eb);
1096 if (cow) {
1097 ret = btrfs_cow_block(trans, dest, eb, parent,
1098 slot, &eb,
1099 BTRFS_NESTING_COW);
1100 if (ret) {
1101 btrfs_tree_unlock(eb);
1102 free_extent_buffer(eb);
1103 break;
1104 }
1105 }
1106
1107 btrfs_tree_unlock(parent);
1108 free_extent_buffer(parent);
1109
1110 parent = eb;
1111 continue;
1112 }
1113
1114 if (!cow) {
1115 btrfs_tree_unlock(parent);
1116 free_extent_buffer(parent);
1117 cow = 1;
1118 goto again;
1119 }
1120
1121 btrfs_node_key_to_cpu(path->nodes[level], &key,
1122 path->slots[level]);
1123 btrfs_release_path(path);
1124
1125 path->lowest_level = level;
1126 set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1127 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1128 clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1129 path->lowest_level = 0;
1130 if (ret) {
1131 if (ret > 0)
1132 ret = -ENOENT;
1133 break;
1134 }
1135
1136 /*
1137 * Inform qgroup to trace both subtrees.
1138 *
1139 * We must trace both trees.
1140 * 1) Tree reloc subtree
1141 * If not traced, we will leak data numbers
1142 * 2) Fs subtree
1143 * If not traced, we will double count old data
1144 *
1145 * We don't scan the subtree right now, but only record
1146 * the swapped tree blocks.
1147 * The real subtree rescan is delayed until we have new
1148 * CoW on the subtree root node before transaction commit.
1149 */
1150 ret = btrfs_qgroup_add_swapped_blocks(dest,
1151 rc->block_group, parent, slot,
1152 path->nodes[level], path->slots[level],
1153 last_snapshot);
1154 if (ret < 0)
1155 break;
1156 /*
1157 * swap blocks in fs tree and reloc tree.
1158 */
1159 btrfs_set_node_blockptr(parent, slot, new_bytenr);
1160 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1161
1162 btrfs_set_node_blockptr(path->nodes[level],
1163 path->slots[level], old_bytenr);
1164 btrfs_set_node_ptr_generation(path->nodes[level],
1165 path->slots[level], old_ptr_gen);
1166
1167 ref.action = BTRFS_ADD_DELAYED_REF;
1168 ref.bytenr = old_bytenr;
1169 ref.num_bytes = blocksize;
1170 ref.parent = path->nodes[level]->start;
1171 ref.owning_root = btrfs_root_id(src);
1172 ref.ref_root = btrfs_root_id(src);
1173 btrfs_init_tree_ref(&ref, level - 1, 0, true);
1174 ret = btrfs_inc_extent_ref(trans, &ref);
1175 if (ret) {
1176 btrfs_abort_transaction(trans, ret);
1177 break;
1178 }
1179
1180 ref.action = BTRFS_ADD_DELAYED_REF;
1181 ref.bytenr = new_bytenr;
1182 ref.num_bytes = blocksize;
1183 ref.parent = 0;
1184 ref.owning_root = btrfs_root_id(dest);
1185 ref.ref_root = btrfs_root_id(dest);
1186 btrfs_init_tree_ref(&ref, level - 1, 0, true);
1187 ret = btrfs_inc_extent_ref(trans, &ref);
1188 if (ret) {
1189 btrfs_abort_transaction(trans, ret);
1190 break;
1191 }
1192
1193 /* We don't know the real owning_root, use 0. */
1194 ref.action = BTRFS_DROP_DELAYED_REF;
1195 ref.bytenr = new_bytenr;
1196 ref.num_bytes = blocksize;
1197 ref.parent = path->nodes[level]->start;
1198 ref.owning_root = 0;
1199 ref.ref_root = btrfs_root_id(src);
1200 btrfs_init_tree_ref(&ref, level - 1, 0, true);
1201 ret = btrfs_free_extent(trans, &ref);
1202 if (ret) {
1203 btrfs_abort_transaction(trans, ret);
1204 break;
1205 }
1206
1207 /* We don't know the real owning_root, use 0. */
1208 ref.action = BTRFS_DROP_DELAYED_REF;
1209 ref.bytenr = old_bytenr;
1210 ref.num_bytes = blocksize;
1211 ref.parent = 0;
1212 ref.owning_root = 0;
1213 ref.ref_root = btrfs_root_id(dest);
1214 btrfs_init_tree_ref(&ref, level - 1, 0, true);
1215 ret = btrfs_free_extent(trans, &ref);
1216 if (ret) {
1217 btrfs_abort_transaction(trans, ret);
1218 break;
1219 }
1220
1221 btrfs_unlock_up_safe(path, 0);
1222
1223 ret = level;
1224 break;
1225 }
1226 btrfs_tree_unlock(parent);
1227 free_extent_buffer(parent);
1228 return ret;
1229 }
1230
1231 /*
1232 * helper to find next relocated block in reloc tree
1233 */
1234 static noinline_for_stack
1235 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1236 int *level)
1237 {
1238 struct extent_buffer *eb;
1239 int i;
1240 u64 last_snapshot;
1241 u32 nritems;
1242
1243 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1244
1245 for (i = 0; i < *level; i++) {
1246 free_extent_buffer(path->nodes[i]);
1247 path->nodes[i] = NULL;
1248 }
1249
1250 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1251 eb = path->nodes[i];
1252 nritems = btrfs_header_nritems(eb);
1253 while (path->slots[i] + 1 < nritems) {
1254 path->slots[i]++;
1255 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1256 last_snapshot)
1257 continue;
1258
1259 *level = i;
1260 return 0;
1261 }
1262 free_extent_buffer(path->nodes[i]);
1263 path->nodes[i] = NULL;
1264 }
1265 return 1;
1266 }
1267
1268 /*
1269 * walk down reloc tree to find relocated block of lowest level
1270 */
1271 static noinline_for_stack
1272 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1273 int *level)
1274 {
1275 struct extent_buffer *eb = NULL;
1276 int i;
1277 u64 ptr_gen = 0;
1278 u64 last_snapshot;
1279 u32 nritems;
1280
1281 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1282
1283 for (i = *level; i > 0; i--) {
1284 eb = path->nodes[i];
1285 nritems = btrfs_header_nritems(eb);
1286 while (path->slots[i] < nritems) {
1287 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1288 if (ptr_gen > last_snapshot)
1289 break;
1290 path->slots[i]++;
1291 }
1292 if (path->slots[i] >= nritems) {
1293 if (i == *level)
1294 break;
1295 *level = i + 1;
1296 return 0;
1297 }
1298 if (i == 1) {
1299 *level = i;
1300 return 0;
1301 }
1302
1303 eb = btrfs_read_node_slot(eb, path->slots[i]);
1304 if (IS_ERR(eb))
1305 return PTR_ERR(eb);
1306 BUG_ON(btrfs_header_level(eb) != i - 1);
1307 path->nodes[i - 1] = eb;
1308 path->slots[i - 1] = 0;
1309 }
1310 return 1;
1311 }
1312
1313 /*
1314 * invalidate extent cache for file extents whose key is in the range of
1315 * [min_key, max_key)
1316 */
1317 static int invalidate_extent_cache(struct btrfs_root *root,
1318 const struct btrfs_key *min_key,
1319 const struct btrfs_key *max_key)
1320 {
1321 struct btrfs_fs_info *fs_info = root->fs_info;
1322 struct btrfs_inode *inode = NULL;
1323 u64 objectid;
1324 u64 start, end;
1325 u64 ino;
1326
1327 objectid = min_key->objectid;
1328 while (1) {
1329 struct extent_state *cached_state = NULL;
1330
1331 cond_resched();
1332 if (inode)
1333 iput(&inode->vfs_inode);
1334
1335 if (objectid > max_key->objectid)
1336 break;
1337
1338 inode = btrfs_find_first_inode(root, objectid);
1339 if (!inode)
1340 break;
1341 ino = btrfs_ino(inode);
1342
1343 if (ino > max_key->objectid) {
1344 iput(&inode->vfs_inode);
1345 break;
1346 }
1347
1348 objectid = ino + 1;
1349 if (!S_ISREG(inode->vfs_inode.i_mode))
1350 continue;
1351
1352 if (unlikely(min_key->objectid == ino)) {
1353 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1354 continue;
1355 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1356 start = 0;
1357 else {
1358 start = min_key->offset;
1359 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
1360 }
1361 } else {
1362 start = 0;
1363 }
1364
1365 if (unlikely(max_key->objectid == ino)) {
1366 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1367 continue;
1368 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1369 end = (u64)-1;
1370 } else {
1371 if (max_key->offset == 0)
1372 continue;
1373 end = max_key->offset;
1374 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1375 end--;
1376 }
1377 } else {
1378 end = (u64)-1;
1379 }
1380
1381 /* the lock_extent waits for read_folio to complete */
1382 btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
1383 btrfs_drop_extent_map_range(inode, start, end, true);
1384 btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
1385 }
1386 return 0;
1387 }
1388
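/*
 * Find the next key at or above @level in @path. Returns 0 and stores the key
 * in @key, or 1 if there is no next key.
 */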
1389 static int find_next_key(struct btrfs_path *path, int level,
1390 struct btrfs_key *key)
1391
1392 {
1393 while (level < BTRFS_MAX_LEVEL) {
1394 if (!path->nodes[level])
1395 break;
1396 if (path->slots[level] + 1 <
1397 btrfs_header_nritems(path->nodes[level])) {
1398 btrfs_node_key_to_cpu(path->nodes[level], key,
1399 path->slots[level] + 1);
1400 return 0;
1401 }
1402 level++;
1403 }
1404 return 1;
1405 }
1406
1407 /*
1408 * Insert current subvolume into reloc_control::dirty_subvol_roots
1409 */
1410 static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
1411 struct reloc_control *rc,
1412 struct btrfs_root *root)
1413 {
1414 struct btrfs_root *reloc_root = root->reloc_root;
1415 struct btrfs_root_item *reloc_root_item;
1416 int ret;
1417
1418 /* @root must be a subvolume tree root with a valid reloc tree */
1419 ASSERT(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
1420 ASSERT(reloc_root);
1421
1422 reloc_root_item = &reloc_root->root_item;
1423 memset(&reloc_root_item->drop_progress, 0,
1424 sizeof(reloc_root_item->drop_progress));
1425 btrfs_set_root_drop_level(reloc_root_item, 0);
1426 btrfs_set_root_refs(reloc_root_item, 0);
1427 ret = btrfs_update_reloc_root(trans, root);
1428 if (ret)
1429 return ret;
1430
1431 if (list_empty(&root->reloc_dirty_list)) {
1432 btrfs_grab_root(root);
1433 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
1434 }
1435
1436 return 0;
1437 }
1438
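/*
 * Clean up the subvolume roots queued on rc->dirty_subvol_roots: drop the
 * reloc tree of each merged subvolume and drop orphan reloc trees directly.
 */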
1439 static int clean_dirty_subvols(struct reloc_control *rc)
1440 {
1441 struct btrfs_root *root;
1442 struct btrfs_root *next;
1443 int ret = 0;
1444 int ret2;
1445
1446 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
1447 reloc_dirty_list) {
1448 if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
1449 /* Merged subvolume, cleanup its reloc root */
1450 struct btrfs_root *reloc_root = root->reloc_root;
1451
1452 list_del_init(&root->reloc_dirty_list);
1453 root->reloc_root = NULL;
1454 /*
1455 * Need barrier to ensure clear_bit() only happens after
1456 * root->reloc_root = NULL. Pairs with have_reloc_root.
1457 */
1458 smp_wmb();
1459 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1460 if (reloc_root) {
1461 /*
1462 * btrfs_drop_snapshot drops our ref we hold for
1463 * ->reloc_root. If it fails however we must
1464 * drop the ref ourselves.
1465 */
1466 ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
1467 if (ret2 < 0) {
1468 btrfs_put_root(reloc_root);
1469 if (!ret)
1470 ret = ret2;
1471 }
1472 }
1473 btrfs_put_root(root);
1474 } else {
1475 /* Orphan reloc tree, just clean it up */
1476 ret2 = btrfs_drop_snapshot(root, 0, 1);
1477 if (ret2 < 0) {
1478 btrfs_put_root(root);
1479 if (!ret)
1480 ret = ret2;
1481 }
1482 }
1483 }
1484 return ret;
1485 }
1486
1487 /*
1488 * merge the relocated tree blocks in reloc tree with corresponding
1489 * fs tree.
1490 */
1491 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1492 struct btrfs_root *root)
1493 {
1494 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1495 struct btrfs_key key;
1496 struct btrfs_key next_key;
1497 struct btrfs_trans_handle *trans = NULL;
1498 struct btrfs_root *reloc_root;
1499 struct btrfs_root_item *root_item;
1500 struct btrfs_path *path;
1501 struct extent_buffer *leaf;
1502 int reserve_level;
1503 int level;
1504 int max_level;
1505 int replaced = 0;
1506 int ret = 0;
1507 u32 min_reserved;
1508
1509 path = btrfs_alloc_path();
1510 if (!path)
1511 return -ENOMEM;
1512 path->reada = READA_FORWARD;
1513
1514 reloc_root = root->reloc_root;
1515 root_item = &reloc_root->root_item;
1516
1517 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1518 level = btrfs_root_level(root_item);
1519 atomic_inc(&reloc_root->node->refs);
1520 path->nodes[level] = reloc_root->node;
1521 path->slots[level] = 0;
1522 } else {
1523 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1524
1525 level = btrfs_root_drop_level(root_item);
1526 BUG_ON(level == 0);
1527 path->lowest_level = level;
1528 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1529 path->lowest_level = 0;
1530 if (ret < 0) {
1531 btrfs_free_path(path);
1532 return ret;
1533 }
1534
1535 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
1536 path->slots[level]);
1537 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
1538
1539 btrfs_unlock_up_safe(path, 0);
1540 }
1541
1542 /*
1543 * In merge_reloc_root(), we modify the upper level pointer to swap the
1544 * tree blocks between reloc tree and subvolume tree. Thus for tree
1545 * block COW, we COW at most from level 1 to root level for each tree.
1546 *
1547 * Thus the needed metadata size is at most root_level * nodesize,
1548 * and * 2 since we have two trees to COW.
1549 */
1550 reserve_level = max_t(int, 1, btrfs_root_level(root_item));
1551 min_reserved = fs_info->nodesize * reserve_level * 2;
1552 memset(&next_key, 0, sizeof(next_key));
1553
1554 while (1) {
1555 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
1556 min_reserved,
1557 BTRFS_RESERVE_FLUSH_LIMIT);
1558 if (ret)
1559 goto out;
1560 trans = btrfs_start_transaction(root, 0);
1561 if (IS_ERR(trans)) {
1562 ret = PTR_ERR(trans);
1563 trans = NULL;
1564 goto out;
1565 }
1566
1567 /*
1568 * At this point we no longer have a reloc_control, so we can't
1569 * depend on btrfs_init_reloc_root to update our last_trans.
1570 *
1571 * But that's ok, we started the trans handle on our
1572 * corresponding fs_root, which means it's been added to the
1573 * dirty list. At commit time we'll still call
1574 * btrfs_update_reloc_root() and update our root item
1575 * appropriately.
1576 */
1577 btrfs_set_root_last_trans(reloc_root, trans->transid);
1578 trans->block_rsv = rc->block_rsv;
1579
1580 replaced = 0;
1581 max_level = level;
1582
1583 ret = walk_down_reloc_tree(reloc_root, path, &level);
1584 if (ret < 0)
1585 goto out;
1586 if (ret > 0)
1587 break;
1588
1589 if (!find_next_key(path, level, &key) &&
1590 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1591 ret = 0;
1592 } else {
1593 ret = replace_path(trans, rc, root, reloc_root, path,
1594 &next_key, level, max_level);
1595 }
1596 if (ret < 0)
1597 goto out;
1598 if (ret > 0) {
1599 level = ret;
1600 btrfs_node_key_to_cpu(path->nodes[level], &key,
1601 path->slots[level]);
1602 replaced = 1;
1603 }
1604
1605 ret = walk_up_reloc_tree(reloc_root, path, &level);
1606 if (ret > 0)
1607 break;
1608
1609 BUG_ON(level == 0);
1610 /*
1611 * save the merging progress in the drop_progress.
1612 * this is OK since root refs == 1 in this case.
1613 */
1614 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
1615 path->slots[level]);
1616 btrfs_set_root_drop_level(root_item, level);
1617
1618 btrfs_end_transaction_throttle(trans);
1619 trans = NULL;
1620
1621 btrfs_btree_balance_dirty(fs_info);
1622
1623 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1624 invalidate_extent_cache(root, &key, &next_key);
1625 }
1626
1627 /*
1628 * Handle the case where only one block in the fs tree needs to be
1629 * relocated and that block is the tree root.
1630 */
1631 leaf = btrfs_lock_root_node(root);
1632 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
1633 BTRFS_NESTING_COW);
1634 btrfs_tree_unlock(leaf);
1635 free_extent_buffer(leaf);
1636 out:
1637 btrfs_free_path(path);
1638
1639 if (ret == 0) {
1640 ret = insert_dirty_subvol(trans, rc, root);
1641 if (ret)
1642 btrfs_abort_transaction(trans, ret);
1643 }
1644
1645 if (trans)
1646 btrfs_end_transaction_throttle(trans);
1647
1648 btrfs_btree_balance_dirty(fs_info);
1649
1650 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1651 invalidate_extent_cache(root, &key, &next_key);
1652
1653 return ret;
1654 }
1655
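/*
 * Prepare to merge reloc trees: reserve metadata space for the merge, set each
 * reloc root's refs to 1 so that a later btrfs_recover_relocation() knows
 * merging must be resumed, and write out the updated reloc root items.
 */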
1656 static noinline_for_stack
1657 int prepare_to_merge(struct reloc_control *rc, int err)
1658 {
1659 struct btrfs_root *root = rc->extent_root;
1660 struct btrfs_fs_info *fs_info = root->fs_info;
1661 struct btrfs_root *reloc_root;
1662 struct btrfs_trans_handle *trans;
1663 LIST_HEAD(reloc_roots);
1664 u64 num_bytes = 0;
1665 int ret;
1666
1667 mutex_lock(&fs_info->reloc_mutex);
1668 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1669 rc->merging_rsv_size += rc->nodes_relocated * 2;
1670 mutex_unlock(&fs_info->reloc_mutex);
1671
1672 again:
1673 if (!err) {
1674 num_bytes = rc->merging_rsv_size;
1675 ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
1676 BTRFS_RESERVE_FLUSH_ALL);
1677 if (ret)
1678 err = ret;
1679 }
1680
1681 trans = btrfs_join_transaction(rc->extent_root);
1682 if (IS_ERR(trans)) {
1683 if (!err)
1684 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1685 num_bytes, NULL);
1686 return PTR_ERR(trans);
1687 }
1688
1689 if (!err) {
1690 if (num_bytes != rc->merging_rsv_size) {
1691 btrfs_end_transaction(trans);
1692 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1693 num_bytes, NULL);
1694 goto again;
1695 }
1696 }
1697
1698 rc->merge_reloc_tree = true;
1699
1700 while (!list_empty(&rc->reloc_roots)) {
1701 reloc_root = list_first_entry(&rc->reloc_roots,
1702 struct btrfs_root, root_list);
1703 list_del_init(&reloc_root->root_list);
1704
1705 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1706 false);
1707 if (IS_ERR(root)) {
1708 /*
1709 * Even if we have an error we need this reloc root
1710 * back on our list so we can clean up properly.
1711 */
1712 list_add(&reloc_root->root_list, &reloc_roots);
1713 btrfs_abort_transaction(trans, (int)PTR_ERR(root));
1714 if (!err)
1715 err = PTR_ERR(root);
1716 break;
1717 }
1718
1719 if (unlikely(root->reloc_root != reloc_root)) {
1720 if (root->reloc_root) {
1721 btrfs_err(fs_info,
1722 "reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
1723 btrfs_root_id(root),
1724 btrfs_root_id(root->reloc_root),
1725 root->reloc_root->root_key.type,
1726 root->reloc_root->root_key.offset,
1727 btrfs_root_generation(
1728 &root->reloc_root->root_item),
1729 btrfs_root_id(reloc_root),
1730 reloc_root->root_key.type,
1731 reloc_root->root_key.offset,
1732 btrfs_root_generation(
1733 &reloc_root->root_item));
1734 } else {
1735 btrfs_err(fs_info,
1736 "reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
1737 btrfs_root_id(root),
1738 btrfs_root_id(reloc_root),
1739 reloc_root->root_key.type,
1740 reloc_root->root_key.offset,
1741 btrfs_root_generation(
1742 &reloc_root->root_item));
1743 }
1744 list_add(&reloc_root->root_list, &reloc_roots);
1745 btrfs_put_root(root);
1746 btrfs_abort_transaction(trans, -EUCLEAN);
1747 if (!err)
1748 err = -EUCLEAN;
1749 break;
1750 }
1751
1752 /*
1753 * set reference count to 1, so btrfs_recover_relocation
1754 * knows it should resume merging
1755 */
1756 if (!err)
1757 btrfs_set_root_refs(&reloc_root->root_item, 1);
1758 ret = btrfs_update_reloc_root(trans, root);
1759
1760 /*
1761 * Even if we have an error we need this reloc root back on our
1762 * list so we can clean up properly.
1763 */
1764 list_add(&reloc_root->root_list, &reloc_roots);
1765 btrfs_put_root(root);
1766
1767 if (ret) {
1768 btrfs_abort_transaction(trans, ret);
1769 if (!err)
1770 err = ret;
1771 break;
1772 }
1773 }
1774
1775 list_splice(&reloc_roots, &rc->reloc_roots);
1776
1777 if (!err)
1778 err = btrfs_commit_transaction(trans);
1779 else
1780 btrfs_end_transaction(trans);
1781 return err;
1782 }
1783
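/*
 * Remove every reloc root on @list from the reloc root mapping tree and drop
 * the reference held by the list.
 */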
1784 static noinline_for_stack
1785 void free_reloc_roots(struct list_head *list)
1786 {
1787 struct btrfs_root *reloc_root, *tmp;
1788
1789 list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1790 __del_reloc_root(reloc_root);
1791 }
1792
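/*
 * Merge every reloc tree on rc->reloc_roots into its subvolume tree, then
 * queue the merged subvolumes (and any orphan reloc trees) for cleanup.
 */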
1793 static noinline_for_stack
1794 void merge_reloc_roots(struct reloc_control *rc)
1795 {
1796 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1797 struct btrfs_root *root;
1798 struct btrfs_root *reloc_root;
1799 LIST_HEAD(reloc_roots);
1800 int found = 0;
1801 int ret = 0;
1802 again:
1803 root = rc->extent_root;
1804
1805 /*
1806 * This serializes us with btrfs_record_root_in_transaction;
1807 * we have to make sure nobody is in the middle of
1808 * adding their roots to the list while we are
1809 * doing this splice.
1810 */
1811 mutex_lock(&fs_info->reloc_mutex);
1812 list_splice_init(&rc->reloc_roots, &reloc_roots);
1813 mutex_unlock(&fs_info->reloc_mutex);
1814
1815 while (!list_empty(&reloc_roots)) {
1816 found = 1;
1817 reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);
1818
1819 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1820 false);
1821 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1822 if (WARN_ON(IS_ERR(root))) {
1823 /*
1824 * For recovery we read the fs roots on mount,
1825 * and if we didn't find the root then we marked
1826 * the reloc root as a garbage root. For normal
1827 * relocation obviously the root should exist in
1828 * memory. However there's no reason we can't
1829 * handle the error properly here just in case.
1830 */
1831 ret = PTR_ERR(root);
1832 goto out;
1833 }
1834 if (WARN_ON(root->reloc_root != reloc_root)) {
1835 /*
1836 * This can happen if on-disk metadata has some
1837 * corruption, e.g. bad reloc tree key offset.
1838 */
1839 ret = -EINVAL;
1840 goto out;
1841 }
1842 ret = merge_reloc_root(rc, root);
1843 btrfs_put_root(root);
1844 if (ret) {
1845 if (list_empty(&reloc_root->root_list))
1846 list_add_tail(&reloc_root->root_list,
1847 &reloc_roots);
1848 goto out;
1849 }
1850 } else {
1851 if (!IS_ERR(root)) {
1852 if (root->reloc_root == reloc_root) {
1853 root->reloc_root = NULL;
1854 btrfs_put_root(reloc_root);
1855 }
1856 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
1857 &root->state);
1858 btrfs_put_root(root);
1859 }
1860
1861 list_del_init(&reloc_root->root_list);
1862 /* Don't forget to queue this reloc root for cleanup */
1863 list_add_tail(&reloc_root->reloc_dirty_list,
1864 &rc->dirty_subvol_roots);
1865 }
1866 }
1867
1868 if (found) {
1869 found = 0;
1870 goto again;
1871 }
1872 out:
1873 if (ret) {
1874 btrfs_handle_fs_error(fs_info, ret, NULL);
1875 free_reloc_roots(&reloc_roots);
1876
1877 /* new reloc root may be added */
1878 mutex_lock(&fs_info->reloc_mutex);
1879 list_splice_init(&rc->reloc_roots, &reloc_roots);
1880 mutex_unlock(&fs_info->reloc_mutex);
1881 free_reloc_roots(&reloc_roots);
1882 }
1883
1884 /*
1885 * We used to have
1886 *
1887 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
1888 *
1889 * here, but it's wrong. If we fail to start the transaction in
1890 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
1891 * have actually been removed from the reloc_root_tree rb tree. This is
1892 * fine because we're bailing here, and we hold a reference on the root
1893 * for the list that holds it, so these roots will be cleaned up when we
1894 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
1895 * will be cleaned up on unmount.
1896 *
1897 * The remaining nodes will be cleaned up by free_reloc_control.
1898 */
1899 }
1900
1901 static void free_block_list(struct rb_root *blocks)
1902 {
1903 struct tree_block *block;
1904 struct rb_node *rb_node;
1905 while ((rb_node = rb_first(blocks))) {
1906 block = rb_entry(rb_node, struct tree_block, rb_node);
1907 rb_erase(rb_node, blocks);
1908 kfree(block);
1909 }
1910 }
1911
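/*
 * Record the subvolume root that owns @reloc_root in the current
 * transaction.  This is a no-op if the root was already recorded in this
 * transaction.
 */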
1912 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
1913 struct btrfs_root *reloc_root)
1914 {
1915 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
1916 struct btrfs_root *root;
1917 int ret;
1918
1919 if (btrfs_get_root_last_trans(reloc_root) == trans->transid)
1920 return 0;
1921
1922 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
1923
1924 /*
1925 * This should succeed, since we can't have a reloc root without having
1926 * already looked up the actual root and created the reloc root for this
1927 * root.
1928 *
1929 * However if there's some sort of corruption where we have a ref to a
1930 * reloc root without a corresponding root this could return ENOENT.
1931 */
1932 if (IS_ERR(root)) {
1933 DEBUG_WARN("error %ld reading root for reloc root", PTR_ERR(root));
1934 return PTR_ERR(root);
1935 }
1936 if (root->reloc_root != reloc_root) {
1937 DEBUG_WARN("unexpected reloc root found");
1938 btrfs_err(fs_info,
1939 "root %llu has two reloc roots associated with it",
1940 reloc_root->root_key.offset);
1941 btrfs_put_root(root);
1942 return -EUCLEAN;
1943 }
1944 ret = btrfs_record_root_in_trans(trans, root);
1945 btrfs_put_root(root);
1946
1947 return ret;
1948 }
1949
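/*
 * Walk up the backref chain from @node, record the owning root in the
 * transaction and return the reloc root that the new tree blocks should be
 * COWed into, filling rc->backref_cache.path for btrfs_reloc_cow_block().
 */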
1950 static noinline_for_stack
1951 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
1952 struct reloc_control *rc,
1953 struct btrfs_backref_node *node,
1954 struct btrfs_backref_edge *edges[])
1955 {
1956 struct btrfs_backref_node *next;
1957 struct btrfs_root *root;
1958 int index = 0;
1959 int ret;
1960
1961 next = walk_up_backref(node, edges, &index);
1962 root = next->root;
1963
1964 /*
1965 * If there is no root, then our references for this block are
1966 * incomplete, as we should be able to walk all the way up to a block
1967 * that is owned by a root.
1968 *
1969 * This path is only for SHAREABLE roots, so if we come upon a
1970 * non-SHAREABLE root then we have backrefs that resolve improperly.
1971 *
1972 * Both of these cases indicate file system corruption, or a bug in the
1973 * backref walking code.
1974 */
1975 if (unlikely(!root)) {
1976 btrfs_err(trans->fs_info,
1977 "bytenr %llu doesn't have a backref path ending in a root",
1978 node->bytenr);
1979 return ERR_PTR(-EUCLEAN);
1980 }
1981 if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) {
1982 btrfs_err(trans->fs_info,
1983 "bytenr %llu has multiple refs with one ending in a non-shareable root",
1984 node->bytenr);
1985 return ERR_PTR(-EUCLEAN);
1986 }
1987
1988 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
1989 ret = record_reloc_root_in_trans(trans, root);
1990 if (ret)
1991 return ERR_PTR(ret);
1992 goto found;
1993 }
1994
1995 ret = btrfs_record_root_in_trans(trans, root);
1996 if (ret)
1997 return ERR_PTR(ret);
1998 root = root->reloc_root;
1999
2000 /*
2001 * We could have raced with another thread which failed, so
2002 * root->reloc_root may not be set; return ENOENT in this case.
2003 */
2004 if (!root)
2005 return ERR_PTR(-ENOENT);
2006
2007 if (next->new_bytenr) {
2008 /*
2009 * We just created the reloc root, so we shouldn't have
2010 * ->new_bytenr set yet. If it is then we have multiple roots
2011 * pointing at the same bytenr which indicates corruption, or
2012 * we've made a mistake in the backref walking code.
2013 */
2014 ASSERT(next->new_bytenr == 0);
2015 btrfs_err(trans->fs_info,
2016 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
2017 node->bytenr, next->bytenr);
2018 return ERR_PTR(-EUCLEAN);
2019 }
2020
2021 next->new_bytenr = root->node->start;
2022 btrfs_put_root(next->root);
2023 next->root = btrfs_grab_root(root);
2024 ASSERT(next->root);
2025 mark_block_processed(rc, next);
2026 found:
2027 next = node;
2028 /* setup backref node path for btrfs_reloc_cow_block */
2029 while (1) {
2030 rc->backref_cache.path[next->level] = next;
2031 if (--index < 0)
2032 break;
2033 next = edges[index]->node[UPPER];
2034 }
2035 return root;
2036 }
2037
2038 /*
2039 * Select a tree root for relocation.
2040 *
2041 * Return NULL if the block is not shareable. We should use do_relocation() in
2042 * this case.
2043 *
2044 * Return a tree root pointer if the block is shareable.
2045 * Return -ENOENT if the block is root of reloc tree.
2046 */
2047 static noinline_for_stack
2048 struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2049 {
2050 struct btrfs_backref_node *next;
2051 struct btrfs_root *root;
2052 struct btrfs_root *fs_root = NULL;
2053 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2054 int index = 0;
2055
2056 next = node;
2057 while (1) {
2058 cond_resched();
2059 next = walk_up_backref(next, edges, &index);
2060 root = next->root;
2061
2062 /*
2063 * This can occur if we have incomplete extent refs leading all
2064 * the way up a particular path; in this case return -EUCLEAN.
2065 */
2066 if (!root)
2067 return ERR_PTR(-EUCLEAN);
2068
2069 /* No other choice for non-shareable tree */
2070 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2071 return root;
2072
2073 if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID)
2074 fs_root = root;
2075
2076 if (next != node)
2077 return NULL;
2078
2079 next = walk_down_backref(edges, &index);
2080 if (!next || next->level <= node->level)
2081 break;
2082 }
2083
2084 if (!fs_root)
2085 return ERR_PTR(-ENOENT);
2086 return fs_root;
2087 }
2088
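/*
 * Calculate how many bytes of metadata must be reserved to relocate @node:
 * one nodesize for every unprocessed block on every backref path leading
 * up from it.
 */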
2089 static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
2090 struct btrfs_backref_node *node)
2091 {
2092 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2093 struct btrfs_backref_node *next = node;
2094 struct btrfs_backref_edge *edge;
2095 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2096 u64 num_bytes = 0;
2097 int index = 0;
2098
2099 BUG_ON(node->processed);
2100
2101 while (next) {
2102 cond_resched();
2103 while (1) {
2104 if (next->processed)
2105 break;
2106
2107 num_bytes += fs_info->nodesize;
2108
2109 if (list_empty(&next->upper))
2110 break;
2111
2112 edge = list_first_entry(&next->upper, struct btrfs_backref_edge,
2113 list[LOWER]);
2114 edges[index++] = edge;
2115 next = edge->node[UPPER];
2116 }
2117 next = walk_down_backref(edges, &index);
2118 }
2119 return num_bytes;
2120 }
2121
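/*
 * Refill rc->block_rsv by @num_bytes with limited flushing.  On ENOSPC,
 * grow the reservation target and return -EAGAIN so the caller can drop
 * the transaction and retry with full flushing.
 */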
2122 static int refill_metadata_space(struct btrfs_trans_handle *trans,
2123 struct reloc_control *rc, u64 num_bytes)
2124 {
2125 struct btrfs_fs_info *fs_info = trans->fs_info;
2126 int ret;
2127
2128 trans->block_rsv = rc->block_rsv;
2129 rc->reserved_bytes += num_bytes;
2130
2131 /*
2132 * We are under a transaction here so we can only do limited flushing.
2133 * If we get an enospc just kick back -EAGAIN so we know to drop the
2134 * transaction and try to refill when we can flush all the things.
2135 */
2136 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2137 BTRFS_RESERVE_FLUSH_LIMIT);
2138 if (ret) {
2139 u64 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2140
2141 while (tmp <= rc->reserved_bytes)
2142 tmp <<= 1;
2143 /*
2144 * Only one thread can access block_rsv at this point,
2145 * so we don't need to hold a lock to protect it.
2146 * We expand the reservation size here to allow enough
2147 * space for relocation, and we will return early in
2148 * the ENOSPC case.
2149 */
2150 rc->block_rsv->size = tmp + fs_info->nodesize *
2151 RELOCATION_RESERVED_NODES;
2152 return -EAGAIN;
2153 }
2154
2155 return 0;
2156 }
2157
2158 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2159 struct reloc_control *rc,
2160 struct btrfs_backref_node *node)
2161 {
2162 u64 num_bytes;
2163
2164 num_bytes = calcu_metadata_size(rc, node) * 2;
2165 return refill_metadata_space(trans, rc, num_bytes);
2166 }
2167
2168 /*
2169 * Relocate a tree block, and then update pointers in upper level
2170 * blocks that reference the block to point to the new location.
2171 *
2172 * If called by link_to_upper, the block has already been relocated;
2173 * in that case this function just updates pointers.
2174 */
2175 static int do_relocation(struct btrfs_trans_handle *trans,
2176 struct reloc_control *rc,
2177 struct btrfs_backref_node *node,
2178 struct btrfs_key *key,
2179 struct btrfs_path *path, int lowest)
2180 {
2181 struct btrfs_backref_node *upper;
2182 struct btrfs_backref_edge *edge;
2183 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2184 struct btrfs_root *root;
2185 struct extent_buffer *eb;
2186 u32 blocksize;
2187 u64 bytenr;
2188 int slot;
2189 int ret = 0;
2190
2191 /*
2192 * If we are lowest then this is the first time we're processing this
2193 * block, and thus shouldn't have an eb associated with it yet.
2194 */
2195 ASSERT(!lowest || !node->eb);
2196
2197 path->lowest_level = node->level + 1;
2198 rc->backref_cache.path[node->level] = node;
2199 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2200 cond_resched();
2201
2202 upper = edge->node[UPPER];
2203 root = select_reloc_root(trans, rc, upper, edges);
2204 if (IS_ERR(root)) {
2205 ret = PTR_ERR(root);
2206 goto next;
2207 }
2208
2209 if (upper->eb && !upper->locked) {
2210 if (!lowest) {
2211 ret = btrfs_bin_search(upper->eb, 0, key, &slot);
2212 if (ret < 0)
2213 goto next;
2214 BUG_ON(ret);
2215 bytenr = btrfs_node_blockptr(upper->eb, slot);
2216 if (node->eb->start == bytenr)
2217 goto next;
2218 }
2219 btrfs_backref_drop_node_buffer(upper);
2220 }
2221
2222 if (!upper->eb) {
2223 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2224 if (ret) {
2225 if (ret > 0)
2226 ret = -ENOENT;
2227
2228 btrfs_release_path(path);
2229 break;
2230 }
2231
2232 if (!upper->eb) {
2233 upper->eb = path->nodes[upper->level];
2234 path->nodes[upper->level] = NULL;
2235 } else {
2236 BUG_ON(upper->eb != path->nodes[upper->level]);
2237 }
2238
2239 upper->locked = 1;
2240 path->locks[upper->level] = 0;
2241
2242 slot = path->slots[upper->level];
2243 btrfs_release_path(path);
2244 } else {
2245 ret = btrfs_bin_search(upper->eb, 0, key, &slot);
2246 if (ret < 0)
2247 goto next;
2248 BUG_ON(ret);
2249 }
2250
2251 bytenr = btrfs_node_blockptr(upper->eb, slot);
2252 if (lowest) {
2253 if (bytenr != node->bytenr) {
2254 btrfs_err(root->fs_info,
2255 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2256 bytenr, node->bytenr, slot,
2257 upper->eb->start);
2258 ret = -EIO;
2259 goto next;
2260 }
2261 } else {
2262 if (node->eb->start == bytenr)
2263 goto next;
2264 }
2265
2266 blocksize = root->fs_info->nodesize;
2267 eb = btrfs_read_node_slot(upper->eb, slot);
2268 if (IS_ERR(eb)) {
2269 ret = PTR_ERR(eb);
2270 goto next;
2271 }
2272 btrfs_tree_lock(eb);
2273
2274 if (!node->eb) {
2275 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2276 slot, &eb, BTRFS_NESTING_COW);
2277 btrfs_tree_unlock(eb);
2278 free_extent_buffer(eb);
2279 if (ret < 0)
2280 goto next;
2281 /*
2282 * We've just COWed this block, it should have updated
2283 * the correct backref node entry.
2284 */
2285 ASSERT(node->eb == eb);
2286 } else {
2287 struct btrfs_ref ref = {
2288 .action = BTRFS_ADD_DELAYED_REF,
2289 .bytenr = node->eb->start,
2290 .num_bytes = blocksize,
2291 .parent = upper->eb->start,
2292 .owning_root = btrfs_header_owner(upper->eb),
2293 .ref_root = btrfs_header_owner(upper->eb),
2294 };
2295
2296 btrfs_set_node_blockptr(upper->eb, slot,
2297 node->eb->start);
2298 btrfs_set_node_ptr_generation(upper->eb, slot,
2299 trans->transid);
2300 btrfs_mark_buffer_dirty(trans, upper->eb);
2301
2302 btrfs_init_tree_ref(&ref, node->level,
2303 btrfs_root_id(root), false);
2304 ret = btrfs_inc_extent_ref(trans, &ref);
2305 if (!ret)
2306 ret = btrfs_drop_subtree(trans, root, eb,
2307 upper->eb);
2308 if (ret)
2309 btrfs_abort_transaction(trans, ret);
2310 }
2311 next:
2312 if (!upper->pending)
2313 btrfs_backref_drop_node_buffer(upper);
2314 else
2315 btrfs_backref_unlock_node_buffer(upper);
2316 if (ret)
2317 break;
2318 }
2319
2320 if (!ret && node->pending) {
2321 btrfs_backref_drop_node_buffer(node);
2322 list_del_init(&node->list);
2323 node->pending = 0;
2324 }
2325
2326 path->lowest_level = 0;
2327
2328 /*
2329 * We should have allocated all of our space in the block rsv and thus
2330 * shouldn't ENOSPC.
2331 */
2332 ASSERT(ret != -ENOSPC);
2333 return ret;
2334 }
2335
2336 static int link_to_upper(struct btrfs_trans_handle *trans,
2337 struct reloc_control *rc,
2338 struct btrfs_backref_node *node,
2339 struct btrfs_path *path)
2340 {
2341 struct btrfs_key key;
2342
2343 btrfs_node_key_to_cpu(node->eb, &key, 0);
2344 return do_relocation(trans, rc, node, &key, path, 0);
2345 }
2346
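/*
 * Flush the per-level pending lists of the backref cache by linking every
 * pending node to its upper blocks, preserving the first error seen.
 */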
2347 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2348 struct reloc_control *rc,
2349 struct btrfs_path *path, int err)
2350 {
2351 LIST_HEAD(list);
2352 struct btrfs_backref_cache *cache = &rc->backref_cache;
2353 struct btrfs_backref_node *node;
2354 int level;
2355 int ret;
2356
2357 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2358 while (!list_empty(&cache->pending[level])) {
2359 node = list_first_entry(&cache->pending[level],
2360 struct btrfs_backref_node, list);
2361 list_move_tail(&node->list, &list);
2362 BUG_ON(!node->pending);
2363
2364 if (!err) {
2365 ret = link_to_upper(trans, rc, node, path);
2366 if (ret < 0)
2367 err = ret;
2368 }
2369 }
2370 list_splice_init(&list, &cache->pending[level]);
2371 }
2372 return err;
2373 }
2374
2375 /*
2376 * Mark a block and all blocks that directly/indirectly reference it
2377 * as processed.
2378 */
2379 static void update_processed_blocks(struct reloc_control *rc,
2380 struct btrfs_backref_node *node)
2381 {
2382 struct btrfs_backref_node *next = node;
2383 struct btrfs_backref_edge *edge;
2384 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2385 int index = 0;
2386
2387 while (next) {
2388 cond_resched();
2389 while (1) {
2390 if (next->processed)
2391 break;
2392
2393 mark_block_processed(rc, next);
2394
2395 if (list_empty(&next->upper))
2396 break;
2397
2398 edge = list_first_entry(&next->upper, struct btrfs_backref_edge,
2399 list[LOWER]);
2400 edges[index++] = edge;
2401 next = edge->node[UPPER];
2402 }
2403 next = walk_down_backref(edges, &index);
2404 }
2405 }
2406
2407 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2408 {
2409 u32 blocksize = rc->extent_root->fs_info->nodesize;
2410
2411 if (btrfs_test_range_bit(&rc->processed_blocks, bytenr,
2412 bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
2413 return 1;
2414 return 0;
2415 }
2416
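/*
 * Read the tree block described by @block from disk and save its first key,
 * so the block can later be located in its owning tree.
 */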
2417 static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2418 struct tree_block *block)
2419 {
2420 struct btrfs_tree_parent_check check = {
2421 .level = block->level,
2422 .owner_root = block->owner,
2423 .transid = block->key.offset
2424 };
2425 struct extent_buffer *eb;
2426
2427 eb = read_tree_block(fs_info, block->bytenr, &check);
2428 if (IS_ERR(eb))
2429 return PTR_ERR(eb);
2430 if (!extent_buffer_uptodate(eb)) {
2431 free_extent_buffer(eb);
2432 return -EIO;
2433 }
2434 if (block->level == 0)
2435 btrfs_item_key_to_cpu(eb, &block->key, 0);
2436 else
2437 btrfs_node_key_to_cpu(eb, &block->key, 0);
2438 free_extent_buffer(eb);
2439 block->key_ready = true;
2440 return 0;
2441 }
2442
2443 /*
2444 * helper function to relocate a tree block
2445 */
2446 static int relocate_tree_block(struct btrfs_trans_handle *trans,
2447 struct reloc_control *rc,
2448 struct btrfs_backref_node *node,
2449 struct btrfs_key *key,
2450 struct btrfs_path *path)
2451 {
2452 struct btrfs_root *root;
2453 int ret = 0;
2454
2455 if (!node)
2456 return 0;
2457
2458 /*
2459 * If we fail here we want to drop our backref_node because we are going
2460 * to start over and regenerate the tree for it.
2461 */
2462 ret = reserve_metadata_space(trans, rc, node);
2463 if (ret)
2464 goto out;
2465
2466 BUG_ON(node->processed);
2467 root = select_one_root(node);
2468 if (IS_ERR(root)) {
2469 ret = PTR_ERR(root);
2470
2471 /* See explanation in select_one_root for the -EUCLEAN case. */
2472 ASSERT(ret == -ENOENT);
2473 if (ret == -ENOENT) {
2474 ret = 0;
2475 update_processed_blocks(rc, node);
2476 }
2477 goto out;
2478 }
2479
2480 if (root) {
2481 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2482 /*
2483 * This block was the root block of a root, and this is
2484 * the first time we're processing the block and thus it
2485 * should not have had the ->new_bytenr modified.
2486 *
2487 * However in the case of corruption we could have
2488 * multiple refs pointing to the same block improperly,
2489 * and thus we would trip over these checks. ASSERT()
2490 * for the developer case, because it could indicate a
2491 * bug in the backref code, however error out for a
2492 * normal user in the case of corruption.
2493 */
2494 ASSERT(node->new_bytenr == 0);
2495 if (node->new_bytenr) {
2496 btrfs_err(root->fs_info,
2497 "bytenr %llu has improper references to it",
2498 node->bytenr);
2499 ret = -EUCLEAN;
2500 goto out;
2501 }
2502 ret = btrfs_record_root_in_trans(trans, root);
2503 if (ret)
2504 goto out;
2505 /*
2506 * Another thread could have failed; we need to check whether
2507 * reloc_root is actually set.
2508 */
2509 if (!root->reloc_root) {
2510 ret = -ENOENT;
2511 goto out;
2512 }
2513 root = root->reloc_root;
2514 node->new_bytenr = root->node->start;
2515 btrfs_put_root(node->root);
2516 node->root = btrfs_grab_root(root);
2517 ASSERT(node->root);
2518 } else {
2519 btrfs_err(root->fs_info,
2520 "bytenr %llu resolved to a non-shareable root",
2521 node->bytenr);
2522 ret = -EUCLEAN;
2523 goto out;
2524 }
2525 if (!ret)
2526 update_processed_blocks(rc, node);
2527 } else {
2528 ret = do_relocation(trans, rc, node, key, path, 1);
2529 }
2530 out:
2531 if (ret || node->level == 0)
2532 btrfs_backref_cleanup_node(&rc->backref_cache, node);
2533 return ret;
2534 }
2535
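/*
 * Relocate a tree block owned by a COW-only tree or by the data reloc tree:
 * reserve metadata for the path from the tree root down to the block, then
 * simply COW down to it with btrfs_search_slot().
 */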
2536 static int relocate_cowonly_block(struct btrfs_trans_handle *trans,
2537 struct reloc_control *rc, struct tree_block *block,
2538 struct btrfs_path *path)
2539 {
2540 struct btrfs_fs_info *fs_info = trans->fs_info;
2541 struct btrfs_root *root;
2542 u64 num_bytes;
2543 int nr_levels;
2544 int ret;
2545
2546 root = btrfs_get_fs_root(fs_info, block->owner, true);
2547 if (IS_ERR(root))
2548 return PTR_ERR(root);
2549
2550 nr_levels = max(btrfs_header_level(root->node) - block->level, 0) + 1;
2551
2552 num_bytes = fs_info->nodesize * nr_levels;
2553 ret = refill_metadata_space(trans, rc, num_bytes);
2554 if (ret) {
2555 btrfs_put_root(root);
2556 return ret;
2557 }
2558 path->lowest_level = block->level;
2559 if (root == root->fs_info->chunk_root)
2560 btrfs_reserve_chunk_metadata(trans, false);
2561
2562 ret = btrfs_search_slot(trans, root, &block->key, path, 0, 1);
2563 path->lowest_level = 0;
2564 btrfs_release_path(path);
2565
2566 if (root == root->fs_info->chunk_root)
2567 btrfs_trans_release_chunk_metadata(trans);
2568 if (ret > 0)
2569 ret = 0;
2570 btrfs_put_root(root);
2571
2572 return ret;
2573 }
2574
2575 /*
2576 * relocate a list of blocks
2577 */
2578 static noinline_for_stack
2579 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2580 struct reloc_control *rc, struct rb_root *blocks)
2581 {
2582 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2583 struct btrfs_backref_node *node;
2584 struct btrfs_path *path;
2585 struct tree_block *block;
2586 struct tree_block *next;
2587 int ret = 0;
2588
2589 path = btrfs_alloc_path();
2590 if (!path) {
2591 ret = -ENOMEM;
2592 goto out_free_blocks;
2593 }
2594
2595 /* Kick in readahead for tree blocks with missing keys */
2596 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2597 if (!block->key_ready)
2598 btrfs_readahead_tree_block(fs_info, block->bytenr,
2599 block->owner, 0,
2600 block->level);
2601 }
2602
2603 /* Get first keys */
2604 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2605 if (!block->key_ready) {
2606 ret = get_tree_block_key(fs_info, block);
2607 if (ret)
2608 goto out_free_path;
2609 }
2610 }
2611
2612 /* Do tree relocation */
2613 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2614 /*
2615 * For COWonly blocks, or the data reloc tree, we only need to
2616 * COW down to the block; there's no need to generate a backref
2617 * tree.
2618 */
2619 if (block->owner &&
2620 (!is_fstree(block->owner) ||
2621 block->owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) {
2622 ret = relocate_cowonly_block(trans, rc, block, path);
2623 if (ret)
2624 break;
2625 continue;
2626 }
2627
2628 node = build_backref_tree(trans, rc, &block->key,
2629 block->level, block->bytenr);
2630 if (IS_ERR(node)) {
2631 ret = PTR_ERR(node);
2632 goto out;
2633 }
2634
2635 ret = relocate_tree_block(trans, rc, node, &block->key,
2636 path);
2637 if (ret < 0)
2638 break;
2639 }
2640 out:
2641 ret = finish_pending_nodes(trans, rc, path, ret);
2642
2643 out_free_path:
2644 btrfs_free_path(path);
2645 out_free_blocks:
2646 free_block_list(blocks);
2647 return ret;
2648 }
2649
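/*
 * Reserve data space and preallocate file extents in the data reloc inode
 * covering the current cluster, one range per cluster boundary.
 */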
2650 static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control *rc)
2651 {
2652 const struct file_extent_cluster *cluster = &rc->cluster;
2653 struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
2654 u64 alloc_hint = 0;
2655 u64 start;
2656 u64 end;
2657 u64 offset = inode->reloc_block_group_start;
2658 u64 num_bytes;
2659 int nr;
2660 int ret = 0;
2661 u64 i_size = i_size_read(&inode->vfs_inode);
2662 u64 prealloc_start = cluster->start - offset;
2663 u64 prealloc_end = cluster->end - offset;
2664 u64 cur_offset = prealloc_start;
2665
2666 /*
2667 * For the subpage case, the previous i_size may not be aligned to PAGE_SIZE.
2668 * This means the range [i_size, PAGE_END + 1) is filled with zeros by the
2669 * btrfs_do_readpage() call for a previously relocated file cluster.
2670 *
2671 * If the current cluster starts in the above range, btrfs_do_readpage()
2672 * will skip the read, and relocate_one_folio() will later write back
2673 * the padding zeros as new data, causing data corruption.
2674 *
2675 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
2676 */
2677 if (!PAGE_ALIGNED(i_size)) {
2678 struct address_space *mapping = inode->vfs_inode.i_mapping;
2679 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2680 const u32 sectorsize = fs_info->sectorsize;
2681 struct folio *folio;
2682
2683 ASSERT(sectorsize < PAGE_SIZE);
2684 ASSERT(IS_ALIGNED(i_size, sectorsize));
2685
2686 /*
2687 * Subpage can't handle a page with the DIRTY bit set but without the
2688 * UPTODATE bit, as it can lead to the following deadlock:
2689 *
2690 * btrfs_read_folio()
2691 * | Page already *locked*
2692 * |- btrfs_lock_and_flush_ordered_range()
2693 * |- btrfs_start_ordered_extent()
2694 * |- extent_write_cache_pages()
2695 * |- lock_page()
2696 * We try to lock the page we already hold.
2697 *
2698 * Here we just write back the whole data reloc inode, so that
2699 * we are ensured to have no dirty range in the page, and
2700 * are safe to clear the uptodate bits.
2701 *
2702 * This shouldn't cause too much overhead, as we need to write
2703 * the data back anyway.
2704 */
2705 ret = filemap_write_and_wait(mapping);
2706 if (ret < 0)
2707 return ret;
2708
2709 folio = filemap_lock_folio(mapping, i_size >> PAGE_SHIFT);
2710 /*
2711 * If the page has been freed we don't need to do anything, as we
2712 * will re-read the whole page anyway.
2713 */
2714 if (!IS_ERR(folio)) {
2715 btrfs_subpage_clear_uptodate(fs_info, folio, i_size,
2716 round_up(i_size, PAGE_SIZE) - i_size);
2717 folio_unlock(folio);
2718 folio_put(folio);
2719 }
2720 }
2721
2722 BUG_ON(cluster->start != cluster->boundary[0]);
2723 ret = btrfs_alloc_data_chunk_ondemand(inode,
2724 prealloc_end + 1 - prealloc_start);
2725 if (ret)
2726 return ret;
2727
2728 btrfs_inode_lock(inode, 0);
2729 for (nr = 0; nr < cluster->nr; nr++) {
2730 struct extent_state *cached_state = NULL;
2731
2732 start = cluster->boundary[nr] - offset;
2733 if (nr + 1 < cluster->nr)
2734 end = cluster->boundary[nr + 1] - 1 - offset;
2735 else
2736 end = cluster->end - offset;
2737
2738 btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
2739 num_bytes = end + 1 - start;
2740 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2741 num_bytes, num_bytes,
2742 end + 1, &alloc_hint);
2743 cur_offset = end + 1;
2744 btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
2745 if (ret)
2746 break;
2747 }
2748 btrfs_inode_unlock(inode, 0);
2749
2750 if (cur_offset < prealloc_end)
2751 btrfs_free_reserved_data_space_noquota(inode,
2752 prealloc_end + 1 - cur_offset);
2753 return ret;
2754 }
2755
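/*
 * Insert a single pinned extent map covering the whole cluster, so reads of
 * the data reloc inode during relocation fetch the original data extents.
 */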
2756 static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_control *rc)
2757 {
2758 struct btrfs_inode *inode = BTRFS_I(rc->data_inode);
2759 struct extent_map *em;
2760 struct extent_state *cached_state = NULL;
2761 u64 offset = inode->reloc_block_group_start;
2762 u64 start = rc->cluster.start - offset;
2763 u64 end = rc->cluster.end - offset;
2764 int ret = 0;
2765
2766 em = btrfs_alloc_extent_map();
2767 if (!em)
2768 return -ENOMEM;
2769
2770 em->start = start;
2771 em->len = end + 1 - start;
2772 em->disk_bytenr = rc->cluster.start;
2773 em->disk_num_bytes = em->len;
2774 em->ram_bytes = em->len;
2775 em->flags |= EXTENT_FLAG_PINNED;
2776
2777 btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
2778 ret = btrfs_replace_extent_map_range(inode, em, false);
2779 btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
2780 btrfs_free_extent_map(em);
2781
2782 return ret;
2783 }
2784
2785 /*
2786 * Allow error injection to test balance/relocation cancellation
2787 */
2788 noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
2789 {
2790 return atomic_read(&fs_info->balance_cancel_req) ||
2791 atomic_read(&fs_info->reloc_cancel_req) ||
2792 fatal_signal_pending(current);
2793 }
2794 ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2795
2796 static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
2797 int cluster_nr)
2798 {
2799 /* Last extent, use cluster end directly */
2800 if (cluster_nr >= cluster->nr - 1)
2801 return cluster->end;
2802
2803 /* Use the next boundary's start, minus one */
2804 return cluster->boundary[cluster_nr + 1] - 1;
2805 }
2806
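/*
 * Relocate the part of the current cluster covered by the folio at @index:
 * read the folio, mark the covered ranges delalloc and dirty, and set
 * EXTENT_BOUNDARY bits so writeback recreates extents of the original sizes.
 */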
2807 static int relocate_one_folio(struct reloc_control *rc,
2808 struct file_ra_state *ra,
2809 int *cluster_nr, unsigned long index)
2810 {
2811 const struct file_extent_cluster *cluster = &rc->cluster;
2812 struct inode *inode = rc->data_inode;
2813 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2814 u64 offset = BTRFS_I(inode)->reloc_block_group_start;
2815 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
2816 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2817 struct folio *folio;
2818 u64 folio_start;
2819 u64 folio_end;
2820 u64 cur;
2821 int ret;
2822 const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
2823
2824 ASSERT(index <= last_index);
2825 again:
2826 folio = filemap_lock_folio(inode->i_mapping, index);
2827 if (IS_ERR(folio)) {
2828
2829 /*
2830 * On relocation we're doing readahead on the relocation inode,
2831 * but if the filesystem is backed by a RAID stripe tree we can
2832 * get ENOENT (e.g. due to preallocated extents not being
2833 * mapped in the RST) from the lookup.
2834 *
2835 * But readahead doesn't handle the error and submits invalid
2836 * reads to the device, causing assertion failures.
2837 */
2838 if (!use_rst)
2839 page_cache_sync_readahead(inode->i_mapping, ra, NULL,
2840 index, last_index + 1 - index);
2841 folio = __filemap_get_folio(inode->i_mapping, index,
2842 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
2843 mask);
2844 if (IS_ERR(folio))
2845 return PTR_ERR(folio);
2846 }
2847
2848 WARN_ON(folio_order(folio));
2849
2850 if (folio_test_readahead(folio) && !use_rst)
2851 page_cache_async_readahead(inode->i_mapping, ra, NULL,
2852 folio, last_index + 1 - index);
2853
2854 if (!folio_test_uptodate(folio)) {
2855 btrfs_read_folio(NULL, folio);
2856 folio_lock(folio);
2857 if (!folio_test_uptodate(folio)) {
2858 ret = -EIO;
2859 goto release_folio;
2860 }
2861 if (folio->mapping != inode->i_mapping) {
2862 folio_unlock(folio);
2863 folio_put(folio);
2864 goto again;
2865 }
2866 }
2867
2868 /*
2869 * We could have lost folio private when we dropped the lock to read the
2870 * folio above, so make sure we call set_folio_extent_mapped() here to have
2871 * the subpage blocksize state we need in place.
2872 */
2873 ret = set_folio_extent_mapped(folio);
2874 if (ret < 0)
2875 goto release_folio;
2876
2877 folio_start = folio_pos(folio);
2878 folio_end = folio_start + PAGE_SIZE - 1;
2879
2880 /*
2881 * Start from the cluster boundary, as in the subpage case the cluster
2882 * can start inside the folio.
2883 */
2884 cur = max(folio_start, cluster->boundary[*cluster_nr] - offset);
2885 while (cur <= folio_end) {
2886 struct extent_state *cached_state = NULL;
2887 u64 extent_start = cluster->boundary[*cluster_nr] - offset;
2888 u64 extent_end = get_cluster_boundary_end(cluster,
2889 *cluster_nr) - offset;
2890 u64 clamped_start = max(folio_start, extent_start);
2891 u64 clamped_end = min(folio_end, extent_end);
2892 u32 clamped_len = clamped_end + 1 - clamped_start;
2893
2894 /* Reserve metadata for this range */
2895 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
2896 clamped_len, clamped_len,
2897 false);
2898 if (ret)
2899 goto release_folio;
2900
2901 /* Mark the range delalloc and dirty for later writeback */
2902 btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start,
2903 clamped_end, &cached_state);
2904 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
2905 clamped_end, 0, &cached_state);
2906 if (ret) {
2907 btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree,
2908 clamped_start, clamped_end,
2909 EXTENT_LOCKED | EXTENT_BOUNDARY,
2910 &cached_state);
2911 btrfs_delalloc_release_metadata(BTRFS_I(inode),
2912 clamped_len, true);
2913 btrfs_delalloc_release_extents(BTRFS_I(inode),
2914 clamped_len);
2915 goto release_folio;
2916 }
2917 btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);
2918
2919 /*
2920 * Set the boundary if it's inside the folio.
2921 * Data relocation requires the destination extents to have the
2922 * same size as the source.
2923 * The EXTENT_BOUNDARY bit prevents the current extent from being
2924 * merged with the previous extent.
2925 */
2926 if (in_range(cluster->boundary[*cluster_nr] - offset, folio_start, PAGE_SIZE)) {
2927 u64 boundary_start = cluster->boundary[*cluster_nr] -
2928 offset;
2929 u64 boundary_end = boundary_start +
2930 fs_info->sectorsize - 1;
2931
2932 btrfs_set_extent_bit(&BTRFS_I(inode)->io_tree,
2933 boundary_start, boundary_end,
2934 EXTENT_BOUNDARY, NULL);
2935 }
2936 btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
2937 &cached_state);
2938 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
2939 cur += clamped_len;
2940
2941 /* Crossed extent end, go to next extent */
2942 if (cur >= extent_end) {
2943 (*cluster_nr)++;
2944 /* Just finished the last extent of the cluster, exit. */
2945 if (*cluster_nr >= cluster->nr)
2946 break;
2947 }
2948 }
2949 folio_unlock(folio);
2950 folio_put(folio);
2951
2952 balance_dirty_pages_ratelimited(inode->i_mapping);
2953 btrfs_throttle(fs_info);
2954 if (btrfs_should_cancel_balance(fs_info))
2955 ret = -ECANCELED;
2956 return ret;
2957
2958 release_folio:
2959 folio_unlock(folio);
2960 folio_put(folio);
2961 return ret;
2962 }
2963
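/*
 * Relocate all data extents in the current cluster: preallocate destination
 * space, map the cluster to the old data, then dirty it folio by folio so
 * writeback copies the data to the new location.
 */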
2964 static int relocate_file_extent_cluster(struct reloc_control *rc)
2965 {
2966 struct inode *inode = rc->data_inode;
2967 const struct file_extent_cluster *cluster = &rc->cluster;
2968 u64 offset = BTRFS_I(inode)->reloc_block_group_start;
2969 unsigned long index;
2970 unsigned long last_index;
2971 struct file_ra_state *ra;
2972 int cluster_nr = 0;
2973 int ret = 0;
2974
2975 if (!cluster->nr)
2976 return 0;
2977
2978 ra = kzalloc(sizeof(*ra), GFP_NOFS);
2979 if (!ra)
2980 return -ENOMEM;
2981
2982 ret = prealloc_file_extent_cluster(rc);
2983 if (ret)
2984 goto out;
2985
2986 file_ra_state_init(ra, inode->i_mapping);
2987
2988 ret = setup_relocation_extent_mapping(rc);
2989 if (ret)
2990 goto out;
2991
2992 last_index = (cluster->end - offset) >> PAGE_SHIFT;
2993 for (index = (cluster->start - offset) >> PAGE_SHIFT;
2994 index <= last_index && !ret; index++)
2995 ret = relocate_one_folio(rc, ra, &cluster_nr, index);
2996 if (ret == 0)
2997 WARN_ON(cluster_nr != cluster->nr);
2998 out:
2999 kfree(ra);
3000 return ret;
3001 }
3002
3003 static noinline_for_stack int relocate_data_extent(struct reloc_control *rc,
3004 const struct btrfs_key *extent_key)
3005 {
3006 struct inode *inode = rc->data_inode;
3007 struct file_extent_cluster *cluster = &rc->cluster;
3008 int ret;
3009 struct btrfs_root *root = BTRFS_I(inode)->root;
3010
3011 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3012 ret = relocate_file_extent_cluster(rc);
3013 if (ret)
3014 return ret;
3015 cluster->nr = 0;
3016 }
3017
3018 /*
3019 * Under simple quotas, we set root->relocation_src_root when we find
3020 * the extent. If adjacent extents have different owners, we can't merge
3021 * them while relocating. Handle this by storing the owning root that
3022 * started a cluster and, if we see an extent from a different root, break
3023 * cluster formation (just like the above case of non-adjacent extents).
3024 *
3025 * Without simple quotas, relocation_src_root is always 0, so we should
3026 * never see a mismatch, and it should have no effect on relocation
3027 * clusters.
3028 */
3029 if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) {
3030 u64 tmp = root->relocation_src_root;
3031
3032 /*
3033 * root->relocation_src_root is the state that actually affects
3034 * the preallocation we do here, so set it to the root owning
3035 * the cluster we need to relocate.
3036 */
3037 root->relocation_src_root = cluster->owning_root;
3038 ret = relocate_file_extent_cluster(rc);
3039 if (ret)
3040 return ret;
3041 cluster->nr = 0;
3042 /* And reset it back for the current extent's owning root. */
3043 root->relocation_src_root = tmp;
3044 }
3045
3046 if (!cluster->nr) {
3047 cluster->start = extent_key->objectid;
3048 cluster->owning_root = root->relocation_src_root;
3049 }
3050 else
3051 BUG_ON(cluster->nr >= MAX_EXTENTS);
3052 cluster->end = extent_key->objectid + extent_key->offset - 1;
3053 cluster->boundary[cluster->nr] = extent_key->objectid;
3054 cluster->nr++;
3055
3056 if (cluster->nr >= MAX_EXTENTS) {
3057 ret = relocate_file_extent_cluster(rc);
3058 if (ret)
3059 return ret;
3060 cluster->nr = 0;
3061 }
3062 return 0;
3063 }
3064
3065 /*
3066 * helper to add a tree block to the list.
3067 * the major work is getting the generation and level of the block
3068 */
3069 static int add_tree_block(struct reloc_control *rc,
3070 const struct btrfs_key *extent_key,
3071 struct btrfs_path *path,
3072 struct rb_root *blocks)
3073 {
3074 struct extent_buffer *eb;
3075 struct btrfs_extent_item *ei;
3076 struct btrfs_tree_block_info *bi;
3077 struct tree_block *block;
3078 struct rb_node *rb_node;
3079 u32 item_size;
3080 int level = -1;
3081 u64 generation;
3082 u64 owner = 0;
3083
3084 eb = path->nodes[0];
3085 item_size = btrfs_item_size(eb, path->slots[0]);
3086
3087 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3088 item_size >= sizeof(*ei) + sizeof(*bi)) {
3089 unsigned long ptr = 0, end;
3090
3091 ei = btrfs_item_ptr(eb, path->slots[0],
3092 struct btrfs_extent_item);
3093 end = (unsigned long)ei + item_size;
3094 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3095 bi = (struct btrfs_tree_block_info *)(ei + 1);
3096 level = btrfs_tree_block_level(eb, bi);
3097 ptr = (unsigned long)(bi + 1);
3098 } else {
3099 level = (int)extent_key->offset;
3100 ptr = (unsigned long)(ei + 1);
3101 }
3102 generation = btrfs_extent_generation(eb, ei);
3103
3104 /*
3105 * We're reading random blocks without knowing their owner ahead
3106 * of time. This is ok most of the time, as all reloc roots and
3107 * fs roots have the same lock type. However normal trees do
3108 * not, and the only way to know ahead of time is to read the
3109 * inline ref offset. We know it's an fs root if
3110 *
3111 * 1. There's more than one ref.
3112 * 2. There's a SHARED_DATA_REF_KEY set.
3113 * 3. FULL_BACKREF is set on the flags.
3114 *
3115 * Otherwise it's safe to assume that the ref offset == the
3116 * owner of this block, so we can use that when calling
3117 * read_tree_block.
3118 */
3119 if (btrfs_extent_refs(eb, ei) == 1 &&
3120 !(btrfs_extent_flags(eb, ei) &
3121 BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3122 ptr < end) {
3123 struct btrfs_extent_inline_ref *iref;
3124 int type;
3125
3126 iref = (struct btrfs_extent_inline_ref *)ptr;
3127 type = btrfs_get_extent_inline_ref_type(eb, iref,
3128 BTRFS_REF_TYPE_BLOCK);
3129 if (type == BTRFS_REF_TYPE_INVALID)
3130 return -EINVAL;
3131 if (type == BTRFS_TREE_BLOCK_REF_KEY)
3132 owner = btrfs_extent_inline_ref_offset(eb, iref);
3133 }
3134 } else {
3135 btrfs_print_leaf(eb);
3136 btrfs_err(rc->block_group->fs_info,
3137 "unrecognized tree backref at tree block %llu slot %u",
3138 eb->start, path->slots[0]);
3139 btrfs_release_path(path);
3140 return -EUCLEAN;
3141 }
3142
3143 btrfs_release_path(path);
3144
3145 BUG_ON(level == -1);
3146
3147 block = kmalloc(sizeof(*block), GFP_NOFS);
3148 if (!block)
3149 return -ENOMEM;
3150
3151 block->bytenr = extent_key->objectid;
3152 block->key.objectid = rc->extent_root->fs_info->nodesize;
3153 block->key.offset = generation;
3154 block->level = level;
3155 block->key_ready = false;
3156 block->owner = owner;
3157
3158 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3159 if (rb_node)
3160 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3161 -EEXIST);
3162
3163 return 0;
3164 }
3165
3166 /*
3167 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3168 */
3169 static int __add_tree_block(struct reloc_control *rc,
3170 u64 bytenr, u32 blocksize,
3171 struct rb_root *blocks)
3172 {
3173 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3174 struct btrfs_path *path;
3175 struct btrfs_key key;
3176 int ret;
3177 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3178
3179 if (tree_block_processed(bytenr, rc))
3180 return 0;
3181
3182 if (rb_simple_search(blocks, bytenr))
3183 return 0;
3184
3185 path = btrfs_alloc_path();
3186 if (!path)
3187 return -ENOMEM;
3188 again:
3189 key.objectid = bytenr;
3190 if (skinny) {
3191 key.type = BTRFS_METADATA_ITEM_KEY;
3192 key.offset = (u64)-1;
3193 } else {
3194 key.type = BTRFS_EXTENT_ITEM_KEY;
3195 key.offset = blocksize;
3196 }
3197
3198 path->search_commit_root = 1;
3199 path->skip_locking = 1;
3200 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3201 if (ret < 0)
3202 goto out;
3203
3204 if (ret > 0 && skinny) {
3205 if (path->slots[0]) {
3206 path->slots[0]--;
3207 btrfs_item_key_to_cpu(path->nodes[0], &key,
3208 path->slots[0]);
3209 if (key.objectid == bytenr &&
3210 (key.type == BTRFS_METADATA_ITEM_KEY ||
3211 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3212 key.offset == blocksize)))
3213 ret = 0;
3214 }
3215
3216 if (ret) {
3217 skinny = false;
3218 btrfs_release_path(path);
3219 goto again;
3220 }
3221 }
3222 if (ret) {
3223 ASSERT(ret == 1);
3224 btrfs_print_leaf(path->nodes[0]);
3225 btrfs_err(fs_info,
3226 "tree block extent item (%llu) is not found in extent tree",
3227 bytenr);
3228 WARN_ON(1);
3229 ret = -EINVAL;
3230 goto out;
3231 }
3232
3233 ret = add_tree_block(rc, &key, path, blocks);
3234 out:
3235 btrfs_free_path(path);
3236 return ret;
3237 }
3238
3239 static int delete_block_group_cache(struct btrfs_block_group *block_group,
3240 struct inode *inode,
3241 u64 ino)
3242 {
3243 struct btrfs_fs_info *fs_info = block_group->fs_info;
3244 struct btrfs_root *root = fs_info->tree_root;
3245 struct btrfs_trans_handle *trans;
3246 struct btrfs_inode *btrfs_inode;
3247 int ret = 0;
3248
3249 if (inode)
3250 goto truncate;
3251
3252 btrfs_inode = btrfs_iget(ino, root);
3253 if (IS_ERR(btrfs_inode))
3254 return -ENOENT;
3255 inode = &btrfs_inode->vfs_inode;
3256
3257 truncate:
3258 ret = btrfs_check_trunc_cache_free_space(fs_info,
3259 &fs_info->global_block_rsv);
3260 if (ret)
3261 goto out;
3262
3263 trans = btrfs_join_transaction(root);
3264 if (IS_ERR(trans)) {
3265 ret = PTR_ERR(trans);
3266 goto out;
3267 }
3268
3269 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3270
3271 btrfs_end_transaction(trans);
3272 btrfs_btree_balance_dirty(fs_info);
3273 out:
3274 iput(inode);
3275 return ret;
3276 }
3277
3278 /*
3279 * Locate the free space cache EXTENT_DATA in the root tree leaf and delete
3280 * the cache inode, so the free space cache data extent does not block data relocation.
3281 */
3282 static int delete_v1_space_cache(struct extent_buffer *leaf,
3283 struct btrfs_block_group *block_group,
3284 u64 data_bytenr)
3285 {
3286 u64 space_cache_ino;
3287 struct btrfs_file_extent_item *ei;
3288 struct btrfs_key key;
3289 bool found = false;
3290 int i;
3291 int ret;
3292
3293 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3294 return 0;
3295
3296 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3297 u8 type;
3298
3299 btrfs_item_key_to_cpu(leaf, &key, i);
3300 if (key.type != BTRFS_EXTENT_DATA_KEY)
3301 continue;
3302 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3303 type = btrfs_file_extent_type(leaf, ei);
3304
3305 if ((type == BTRFS_FILE_EXTENT_REG ||
3306 type == BTRFS_FILE_EXTENT_PREALLOC) &&
3307 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3308 found = true;
3309 space_cache_ino = key.objectid;
3310 break;
3311 }
3312 }
3313 if (!found)
3314 return -ENOENT;
3315 ret = delete_block_group_cache(block_group, NULL, space_cache_ino);
3316 return ret;
3317 }
3318
3319 /*
3320 * helper to find all tree blocks that reference a given data extent
3321 */
3322 static noinline_for_stack int add_data_references(struct reloc_control *rc,
3323 const struct btrfs_key *extent_key,
3324 struct btrfs_path *path,
3325 struct rb_root *blocks)
3326 {
3327 struct btrfs_backref_walk_ctx ctx = { 0 };
3328 struct ulist_iterator leaf_uiter;
3329 struct ulist_node *ref_node = NULL;
3330 const u32 blocksize = rc->extent_root->fs_info->nodesize;
3331 int ret = 0;
3332
3333 btrfs_release_path(path);
3334
3335 ctx.bytenr = extent_key->objectid;
3336 ctx.skip_inode_ref_list = true;
3337 ctx.fs_info = rc->extent_root->fs_info;
3338
3339 ret = btrfs_find_all_leafs(&ctx);
3340 if (ret < 0)
3341 return ret;
3342
3343 ULIST_ITER_INIT(&leaf_uiter);
3344 while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
3345 struct btrfs_tree_parent_check check = { 0 };
3346 struct extent_buffer *eb;
3347
3348 eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
3349 if (IS_ERR(eb)) {
3350 ret = PTR_ERR(eb);
3351 break;
3352 }
3353 ret = delete_v1_space_cache(eb, rc->block_group,
3354 extent_key->objectid);
3355 free_extent_buffer(eb);
3356 if (ret < 0)
3357 break;
3358 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3359 if (ret < 0)
3360 break;
3361 }
3362 if (ret < 0)
3363 free_block_list(blocks);
3364 ulist_free(ctx.refs);
3365 return ret;
3366 }
3367
3368 /*
3369 * helper to find next unprocessed extent
3370 */
3371 static noinline_for_stack
3372 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3373 struct btrfs_key *extent_key)
3374 {
3375 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3376 struct btrfs_key key;
3377 struct extent_buffer *leaf;
3378 u64 start, end, last;
3379 int ret;
3380
3381 last = rc->block_group->start + rc->block_group->length;
3382 while (1) {
3383 bool block_found;
3384
3385 cond_resched();
3386 if (rc->search_start >= last) {
3387 ret = 1;
3388 break;
3389 }
3390
3391 key.objectid = rc->search_start;
3392 key.type = BTRFS_EXTENT_ITEM_KEY;
3393 key.offset = 0;
3394
3395 path->search_commit_root = 1;
3396 path->skip_locking = 1;
3397 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3398 0, 0);
3399 if (ret < 0)
3400 break;
3401 next:
3402 leaf = path->nodes[0];
3403 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3404 ret = btrfs_next_leaf(rc->extent_root, path);
3405 if (ret != 0)
3406 break;
3407 leaf = path->nodes[0];
3408 }
3409
3410 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3411 if (key.objectid >= last) {
3412 ret = 1;
3413 break;
3414 }
3415
3416 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3417 key.type != BTRFS_METADATA_ITEM_KEY) {
3418 path->slots[0]++;
3419 goto next;
3420 }
3421
3422 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3423 key.objectid + key.offset <= rc->search_start) {
3424 path->slots[0]++;
3425 goto next;
3426 }
3427
3428 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3429 key.objectid + fs_info->nodesize <=
3430 rc->search_start) {
3431 path->slots[0]++;
3432 goto next;
3433 }
3434
3435 block_found = btrfs_find_first_extent_bit(&rc->processed_blocks,
3436 key.objectid, &start, &end,
3437 EXTENT_DIRTY, NULL);
3438
3439 if (block_found && start <= key.objectid) {
3440 btrfs_release_path(path);
3441 rc->search_start = end + 1;
3442 } else {
3443 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3444 rc->search_start = key.objectid + key.offset;
3445 else
3446 rc->search_start = key.objectid +
3447 fs_info->nodesize;
3448 memcpy(extent_key, &key, sizeof(key));
3449 return 0;
3450 }
3451 }
3452 btrfs_release_path(path);
3453 return ret;
3454 }
3455
3456 static void set_reloc_control(struct reloc_control *rc)
3457 {
3458 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3459
3460 mutex_lock(&fs_info->reloc_mutex);
3461 fs_info->reloc_ctl = rc;
3462 mutex_unlock(&fs_info->reloc_mutex);
3463 }
3464
3465 static void unset_reloc_control(struct reloc_control *rc)
3466 {
3467 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3468
3469 mutex_lock(&fs_info->reloc_mutex);
3470 fs_info->reloc_ctl = NULL;
3471 mutex_unlock(&fs_info->reloc_mutex);
3472 }
3473
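/*
 * Set up the relocation control before the main loop: allocate and fill the
 * block reservation, install rc as fs_info->reloc_ctl and commit a
 * transaction so relocation starts from a clean state.
 */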
3474 static noinline_for_stack
3475 int prepare_to_relocate(struct reloc_control *rc)
3476 {
3477 struct btrfs_trans_handle *trans;
3478 int ret;
3479
3480 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3481 BTRFS_BLOCK_RSV_TEMP);
3482 if (!rc->block_rsv)
3483 return -ENOMEM;
3484
3485 memset(&rc->cluster, 0, sizeof(rc->cluster));
3486 rc->search_start = rc->block_group->start;
3487 rc->extents_found = 0;
3488 rc->nodes_relocated = 0;
3489 rc->merging_rsv_size = 0;
3490 rc->reserved_bytes = 0;
3491 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3492 RELOCATION_RESERVED_NODES;
3493 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3494 rc->block_rsv, rc->block_rsv->size,
3495 BTRFS_RESERVE_FLUSH_ALL);
3496 if (ret)
3497 return ret;
3498
3499 rc->create_reloc_tree = true;
3500 set_reloc_control(rc);
3501
3502 trans = btrfs_join_transaction(rc->extent_root);
3503 if (IS_ERR(trans)) {
3504 unset_reloc_control(rc);
3505 /*
3506 * The extent tree is not a ref_cow tree and has no reloc_root to
3507 * clean up.  Callers are responsible for freeing the above
3508 * block rsv.
3509 */
3510 return PTR_ERR(trans);
3511 }
3512
3513 ret = btrfs_commit_transaction(trans);
3514 if (ret)
3515 unset_reloc_control(rc);
3516
3517 return ret;
3518 }
3519
3520 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3521 {
3522 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3523 struct rb_root blocks = RB_ROOT;
3524 struct btrfs_key key;
3525 struct btrfs_trans_handle *trans = NULL;
3526 struct btrfs_path *path;
3527 struct btrfs_extent_item *ei;
3528 u64 flags;
3529 int ret;
3530 int err = 0;
3531 int progress = 0;
3532
3533 path = btrfs_alloc_path();
3534 if (!path)
3535 return -ENOMEM;
3536 path->reada = READA_FORWARD;
3537
3538 ret = prepare_to_relocate(rc);
3539 if (ret) {
3540 err = ret;
3541 goto out_free;
3542 }
3543
3544 while (1) {
3545 rc->reserved_bytes = 0;
3546 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
3547 rc->block_rsv->size,
3548 BTRFS_RESERVE_FLUSH_ALL);
3549 if (ret) {
3550 err = ret;
3551 break;
3552 }
3553 progress++;
3554 trans = btrfs_start_transaction(rc->extent_root, 0);
3555 if (IS_ERR(trans)) {
3556 err = PTR_ERR(trans);
3557 trans = NULL;
3558 break;
3559 }
3560 restart:
3561 if (rc->backref_cache.last_trans != trans->transid)
3562 btrfs_backref_release_cache(&rc->backref_cache);
3563 rc->backref_cache.last_trans = trans->transid;
3564
3565 ret = find_next_extent(rc, path, &key);
3566 if (ret < 0)
3567 err = ret;
3568 if (ret != 0)
3569 break;
3570
3571 rc->extents_found++;
3572
3573 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3574 struct btrfs_extent_item);
3575 flags = btrfs_extent_flags(path->nodes[0], ei);
3576
3577 /*
3578 * If we are relocating a simple quota owned extent item, we
3579 * need to note the owner on the reloc data root so that when
3580 * we allocate the replacement item, we can attribute it to the
3581 * correct eventual owner (rather than the reloc data root).
3582 */
3583 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3584 struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
3585 u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
3586 path->nodes[0],
3587 path->slots[0]);
3588
3589 root->relocation_src_root = owning_root_id;
3590 }
3591
3592 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3593 ret = add_tree_block(rc, &key, path, &blocks);
3594 } else if (rc->stage == UPDATE_DATA_PTRS &&
3595 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3596 ret = add_data_references(rc, &key, path, &blocks);
3597 } else {
3598 btrfs_release_path(path);
3599 ret = 0;
3600 }
3601 if (ret < 0) {
3602 err = ret;
3603 break;
3604 }
3605
3606 if (!RB_EMPTY_ROOT(&blocks)) {
3607 ret = relocate_tree_blocks(trans, rc, &blocks);
3608 if (ret < 0) {
3609 if (ret != -EAGAIN) {
3610 err = ret;
3611 break;
3612 }
3613 rc->extents_found--;
3614 rc->search_start = key.objectid;
3615 }
3616 }
3617
3618 btrfs_end_transaction_throttle(trans);
3619 btrfs_btree_balance_dirty(fs_info);
3620 trans = NULL;
3621
3622 if (rc->stage == MOVE_DATA_EXTENTS &&
3623 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3624 rc->found_file_extent = true;
3625 ret = relocate_data_extent(rc, &key);
3626 if (ret < 0) {
3627 err = ret;
3628 break;
3629 }
3630 }
3631 if (btrfs_should_cancel_balance(fs_info)) {
3632 err = -ECANCELED;
3633 break;
3634 }
3635 }
3636 if (trans && progress && err == -ENOSPC) {
3637 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3638 if (ret == 1) {
3639 err = 0;
3640 progress = 0;
3641 goto restart;
3642 }
3643 }
3644
3645 btrfs_release_path(path);
3646 btrfs_clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3647
3648 if (trans) {
3649 btrfs_end_transaction_throttle(trans);
3650 btrfs_btree_balance_dirty(fs_info);
3651 }
3652
3653 if (!err) {
3654 ret = relocate_file_extent_cluster(rc);
3655 if (ret < 0)
3656 err = ret;
3657 }
3658
3659 rc->create_reloc_tree = false;
3660 set_reloc_control(rc);
3661
3662 btrfs_backref_release_cache(&rc->backref_cache);
3663 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3664
3665 /*
3666 * Even in the case when the relocation is cancelled, we should still go
3667 * through prepare_to_merge() and merge_reloc_roots().
3668 *
3669 * On error (including a cancelled balance), prepare_to_merge() will
3670 * mark all reloc trees orphan, then queue them for cleanup in
3671 * merge_reloc_roots().
3672 */
3673 err = prepare_to_merge(rc, err);
3674
3675 merge_reloc_roots(rc);
3676
3677 rc->merge_reloc_tree = false;
3678 unset_reloc_control(rc);
3679 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3680
3681 /* get rid of pinned extents */
3682 trans = btrfs_join_transaction(rc->extent_root);
3683 if (IS_ERR(trans)) {
3684 err = PTR_ERR(trans);
3685 goto out_free;
3686 }
3687 ret = btrfs_commit_transaction(trans);
3688 if (ret && !err)
3689 err = ret;
3690 out_free:
3691 ret = clean_dirty_subvols(rc);
3692 if (ret < 0 && !err)
3693 err = ret;
3694 btrfs_free_block_rsv(fs_info, rc->block_rsv);
3695 btrfs_free_path(path);
3696 return err;
3697 }
3698
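/*
 * Insert a bare inode item (regular file, nlink 0, NOCOMPRESS | PREALLOC)
 * to back the data relocation inode.
 */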
3699 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3700 struct btrfs_root *root, u64 objectid)
3701 {
3702 struct btrfs_path *path;
3703 struct btrfs_inode_item *item;
3704 struct extent_buffer *leaf;
3705 int ret;
3706
3707 path = btrfs_alloc_path();
3708 if (!path)
3709 return -ENOMEM;
3710
3711 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3712 if (ret)
3713 goto out;
3714
3715 leaf = path->nodes[0];
3716 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3717 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3718 btrfs_set_inode_generation(leaf, item, 1);
3719 btrfs_set_inode_size(leaf, item, 0);
3720 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3721 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3722 BTRFS_INODE_PREALLOC);
3723 out:
3724 btrfs_free_path(path);
3725 return ret;
3726 }
3727
3728 static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3729 struct btrfs_root *root, u64 objectid)
3730 {
3731 struct btrfs_path *path;
3732 struct btrfs_key key;
3733 int ret = 0;
3734
3735 path = btrfs_alloc_path();
3736 if (!path) {
3737 ret = -ENOMEM;
3738 goto out;
3739 }
3740
3741 key.objectid = objectid;
3742 key.type = BTRFS_INODE_ITEM_KEY;
3743 key.offset = 0;
3744 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3745 if (ret) {
3746 if (ret > 0)
3747 ret = -ENOENT;
3748 goto out;
3749 }
3750 ret = btrfs_del_item(trans, root, path);
3751 out:
3752 if (ret)
3753 btrfs_abort_transaction(trans, ret);
3754 btrfs_free_path(path);
3755 }
3756
3757 /*
3758 * Helper to create an inode for data relocation.
3759 * The inode lives in the data relocation tree and its link count is 0.
3760 */
3761 static noinline_for_stack struct inode *create_reloc_inode(
3762 const struct btrfs_block_group *group)
3763 {
3764 struct btrfs_fs_info *fs_info = group->fs_info;
3765 struct btrfs_inode *inode = NULL;
3766 struct btrfs_trans_handle *trans;
3767 struct btrfs_root *root;
3768 u64 objectid;
3769 int ret = 0;
3770
3771 root = btrfs_grab_root(fs_info->data_reloc_root);
3772 trans = btrfs_start_transaction(root, 6);
3773 if (IS_ERR(trans)) {
3774 btrfs_put_root(root);
3775 return ERR_CAST(trans);
3776 }
3777
3778 ret = btrfs_get_free_objectid(root, &objectid);
3779 if (ret)
3780 goto out;
3781
3782 ret = __insert_orphan_inode(trans, root, objectid);
3783 if (ret)
3784 goto out;
3785
3786 inode = btrfs_iget(objectid, root);
3787 if (IS_ERR(inode)) {
3788 delete_orphan_inode(trans, root, objectid);
3789 ret = PTR_ERR(inode);
3790 inode = NULL;
3791 goto out;
3792 }
3793 inode->reloc_block_group_start = group->start;
3794
3795 ret = btrfs_orphan_add(trans, inode);
3796 out:
3797 btrfs_put_root(root);
3798 btrfs_end_transaction(trans);
3799 btrfs_btree_balance_dirty(fs_info);
3800 if (ret) {
3801 if (inode)
3802 iput(&inode->vfs_inode);
3803 return ERR_PTR(ret);
3804 }
3805 return &inode->vfs_inode;
3806 }
3807
3808 /*
3809 * Mark start of chunk relocation that is cancellable. Check if the cancellation
3810 * has been requested meanwhile and don't start in that case.
3811 *
3812 * Return:
3813 * 0 success
3814 * -EINPROGRESS operation is already in progress, that's probably a bug
3815 * -ECANCELED cancellation request was set before the operation started
3816 */
3817 static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3818 {
3819 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3820 /* This should not happen */
3821 btrfs_err(fs_info, "reloc already running, cannot start");
3822 return -EINPROGRESS;
3823 }
3824
3825 if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3826 btrfs_info(fs_info, "chunk relocation canceled on start");
3827 /*
3828 * On cancel, clear all requests but let the caller mark
3829 * the end after cleanup operations.
3830 */
3831 atomic_set(&fs_info->reloc_cancel_req, 0);
3832 return -ECANCELED;
3833 }
3834 return 0;
3835 }
3836
3837 /*
3838 * Mark end of chunk relocation that is cancellable and wake any waiters.
3839 */
3840 static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3841 {
3842 /* Requested after start, clear bit first so any waiters can continue */
3843 if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3844 btrfs_info(fs_info, "chunk relocation canceled during operation");
3845 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3846 atomic_set(&fs_info->reloc_cancel_req, 0);
3847 }
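/*
 * Minimal usage sketch, mirroring the real call sites in
 * btrfs_relocate_block_group() and btrfs_recover_relocation() below:
 *
 *	ret = reloc_chunk_start(fs_info);
 *	if (ret < 0)
 *		goto cleanup;
 *	...relocation work...
 * cleanup:
 *	reloc_chunk_end(fs_info);
 *
 * Note that reloc_chunk_end() is also reached on the error path, since the
 * cancellation case above relies on the caller to mark the end after its
 * cleanup is done.
 */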
3848
3849 static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3850 {
3851 struct reloc_control *rc;
3852
3853 rc = kzalloc(sizeof(*rc), GFP_NOFS);
3854 if (!rc)
3855 return NULL;
3856
3857 INIT_LIST_HEAD(&rc->reloc_roots);
3858 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3859 btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
3860 rc->reloc_root_tree.rb_root = RB_ROOT;
3861 spin_lock_init(&rc->reloc_root_tree.lock);
3862 btrfs_extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
3863 return rc;
3864 }
3865
3866 static void free_reloc_control(struct reloc_control *rc)
3867 {
3868 struct mapping_node *node, *tmp;
3869
3870 free_reloc_roots(&rc->reloc_roots);
3871 rbtree_postorder_for_each_entry_safe(node, tmp,
3872 &rc->reloc_root_tree.rb_root, rb_node)
3873 kfree(node);
3874
3875 kfree(rc);
3876 }
3877
3878 /*
3879 * Print the block group being relocated
3880 */
3881 static void describe_relocation(struct btrfs_block_group *block_group)
3882 {
3883 char buf[128] = {'\0'};
3884
3885 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3886
3887 btrfs_info(block_group->fs_info, "relocating block group %llu flags %s",
3888 block_group->start, buf);
3889 }
3890
3891 static const char *stage_to_string(enum reloc_stage stage)
3892 {
3893 if (stage == MOVE_DATA_EXTENTS)
3894 return "move data extents";
3895 if (stage == UPDATE_DATA_PTRS)
3896 return "update data pointers";
3897 return "unknown";
3898 }
3899
3900 /*
3901 * function to relocate all extents in a block group.
3902 */
3903 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3904 {
3905 struct btrfs_block_group *bg;
3906 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
3907 struct reloc_control *rc;
3908 struct inode *inode;
3909 struct btrfs_path *path;
3910 int ret;
3911 int rw = 0;
3912 int err = 0;
3913
3914 /*
3915 * This only gets set if we had a half-deleted snapshot on mount. We
3916 * cannot allow relocation to start while we're still trying to clean up
3917 * these pending deletions.
3918 */
3919 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
3920 if (ret)
3921 return ret;
3922
3923 /* We may have been woken up by close_ctree, so bail if we're closing. */
3924 if (btrfs_fs_closing(fs_info))
3925 return -EINTR;
3926
3927 bg = btrfs_lookup_block_group(fs_info, group_start);
3928 if (!bg)
3929 return -ENOENT;
3930
3931 /*
3932 * Relocation of a data block group creates ordered extents. Without
3933 * sb_start_write(), we can freeze the filesystem while unfinished
3934 * ordered extents are left. Such ordered extents can cause a deadlock
3935 * e.g. when syncfs() is waiting for their completion but they can't
3936 * finish because they block when joining a transaction, due to the
3937 * fact that the freeze locks are being held in write mode.
3938 */
3939 if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
3940 ASSERT(sb_write_started(fs_info->sb));
3941
3942 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
3943 btrfs_put_block_group(bg);
3944 return -ETXTBSY;
3945 }
3946
3947 rc = alloc_reloc_control(fs_info);
3948 if (!rc) {
3949 btrfs_put_block_group(bg);
3950 return -ENOMEM;
3951 }
3952
3953 ret = reloc_chunk_start(fs_info);
3954 if (ret < 0) {
3955 err = ret;
3956 goto out_put_bg;
3957 }
3958
3959 rc->extent_root = extent_root;
3960 rc->block_group = bg;
3961
3962 ret = btrfs_inc_block_group_ro(rc->block_group, true);
3963 if (ret) {
3964 err = ret;
3965 goto out;
3966 }
3967 rw = 1;
3968
3969 path = btrfs_alloc_path();
3970 if (!path) {
3971 err = -ENOMEM;
3972 goto out;
3973 }
3974
3975 inode = lookup_free_space_inode(rc->block_group, path);
3976 btrfs_free_path(path);
3977
3978 if (!IS_ERR(inode))
3979 ret = delete_block_group_cache(rc->block_group, inode, 0);
3980 else
3981 ret = PTR_ERR(inode);
3982
3983 if (ret && ret != -ENOENT) {
3984 err = ret;
3985 goto out;
3986 }
3987
3988 rc->data_inode = create_reloc_inode(rc->block_group);
3989 if (IS_ERR(rc->data_inode)) {
3990 err = PTR_ERR(rc->data_inode);
3991 rc->data_inode = NULL;
3992 goto out;
3993 }
3994
3995 describe_relocation(rc->block_group);
3996
3997 btrfs_wait_block_group_reservations(rc->block_group);
3998 btrfs_wait_nocow_writers(rc->block_group);
3999 btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);
4000
4001 ret = btrfs_zone_finish(rc->block_group);
4002 WARN_ON(ret && ret != -EAGAIN);
4003
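/*
 * Relocation runs in two stages (see stage_to_string() above):
 * MOVE_DATA_EXTENTS copies the data to new locations, then
 * UPDATE_DATA_PTRS rewrites the metadata that still points at the old
 * locations.  Loop until a pass finds no more extents in the target
 * block group.
 */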
4004 while (1) {
4005 enum reloc_stage finishes_stage;
4006
4007 mutex_lock(&fs_info->cleaner_mutex);
4008 ret = relocate_block_group(rc);
4009 mutex_unlock(&fs_info->cleaner_mutex);
4010 if (ret < 0)
4011 err = ret;
4012
4013 finishes_stage = rc->stage;
4014 /*
4015 * We may have gotten ENOSPC after we already dirtied some
4016 * extents. If writeout happens while we're relocating a
4017 * different block group we could end up hitting the
4018 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4019 * btrfs_reloc_cow_block. Make sure we write everything out
4020 * properly so we don't trip over this problem, and then break
4021 * out of the loop if we hit an error.
4022 */
4023 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4024 ret = btrfs_wait_ordered_range(BTRFS_I(rc->data_inode), 0,
4025 (u64)-1);
4026 if (ret)
4027 err = ret;
4028 invalidate_mapping_pages(rc->data_inode->i_mapping,
4029 0, -1);
4030 rc->stage = UPDATE_DATA_PTRS;
4031 }
4032
4033 if (err < 0)
4034 goto out;
4035
4036 if (rc->extents_found == 0)
4037 break;
4038
4039 btrfs_info(fs_info, "found %llu extents, stage: %s",
4040 rc->extents_found, stage_to_string(finishes_stage));
4041 }
4042
4043 WARN_ON(rc->block_group->pinned > 0);
4044 WARN_ON(rc->block_group->reserved > 0);
4045 WARN_ON(rc->block_group->used > 0);
4046 out:
4047 if (err && rw)
4048 btrfs_dec_block_group_ro(rc->block_group);
4049 iput(rc->data_inode);
4050 out_put_bg:
4051 btrfs_put_block_group(bg);
4052 reloc_chunk_end(fs_info);
4053 free_reloc_control(rc);
4054 return err;
4055 }
4056
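/*
 * Mark a reloc root whose owning fs root no longer exists as garbage by
 * clearing its drop progress and setting its refs to 0.  Such roots are
 * later queued for deletion instead of being merged (see the
 * btrfs_root_refs() == 0 check in btrfs_recover_relocation() below).
 */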
4057 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4058 {
4059 struct btrfs_fs_info *fs_info = root->fs_info;
4060 struct btrfs_trans_handle *trans;
4061 int ret, err;
4062
4063 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4064 if (IS_ERR(trans))
4065 return PTR_ERR(trans);
4066
4067 memset(&root->root_item.drop_progress, 0,
4068 sizeof(root->root_item.drop_progress));
4069 btrfs_set_root_drop_level(&root->root_item, 0);
4070 btrfs_set_root_refs(&root->root_item, 0);
4071 ret = btrfs_update_root(trans, fs_info->tree_root,
4072 &root->root_key, &root->root_item);
4073
4074 err = btrfs_end_transaction(trans);
4075 if (err)
4076 return err;
4077 return ret;
4078 }
4079
4080 /*
4081 * Recover a relocation interrupted by a system crash.
4082 *
4083 * This function resumes merging reloc trees with their corresponding fs trees.
4084 * This is important for preserving the sharing of tree blocks.
4085 */
4086 int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
4087 {
4088 LIST_HEAD(reloc_roots);
4089 struct btrfs_key key;
4090 struct btrfs_root *fs_root;
4091 struct btrfs_root *reloc_root;
4092 struct btrfs_path *path;
4093 struct extent_buffer *leaf;
4094 struct reloc_control *rc = NULL;
4095 struct btrfs_trans_handle *trans;
4096 int ret2;
4097 int ret = 0;
4098
4099 path = btrfs_alloc_path();
4100 if (!path)
4101 return -ENOMEM;
4102 path->reada = READA_BACK;
4103
4104 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4105 key.type = BTRFS_ROOT_ITEM_KEY;
4106 key.offset = (u64)-1;
4107
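/*
 * Reloc root items all live under BTRFS_TREE_RELOC_OBJECTID and are keyed
 * by the objectid of the fs root they shadow.  Walk them backwards from the
 * highest possible key.offset so every reloc root left behind by the
 * interrupted relocation is read in.
 */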
4108 while (1) {
4109 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4110 path, 0, 0);
4111 if (ret < 0)
4112 goto out;
4113 if (ret > 0) {
4114 if (path->slots[0] == 0)
4115 break;
4116 path->slots[0]--;
4117 }
4118 ret = 0;
4119 leaf = path->nodes[0];
4120 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4121 btrfs_release_path(path);
4122
4123 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4124 key.type != BTRFS_ROOT_ITEM_KEY)
4125 break;
4126
4127 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
4128 if (IS_ERR(reloc_root)) {
4129 ret = PTR_ERR(reloc_root);
4130 goto out;
4131 }
4132
4133 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
4134 list_add(&reloc_root->root_list, &reloc_roots);
4135
4136 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4137 fs_root = btrfs_get_fs_root(fs_info,
4138 reloc_root->root_key.offset, false);
4139 if (IS_ERR(fs_root)) {
4140 ret = PTR_ERR(fs_root);
4141 if (ret != -ENOENT)
4142 goto out;
4143 ret = mark_garbage_root(reloc_root);
4144 if (ret < 0)
4145 goto out;
4146 ret = 0;
4147 } else {
4148 btrfs_put_root(fs_root);
4149 }
4150 }
4151
4152 if (key.offset == 0)
4153 break;
4154
4155 key.offset--;
4156 }
4157 btrfs_release_path(path);
4158
4159 if (list_empty(&reloc_roots))
4160 goto out;
4161
4162 rc = alloc_reloc_control(fs_info);
4163 if (!rc) {
4164 ret = -ENOMEM;
4165 goto out;
4166 }
4167
4168 ret = reloc_chunk_start(fs_info);
4169 if (ret < 0)
4170 goto out_end;
4171
4172 rc->extent_root = btrfs_extent_root(fs_info, 0);
4173
4174 set_reloc_control(rc);
4175
4176 trans = btrfs_join_transaction(rc->extent_root);
4177 if (IS_ERR(trans)) {
4178 ret = PTR_ERR(trans);
4179 goto out_unset;
4180 }
4181
4182 rc->merge_reloc_tree = true;
4183
4184 while (!list_empty(&reloc_roots)) {
4185 reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list);
4186 list_del(&reloc_root->root_list);
4187
4188 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4189 list_add_tail(&reloc_root->root_list,
4190 &rc->reloc_roots);
4191 continue;
4192 }
4193
4194 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
4195 false);
4196 if (IS_ERR(fs_root)) {
4197 ret = PTR_ERR(fs_root);
4198 list_add_tail(&reloc_root->root_list, &reloc_roots);
4199 btrfs_end_transaction(trans);
4200 goto out_unset;
4201 }
4202
4203 ret = __add_reloc_root(reloc_root);
4204 ASSERT(ret != -EEXIST);
4205 if (ret) {
4206 list_add_tail(&reloc_root->root_list, &reloc_roots);
4207 btrfs_put_root(fs_root);
4208 btrfs_end_transaction(trans);
4209 goto out_unset;
4210 }
4211 fs_root->reloc_root = btrfs_grab_root(reloc_root);
4212 btrfs_put_root(fs_root);
4213 }
4214
4215 ret = btrfs_commit_transaction(trans);
4216 if (ret)
4217 goto out_unset;
4218
4219 merge_reloc_roots(rc);
4220
4221 unset_reloc_control(rc);
4222
4223 trans = btrfs_join_transaction(rc->extent_root);
4224 if (IS_ERR(trans)) {
4225 ret = PTR_ERR(trans);
4226 goto out_clean;
4227 }
4228 ret = btrfs_commit_transaction(trans);
4229 out_clean:
4230 ret2 = clean_dirty_subvols(rc);
4231 if (ret2 < 0 && !ret)
4232 ret = ret2;
4233 out_unset:
4234 unset_reloc_control(rc);
4235 out_end:
4236 reloc_chunk_end(fs_info);
4237 free_reloc_control(rc);
4238 out:
4239 free_reloc_roots(&reloc_roots);
4240
4241 btrfs_free_path(path);
4242
4243 if (ret == 0) {
4244 /* cleanup orphan inode in data relocation tree */
4245 fs_root = btrfs_grab_root(fs_info->data_reloc_root);
4246 ASSERT(fs_root);
4247 ret = btrfs_orphan_cleanup(fs_root);
4248 btrfs_put_root(fs_root);
4249 }
4250 return ret;
4251 }
4252
4253 /*
4254 * Helper to add ordered checksums for data relocation.
4255 *
4256 * Cloning the existing checksums properly handles nodatasum extents and
4257 * saves the CPU time of re-calculating them.
4258 */
4259 int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
4260 {
4261 struct btrfs_inode *inode = ordered->inode;
4262 struct btrfs_fs_info *fs_info = inode->root->fs_info;
4263 u64 disk_bytenr = ordered->file_offset + inode->reloc_block_group_start;
4264 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
4265 LIST_HEAD(list);
4266 int ret;
4267
4268 ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
4269 disk_bytenr + ordered->num_bytes - 1,
4270 &list, false);
4271 if (ret < 0) {
4272 btrfs_mark_ordered_extent_error(ordered);
4273 return ret;
4274 }
4275
4276 while (!list_empty(&list)) {
4277 struct btrfs_ordered_sum *sums =
4278 list_first_entry(&list, struct btrfs_ordered_sum, list);
4279
4280 list_del_init(&sums->list);
4281
4282 /*
4283 * We need to offset the new_bytenr based on where the csum is.
4284 * We need to do this because we will read in entire prealloc
4285 * extents but we may have written to say the middle of the
4286 * prealloc extent, so we need to make sure the csum goes with
4287 * the right disk offset.
4288 *
4289 * We can do this because the data reloc inode refers strictly
4290 * to the on disk bytes, so we don't have to worry about
4291 * disk_len vs real len like with real inodes since it's all
4292 * disk length.
4293 */
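/*
 * Worked example with hypothetical numbers: if the csums were looked
 * up starting at disk_bytenr 1048576 and the new ordered extent starts
 * at disk_bytenr 2097152, a csum whose logical is 1052672 (4K into the
 * old range) is remapped to 2097152 + 1052672 - 1048576 = 2101248,
 * i.e. 4K into the new range.
 */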
4294 sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr;
4295 btrfs_add_ordered_sum(ordered, sums);
4296 }
4297
4298 return 0;
4299 }
4300
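/*
 * Hook called from the tree block COW path.  It does nothing unless
 * relocation is running.  For blocks of a reloc tree it points the backref
 * cache at the new copy; for leaves COWed for the first time during the
 * UPDATE_DATA_PTRS stage it updates their file extent pointers via
 * replace_file_extents().
 */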
4301 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4302 struct btrfs_root *root,
4303 const struct extent_buffer *buf,
4304 struct extent_buffer *cow)
4305 {
4306 struct btrfs_fs_info *fs_info = root->fs_info;
4307 struct reloc_control *rc;
4308 struct btrfs_backref_node *node;
4309 int first_cow = 0;
4310 int level;
4311 int ret = 0;
4312
4313 rc = fs_info->reloc_ctl;
4314 if (!rc)
4315 return 0;
4316
4317 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
4318
4319 level = btrfs_header_level(buf);
4320 if (btrfs_header_generation(buf) <=
4321 btrfs_root_last_snapshot(&root->root_item))
4322 first_cow = 1;
4323
4324 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID && rc->create_reloc_tree) {
4325 WARN_ON(!first_cow && level == 0);
4326
4327 node = rc->backref_cache.path[level];
4328
4329 /*
4330 * If node->bytenr != buf->start and node->new_bytenr !=
4331 * buf->start then we've got the wrong backref node for what we
4332 * expected to see here and the cache is incorrect.
4333 */
4334 if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
4335 btrfs_err(fs_info,
4336 "bytenr %llu was found but our backref cache was expecting %llu or %llu",
4337 buf->start, node->bytenr, node->new_bytenr);
4338 return -EUCLEAN;
4339 }
4340
4341 btrfs_backref_drop_node_buffer(node);
4342 atomic_inc(&cow->refs);
4343 node->eb = cow;
4344 node->new_bytenr = cow->start;
4345
4346 if (!node->pending) {
4347 list_move_tail(&node->list,
4348 &rc->backref_cache.pending[level]);
4349 node->pending = 1;
4350 }
4351
4352 if (first_cow)
4353 mark_block_processed(rc, node);
4354
4355 if (first_cow && level > 0)
4356 rc->nodes_relocated += buf->len;
4357 }
4358
4359 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4360 ret = replace_file_extents(trans, rc, root, cow);
4361 return ret;
4362 }
4363
4364 /*
4365 * Called before creating a snapshot. It calculates the metadata reservation
4366 * required for relocating tree blocks in the snapshot.
4367 */
4368 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4369 u64 *bytes_to_reserve)
4370 {
4371 struct btrfs_root *root = pending->root;
4372 struct reloc_control *rc = root->fs_info->reloc_ctl;
4373
4374 if (!rc || !have_reloc_root(root))
4375 return;
4376
4377 if (!rc->merge_reloc_tree)
4378 return;
4379
4380 root = root->reloc_root;
4381 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4382 /*
4383 * relocation is in the stage of merging trees. the space
4384 * used by merging a reloc tree is twice the size of
4385 * relocated tree nodes in the worst case. half for cowing
4386 * the reloc tree, half for cowing the fs tree. the space
4387 * used by cowing the reloc tree will be freed after the
4388 * tree is dropped. if we create snapshot, cowing the fs
4389 * tree may use more space than it frees. so we need
4390 * reserve extra space.
4391 */
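/*
 * For illustration (hypothetical numbers): with rc->nodes_relocated at
 * 16M, merging may need up to 32M in the worst case (16M to COW the
 * reloc tree plus 16M to COW the fs tree), and only the reloc tree half
 * is freed when that tree is dropped.  Reserving an extra 16M here
 * covers the additional COW triggered by the new snapshot.
 */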
4392 *bytes_to_reserve += rc->nodes_relocated;
4393 }
4394
4395 /*
4396 * Called after the snapshot is created. Migrate the block reservation and
4397 * create a reloc root for the newly created snapshot.
4398 *
4399 * This is similar to btrfs_init_reloc_root(): we come out of here with two
4400 * references held on the reloc_root, one for root->reloc_root and one for
4401 * rc->reloc_roots.
4402 */
4403 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4404 struct btrfs_pending_snapshot *pending)
4405 {
4406 struct btrfs_root *root = pending->root;
4407 struct btrfs_root *reloc_root;
4408 struct btrfs_root *new_root;
4409 struct reloc_control *rc = root->fs_info->reloc_ctl;
4410 int ret;
4411
4412 if (!rc || !have_reloc_root(root))
4413 return 0;
4414
4415 rc = root->fs_info->reloc_ctl;
4416 rc->merging_rsv_size += rc->nodes_relocated;
4417
4418 if (rc->merge_reloc_tree) {
4419 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4420 rc->block_rsv,
4421 rc->nodes_relocated, true);
4422 if (ret)
4423 return ret;
4424 }
4425
4426 new_root = pending->snap;
4427 reloc_root = create_reloc_root(trans, root->reloc_root, btrfs_root_id(new_root));
4428 if (IS_ERR(reloc_root))
4429 return PTR_ERR(reloc_root);
4430
4431 ret = __add_reloc_root(reloc_root);
4432 ASSERT(ret != -EEXIST);
4433 if (ret) {
4434 /* Pairs with create_reloc_root */
4435 btrfs_put_root(reloc_root);
4436 return ret;
4437 }
4438 new_root->reloc_root = btrfs_grab_root(reloc_root);
4439 return 0;
4440 }
4441
4442 /*
4443 * Get the current bytenr for the block group which is being relocated.
4444 *
4445 * Return U64_MAX if no running relocation.
4446 */
4447 u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
4448 {
4449 u64 logical = U64_MAX;
4450
4451 lockdep_assert_held(&fs_info->reloc_mutex);
4452
4453 if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group)
4454 logical = fs_info->reloc_ctl->block_group->start;
4455 return logical;
4456 }
4457