--- send.c	(d307d2f35ca5c57ef2966d2a7ccb2de1118f61f4)
+++ send.c	(90b90d4ac03cbaaa87a81ee04e9cdd9b4225cdd4)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2012 Alexander Block. All rights reserved.
  */

 #include <linux/bsearch.h>
 #include <linux/fs.h>
 #include <linux/file.h>

--- 18 unchanged lines hidden ---

 #include "compression.h"
 #include "xattr.h"
 #include "print-tree.h"
 #include "accessors.h"
 #include "dir-item.h"
 #include "file-item.h"
 #include "ioctl.h"
 #include "verity.h"
+#include "lru_cache.h"

 /*
  * Maximum number of references an extent can have in order for us to attempt to
  * issue clone operations instead of write operations. This currently exists to
  * avoid hitting limitations of the backreference walking code (taking a lot of
  * time and using too much memory for extents with large number of references).
  */
 #define SEND_MAX_EXTENT_REFS 1024

--- 59 unchanged lines hidden ---

 /*
  * A backref cache entry maps a leaf to a list of IDs of roots from which the
  * leaf is accessible and we can use for clone operations.
  * With SEND_MAX_BACKREF_CACHE_ROOTS as 12, each cache entry is 128 bytes (on
  * x86_64).
  */
 struct backref_cache_entry {
-	/* List to link to the cache's lru list. */
-	struct list_head list;
-	/* The key for this entry in the cache. */
-	u64 key;
+	struct btrfs_lru_cache_entry entry;
 	u64 root_ids[SEND_MAX_BACKREF_CACHE_ROOTS];
 	/* Number of valid elements in the root_ids array. */
 	int num_roots;
 };

+/* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
+static_assert(offsetof(struct backref_cache_entry, entry) == 0);

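The replacement embeds the generic entry as the struct's first member, and the new static_assert pins it at offset zero. With that layout, a pointer to the embedded btrfs_lru_cache_entry is numerically the same as a pointer to the whole backref_cache_entry, so generic cache code can free an evicted entry without knowing the containing type (the likely reason for the offset-zero rule, per the referenced comment in lru_cache.h), while callers recover their own type with container_of(). A minimal userspace sketch of the pattern; the names below are illustrative stand-ins, not the kernel API:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct btrfs_lru_cache_entry. */
struct cache_entry {
	unsigned long long key;
};

/* Stand-in for struct backref_cache_entry. */
struct user_entry {
	struct cache_entry entry;	/* must be the first member */
	int num_roots;
};

/* Offset 0 is what lets generic code free(&e->entry) and thereby free
 * the whole user_entry, without ever knowing the containing type. */
static_assert(offsetof(struct user_entry, entry) == 0, "entry must be first");

int main(void)
{
	struct user_entry *u = malloc(sizeof(*u));

	if (!u)
		return 1;
	u->entry.key = 42;
	u->num_roots = 3;

	/* A lookup in the generic cache hands back the embedded entry... */
	struct cache_entry *raw = &u->entry;
	/* ...and the caller recovers its own type with container_of(). */
	struct user_entry *back = container_of(raw, struct user_entry, entry);

	printf("key=%llu num_roots=%d\n", back->entry.key, back->num_roots);
	free(&back->entry);	/* same address as 'back'; valid only at offset 0 */
	return 0;
}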
 struct send_ctx {
 	struct file *send_filp;
 	loff_t send_off;
 	char *send_buf;
 	u32 send_size;
 	u32 send_max_size;
 	/*
 	 * Whether BTRFS_SEND_A_DATA attribute was already added to current

--- 153 unchanged lines hidden ---

 	 *
 	 * Indexed by the inode number of the directory to be deleted.
 	 */
 	struct rb_root orphan_dirs;

 	struct rb_root rbtree_new_refs;
 	struct rb_root rbtree_deleted_refs;

-	struct {
-		u64 last_reloc_trans;
-		struct list_head lru_list;
-		struct maple_tree entries;
-		/* Number of entries stored in the cache. */
-		int size;
-	} backref_cache;
+	struct btrfs_lru_cache backref_cache;
+	u64 backref_cache_last_reloc_trans;
 };

 struct pending_dir_move {
 	struct rb_node node;
 	struct list_head list;
 	u64 parent_ino;
 	u64 ino;
 	u64 gen;

--- 1079 unchanged lines hidden ---

 	 */
 		if (num_bytes >= bctx->extent_len)
 			return BTRFS_ITERATE_EXTENT_INODES_STOP;
 	}

 	return 0;
 }

-static void empty_backref_cache(struct send_ctx *sctx)
-{
-	struct backref_cache_entry *entry;
-	struct backref_cache_entry *tmp;
-
-	list_for_each_entry_safe(entry, tmp, &sctx->backref_cache.lru_list, list)
-		kfree(entry);
-
-	INIT_LIST_HEAD(&sctx->backref_cache.lru_list);
-	mtree_destroy(&sctx->backref_cache.entries);
-	sctx->backref_cache.size = 0;
-}
-
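empty_backref_cache() disappears because btrfs_lru_cache_clear() now covers both teardown and wholesale invalidation. The invalidation trigger is in lookup_backref_cache() below: the context snapshots fs_info->last_reloc_trans when the cache gains its first entry, and a lookup that sees a newer relocation transaction clears everything, since relocation may have reused the cached leaves for other tree blocks. A self-contained sketch of that generation-snapshot scheme, with simplified, illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for fs_info->last_reloc_trans: bumped whenever relocation
 * commits a transaction. */
static unsigned long long last_reloc_trans;

struct cache {
	int size;
	unsigned long long last_reloc_trans;	/* snapshot at first insert */
};

static void cache_store(struct cache *c)
{
	/* Snapshot the global counter when the cache gains its first entry. */
	if (c->size == 0)
		c->last_reloc_trans = last_reloc_trans;
	c->size++;
}

static bool cache_usable(struct cache *c)
{
	if (c->size == 0)
		return false;
	/* Relocation may have reallocated the cached leaves: start over. */
	if (last_reloc_trans > c->last_reloc_trans) {
		c->size = 0;	/* stands in for btrfs_lru_cache_clear() */
		return false;
	}
	return true;
}

int main(void)
{
	struct cache c = { 0 };

	cache_store(&c);
	printf("usable before relocation: %d\n", cache_usable(&c));
	last_reloc_trans++;	/* a relocation happened */
	printf("usable after relocation:  %d\n", cache_usable(&c));
	return 0;
}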
 static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
 				 const u64 **root_ids_ret, int *root_count_ret)
 {
 	struct backref_ctx *bctx = ctx;
 	struct send_ctx *sctx = bctx->sctx;
 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 	const u64 key = leaf_bytenr >> fs_info->sectorsize_bits;
+	struct btrfs_lru_cache_entry *raw_entry;
 	struct backref_cache_entry *entry;

-	if (sctx->backref_cache.size == 0)
+	if (btrfs_lru_cache_size(&sctx->backref_cache) == 0)
 		return false;

 	/*
 	 * If relocation happened since we first filled the cache, then we must
 	 * empty the cache and can not use it, because even though we operate on
 	 * read-only roots, their leaves and nodes may have been reallocated and
 	 * now be used for different nodes/leaves of the same tree or some other
 	 * tree.
 	 *
 	 * We are called from iterate_extent_inodes() while either holding a
 	 * transaction handle or holding fs_info->commit_root_sem, so no need
 	 * to take any lock here.
 	 */
-	if (fs_info->last_reloc_trans > sctx->backref_cache.last_reloc_trans) {
-		empty_backref_cache(sctx);
+	if (fs_info->last_reloc_trans > sctx->backref_cache_last_reloc_trans) {
+		btrfs_lru_cache_clear(&sctx->backref_cache);
 		return false;
 	}

-	entry = mtree_load(&sctx->backref_cache.entries, key);
-	if (!entry)
+	raw_entry = btrfs_lru_cache_lookup(&sctx->backref_cache, key);
+	if (!raw_entry)
 		return false;

+	entry = container_of(raw_entry, struct backref_cache_entry, entry);
 	*root_ids_ret = entry->root_ids;
 	*root_count_ret = entry->num_roots;
-	list_move_tail(&entry->list, &sctx->backref_cache.lru_list);

 	return true;
 }

 static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids,
 				void *ctx)
 {
 	struct backref_ctx *bctx = ctx;

--- 9 unchanged lines hidden ---

 	 * fs_info->commit_root_sem (at iterate_extent_inodes()), so must do a
 	 * NOFS allocation.
 	 */
 	new_entry = kmalloc(sizeof(struct backref_cache_entry), GFP_NOFS);
 	/* No worries, cache is optional. */
 	if (!new_entry)
 		return;

-	new_entry->key = leaf_bytenr >> fs_info->sectorsize_bits;
+	new_entry->entry.key = leaf_bytenr >> fs_info->sectorsize_bits;
 	new_entry->num_roots = 0;
 	ULIST_ITER_INIT(&uiter);
 	while ((node = ulist_next(root_ids, &uiter)) != NULL) {
 		const u64 root_id = node->val;
 		struct clone_root *root;

 		root = bsearch((void *)(uintptr_t)root_id, sctx->clone_roots,
 			       sctx->clone_roots_cnt, sizeof(struct clone_root),

--- 11 unchanged lines hidden ---

 		new_entry->num_roots++;
 	}

 	/*
 	 * We may have not added any roots to the new cache entry, which means
 	 * none of the roots is part of the list of roots from which we are
 	 * allowed to clone. Cache the new entry as it's still useful to avoid
 	 * backref walking to determine which roots have a path to the leaf.
+	 *
+	 * Also use GFP_NOFS because we're called while holding a transaction
+	 * handle or while holding fs_info->commit_root_sem.
 	 */
-
-	if (sctx->backref_cache.size >= SEND_MAX_BACKREF_CACHE_SIZE) {
-		struct backref_cache_entry *lru_entry;
-		struct backref_cache_entry *mt_entry;
-
-		lru_entry = list_first_entry(&sctx->backref_cache.lru_list,
-					     struct backref_cache_entry, list);
-		mt_entry = mtree_erase(&sctx->backref_cache.entries, lru_entry->key);
-		ASSERT(mt_entry == lru_entry);
-		list_del(&mt_entry->list);
-		kfree(mt_entry);
-		sctx->backref_cache.size--;
-	}
-
-	ret = mtree_insert(&sctx->backref_cache.entries, new_entry->key,
-			   new_entry, GFP_NOFS);
+	ret = btrfs_lru_cache_store(&sctx->backref_cache, &new_entry->entry,
+				    GFP_NOFS);
 	ASSERT(ret == 0 || ret == -ENOMEM);
 	if (ret) {
 		/* Caching is optional, no worries. */
 		kfree(new_entry);
 		return;
 	}

-	list_add_tail(&new_entry->list, &sctx->backref_cache.lru_list);
-
 	/*
 	 * We are called from iterate_extent_inodes() while either holding a
 	 * transaction handle or holding fs_info->commit_root_sem, so no need
 	 * to take any lock here.
 	 */
-	if (sctx->backref_cache.size == 0)
-		sctx->backref_cache.last_reloc_trans = fs_info->last_reloc_trans;
-
-	sctx->backref_cache.size++;
+	if (btrfs_lru_cache_size(&sctx->backref_cache) == 1)
+		sctx->backref_cache_last_reloc_trans = fs_info->last_reloc_trans;
 }
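The block deleted above implemented eviction by hand: once the cache held SEND_MAX_BACKREF_CACHE_SIZE entries, the least recently used entry (the head of lru_list) was erased from the maple tree and freed before the new one went in, and lookup_backref_cache() refreshed recency with list_move_tail(). After this patch those mechanics presumably live inside btrfs_lru_cache_store() and btrfs_lru_cache_lookup(), with the capacity fixed at btrfs_lru_cache_init() time. A self-contained userspace sketch of that evict-oldest-on-full policy; this is an illustration, not the lru_cache.c implementation:

#include <stdio.h>
#include <stdlib.h>

/* head is the least recently used entry, tail the most recent. */
struct lru_entry {
	unsigned long long key;
	int value;
	struct lru_entry *prev, *next;
};

struct lru_cache {
	struct lru_entry *head, *tail;
	int size, max_size;
};

static void lru_unlink(struct lru_cache *c, struct lru_entry *e)
{
	if (e->prev)
		e->prev->next = e->next;
	else
		c->head = e->next;
	if (e->next)
		e->next->prev = e->prev;
	else
		c->tail = e->prev;
	e->prev = e->next = NULL;
}

static void lru_append(struct lru_cache *c, struct lru_entry *e)
{
	e->prev = c->tail;
	e->next = NULL;
	if (c->tail)
		c->tail->next = e;
	else
		c->head = e;
	c->tail = e;
}

/* A hit moves the entry to the tail, i.e. marks it most recently used. */
static struct lru_entry *lru_lookup(struct lru_cache *c, unsigned long long key)
{
	for (struct lru_entry *e = c->head; e; e = e->next) {
		if (e->key == key) {
			lru_unlink(c, e);
			lru_append(c, e);
			return e;
		}
	}
	return NULL;
}

/* When full, evict the oldest entry before inserting the new one. */
static void lru_store(struct lru_cache *c, unsigned long long key, int value)
{
	struct lru_entry *e;

	if (c->size >= c->max_size) {
		struct lru_entry *oldest = c->head;

		lru_unlink(c, oldest);
		free(oldest);
		c->size--;
	}
	e = malloc(sizeof(*e));
	if (!e)
		return;	/* caching is optional, as in send.c */
	e->key = key;
	e->value = value;
	lru_append(c, e);
	c->size++;
}

int main(void)
{
	struct lru_cache c = { .max_size = 2 };

	lru_store(&c, 1, 10);
	lru_store(&c, 2, 20);
	lru_lookup(&c, 1);	/* key 1 becomes most recent */
	lru_store(&c, 3, 30);	/* full: evicts key 2, the oldest */
	printf("key 2 %s\n", lru_lookup(&c, 2) ? "present" : "evicted");
	printf("key 1 %s\n", lru_lookup(&c, 1) ? "present" : "evicted");
	return 0;
}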

 static int check_extent_item(u64 bytenr, const struct btrfs_extent_item *ei,
 			     const struct extent_buffer *leaf, void *ctx)
 {
 	const u64 refs = btrfs_extent_refs(leaf, ei);
 	const struct backref_ctx *bctx = ctx;
 	const struct send_ctx *sctx = bctx->sctx;

--- 6600 unchanged lines hidden ---

 		goto out;
 	}

 	INIT_LIST_HEAD(&sctx->new_refs);
 	INIT_LIST_HEAD(&sctx->deleted_refs);
 	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
 	INIT_LIST_HEAD(&sctx->name_cache_list);

-	INIT_LIST_HEAD(&sctx->backref_cache.lru_list);
-	mt_init(&sctx->backref_cache.entries);
+	btrfs_lru_cache_init(&sctx->backref_cache, SEND_MAX_BACKREF_CACHE_SIZE);

 	sctx->pending_dir_moves = RB_ROOT;
 	sctx->waiting_dir_moves = RB_ROOT;
 	sctx->orphan_dirs = RB_ROOT;
 	sctx->rbtree_new_refs = RB_ROOT;
 	sctx->rbtree_deleted_refs = RB_ROOT;

 	sctx->flags = arg->flags;

--- 247 unchanged lines hidden ---

 	kfree(sctx->send_buf_pages);
 	kvfree(sctx->send_buf);
 	kvfree(sctx->verity_descriptor);

 	name_cache_free(sctx);

 	close_current_inode(sctx);

-	empty_backref_cache(sctx);
+	btrfs_lru_cache_clear(&sctx->backref_cache);

 		kfree(sctx);
 	}

 	return ret;
 }