| /linux/fs/btrfs/ |
| transaction.c |
    148  btrfs_err(transaction->fs_info,  in btrfs_put_transaction()
    168  spin_lock(&transaction->fs_info->unused_bgs_lock);  in btrfs_put_transaction()
    170  spin_unlock(&transaction->fs_info->unused_bgs_lock);  in btrfs_put_transaction()
    182  struct btrfs_fs_info *fs_info = trans->fs_info;  in switch_commit_roots() local
    192  down_write(&fs_info->commit_root_sem);  in switch_commit_roots()
    194  if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))  in switch_commit_roots()
    195  fs_info->last_reloc_trans = trans->transid;  in switch_commit_roots()
    214  btrfs_drop_and_free_fs_root(fs_info, root);  in switch_commit_roots()
    219  up_write(&fs_info->commit_root_sem);  in switch_commit_roots()
    256  struct btrfs_fs_info *fs_info = trans->fs_info;  in btrfs_trans_release_chunk_metadata() local
    [all …]
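
The switch_commit_roots() hits above show the commit-root handover being done under fs_info->commit_root_sem, with the relocation transid recorded while the write lock is held. A minimal standalone sketch of that locking shape, using a pthread rwlock and a plain struct as hypothetical stand-ins for the kernel semaphore and fs_info:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct fs_ctx {
	pthread_rwlock_t commit_root_sem;   /* stand-in for fs_info->commit_root_sem */
	bool reloc_running;                 /* stand-in for BTRFS_FS_RELOC_RUNNING */
	uint64_t last_reloc_trans;
};

/* Record the relocation transid and (in the real code) swap commit roots,
 * all while holding the commit-root semaphore for writing. */
static void switch_commit_roots_sketch(struct fs_ctx *fs, uint64_t transid)
{
	pthread_rwlock_wrlock(&fs->commit_root_sem);
	if (fs->reloc_running)
		fs->last_reloc_trans = transid;
	/* ... the kernel function walks the dirty roots and swaps their
	 *     commit roots here; omitted in this sketch ... */
	pthread_rwlock_unlock(&fs->commit_root_sem);
}
```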
|
| fs.c |
    108  bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,  in btrfs_exclop_start() argument
    113  spin_lock(&fs_info->super_lock);  in btrfs_exclop_start()
    114  if (fs_info->exclusive_operation == BTRFS_EXCLOP_NONE) {  in btrfs_exclop_start()
    115  fs_info->exclusive_operation = type;  in btrfs_exclop_start()
    118  spin_unlock(&fs_info->super_lock);  in btrfs_exclop_start()
    134  bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,  in btrfs_exclop_start_try_lock() argument
    137  spin_lock(&fs_info->super_lock);  in btrfs_exclop_start_try_lock()
    138  if (fs_info->exclusive_operation == type ||  in btrfs_exclop_start_try_lock()
    139  (fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED &&  in btrfs_exclop_start_try_lock()
    143  spin_unlock(&fs_info->super_lock);  in btrfs_exclop_start_try_lock()
    [all …]
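
The fs.c hits show how an exclusive operation (balance, device add, resize, ...) claims a single slot under fs_info->super_lock: the claim only succeeds while the slot is BTRFS_EXCLOP_NONE. A user-space sketch of the same claim/release idea, with a pthread mutex standing in for the spinlock and illustrative enum values that are not the kernel's:

```c
#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-ins; not the kernel's BTRFS_EXCLOP_* values. */
enum exclop { EXCLOP_NONE, EXCLOP_BALANCE, EXCLOP_DEV_ADD, EXCLOP_RESIZE };

struct fs_ctx {
	pthread_mutex_t super_lock;        /* stand-in for fs_info->super_lock */
	enum exclop exclusive_operation;   /* which exclusive op currently runs */
};

/* Claim the single exclusive-operation slot, as btrfs_exclop_start() does. */
static bool exclop_start(struct fs_ctx *fs, enum exclop type)
{
	bool ret = false;

	pthread_mutex_lock(&fs->super_lock);
	if (fs->exclusive_operation == EXCLOP_NONE) {
		fs->exclusive_operation = type;
		ret = true;
	}
	pthread_mutex_unlock(&fs->super_lock);
	return ret;
}

/* Release the slot again once the operation has finished. */
static void exclop_finish(struct fs_ctx *fs)
{
	pthread_mutex_lock(&fs->super_lock);
	fs->exclusive_operation = EXCLOP_NONE;
	pthread_mutex_unlock(&fs->super_lock);
}
```

The try_lock variant visible at lines 134-143 additionally tolerates re-entering the same operation type (for resuming a paused balance); its exact locking discipline is truncated in the listing, so it is left out of the sketch.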
|
| qgroup.c |
    33  enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info)  in btrfs_qgroup_mode() argument
    35  if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))  in btrfs_qgroup_mode()
    37  if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)  in btrfs_qgroup_mode()
    42  bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info)  in btrfs_qgroup_enabled() argument
    44  return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;  in btrfs_qgroup_enabled()
    47  bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info)  in btrfs_qgroup_full_accounting() argument
    49  return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;  in btrfs_qgroup_full_accounting()
    82  static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,  in qgroup_rsv_add() argument
    86  trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);  in qgroup_rsv_add()
    90  static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,  in qgroup_rsv_release() argument
    [all …]
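
The qgroup.c hits derive a quota mode from two pieces of state: a runtime "quota enabled" flag and a "simple mode" status flag, with the enabled/full-accounting predicates layered on top. A small sketch of that derivation, using hypothetical flag names rather than the kernel's bit definitions:

```c
#include <stdbool.h>
#include <stdint.h>

#define QUOTA_ENABLED      (1u << 0)   /* stand-in for BTRFS_FS_QUOTA_ENABLED */
#define STATUS_SIMPLE_MODE (1u << 1)   /* stand-in for BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE */

enum qgroup_mode { QGROUP_MODE_DISABLED, QGROUP_MODE_SIMPLE, QGROUP_MODE_FULL };

struct fs_ctx {
	uint32_t flags;          /* runtime flags (QUOTA_ENABLED lives here) */
	uint32_t qgroup_flags;   /* qgroup status flags */
};

/* Disabled unless quotas are on; simple if the status flag says so; else full. */
static enum qgroup_mode qgroup_mode(const struct fs_ctx *fs)
{
	if (!(fs->flags & QUOTA_ENABLED))
		return QGROUP_MODE_DISABLED;
	if (fs->qgroup_flags & STATUS_SIMPLE_MODE)
		return QGROUP_MODE_SIMPLE;
	return QGROUP_MODE_FULL;
}

static bool qgroup_enabled(const struct fs_ctx *fs)
{
	return qgroup_mode(fs) != QGROUP_MODE_DISABLED;
}

static bool qgroup_full_accounting(const struct fs_ctx *fs)
{
	return qgroup_mode(fs) == QGROUP_MODE_FULL;
}
```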
|
| scrub.c |
    197  struct btrfs_fs_info *fs_info;  member
    355  static int init_scrub_stripe(struct btrfs_fs_info *fs_info,  in init_scrub_stripe() argument
    358  const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;  in init_scrub_stripe()
    363  stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;  in init_scrub_stripe()
    373  fs_info->block_min_order, stripe->folios);  in init_scrub_stripe()
    383  stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,  in init_scrub_stripe()
    384  fs_info->csum_size, GFP_KERNEL);  in init_scrub_stripe()
    400  static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)  in __scrub_blocked_if_needed() argument
    402  while (atomic_read(&fs_info->scrub_pause_req)) {  in __scrub_blocked_if_needed()
    403  mutex_unlock(&fs_info->scrub_lock);  in __scrub_blocked_if_needed()
    [all …]
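
init_scrub_stripe() sizes its per-stripe buffers from fs_info: the number of sectors per stripe is BTRFS_STRIPE_LEN shifted down by sectorsize_bits, and the checksum buffer holds csum_size bytes per sector. A standalone sketch of that arithmetic, assuming a 64 KiB stripe length (an assumption, since the constant's value is not shown in the listing):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define STRIPE_LEN (64 * 1024)   /* assumption: BTRFS_STRIPE_LEN is 64 KiB */

struct scrub_stripe_sketch {
	uint32_t nr_sectors;
	uint8_t *csums;
};

/* Mirror the sizing logic visible in init_scrub_stripe(). */
static int init_stripe_sketch(struct scrub_stripe_sketch *stripe,
			      uint32_t sectorsize_bits, uint32_t csum_size)
{
	stripe->nr_sectors = STRIPE_LEN >> sectorsize_bits;
	stripe->csums = calloc(stripe->nr_sectors, csum_size);
	return stripe->csums ? 0 : -1;
}

int main(void)
{
	struct scrub_stripe_sketch s;

	/* 4 KiB sectors (sectorsize_bits = 12), 4-byte CRC32C checksums */
	if (init_stripe_sketch(&s, 12, 4) == 0)
		printf("%u sectors, %u csum bytes\n", s.nr_sectors, s.nr_sectors * 4);
	free(s.csums);
	return 0;
}
```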
|
| super.c |
    71  struct btrfs_fs_info *fs_info = btrfs_sb(sb);  in btrfs_put_super() local
    73  btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);  in btrfs_put_super()
    74  close_ctree(fs_info);  in btrfs_put_super()
    667  static void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)  in btrfs_clear_oneshot_options() argument
    669  btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);  in btrfs_clear_oneshot_options()
    670  btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE);  in btrfs_clear_oneshot_options()
    671  btrfs_clear_opt(fs_info->mount_opt, NOSPACECACHE);  in btrfs_clear_oneshot_options()
    674  static bool check_ro_option(const struct btrfs_fs_info *fs_info,  in check_ro_option() argument
    679  btrfs_err(fs_info, "%s must be used with ro mount option",  in check_ro_option()
    737  void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info)  in btrfs_set_free_space_cache_settings() argument
    [all …]
|
| tree-mod-log.c |
    58  static u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)  in btrfs_inc_tree_mod_seq() argument
    60  return atomic64_inc_return(&fs_info->tree_mod_seq);  in btrfs_inc_tree_mod_seq()
    71  u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,  in btrfs_get_tree_mod_seq() argument
    74  write_lock(&fs_info->tree_mod_log_lock);  in btrfs_get_tree_mod_seq()
    76  elem->seq = btrfs_inc_tree_mod_seq(fs_info);  in btrfs_get_tree_mod_seq()
    77  list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);  in btrfs_get_tree_mod_seq()
    78  set_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags);  in btrfs_get_tree_mod_seq()
    80  write_unlock(&fs_info->tree_mod_log_lock);  in btrfs_get_tree_mod_seq()
    85  void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,  in btrfs_put_tree_mod_seq() argument
    98  write_lock(&fs_info->tree_mod_log_lock);  in btrfs_put_tree_mod_seq()
    [all …]
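
tree-mod-log.c hands out a monotonically increasing sequence number from an atomic counter and tracks the active users on a list, both manipulated under a writer lock. A simplified user-space sketch of the get/put pair, with a mutex and a singly linked list as stand-ins (the kernel version appends to the list tail and also sets a flags bit, which this sketch omits):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

struct seq_elem {
	uint64_t seq;
	struct seq_elem *next;       /* stand-in for the kernel list_head */
};

struct mod_log {
	atomic_uint_fast64_t seq;    /* stand-in for fs_info->tree_mod_seq */
	pthread_mutex_t lock;        /* stand-in for tree_mod_log_lock */
	struct seq_elem *users;      /* stand-in for tree_mod_seq_list */
};

/* Register a user and give it the next sequence number (cf. btrfs_get_tree_mod_seq). */
static uint64_t get_tree_mod_seq(struct mod_log *log, struct seq_elem *elem)
{
	pthread_mutex_lock(&log->lock);
	elem->seq = atomic_fetch_add(&log->seq, 1) + 1;
	elem->next = log->users;
	log->users = elem;
	pthread_mutex_unlock(&log->lock);
	return elem->seq;
}

/* Drop the user again (cf. btrfs_put_tree_mod_seq, greatly simplified). */
static void put_tree_mod_seq(struct mod_log *log, struct seq_elem *elem)
{
	pthread_mutex_lock(&log->lock);
	for (struct seq_elem **p = &log->users; *p; p = &(*p)->next) {
		if (*p == elem) {
			*p = elem->next;
			break;
		}
	}
	pthread_mutex_unlock(&log->lock);
}
```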
|
| ref-verify.c |
    196  static void __print_stack_trace(struct btrfs_fs_info *fs_info,  in __print_stack_trace() argument
    200  btrfs_err(fs_info, " ref-verify: no stacktrace");  in __print_stack_trace()
    210  static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,  in __print_stack_trace() argument
    213  btrfs_err(fs_info, " ref-verify: no stacktrace support");  in __print_stack_trace()
    245  static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,  in add_block_entry() argument
    265  spin_lock(&fs_info->ref_verify_lock);  in add_block_entry()
    266  exist = insert_block_entry(&fs_info->block_tree, be);  in add_block_entry()
    294  static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,  in add_tree_block() argument
    314  be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);  in add_tree_block()
    334  spin_unlock(&fs_info->ref_verify_lock);  in add_tree_block()
    [all …]
|
| zoned.c |
    202  const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;  in emulate_report_zones()
    272  btrfs_err(device->fs_info,  in btrfs_get_dev_zones()
    294  static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)  in calculate_emulated_zone_size() argument
    297  struct btrfs_root *root = fs_info->dev_root;  in calculate_emulated_zone_size()
    326  fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);  in calculate_emulated_zone_size()
    330  int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)  in btrfs_get_dev_zone_info_all_devices() argument
    332  struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;  in btrfs_get_dev_zone_info_all_devices()
    337  if (!btrfs_fs_incompat(fs_info, ZONED))  in btrfs_get_dev_zone_info_all_devices()
    357  struct btrfs_fs_info *fs_info = device->fs_info;  in btrfs_get_dev_zone_info() local
    374  if (!btrfs_fs_incompat(fs_info, ZONED))  in btrfs_get_dev_zone_info()
    [all …]
|
| extent_io.c |
    43  struct btrfs_fs_info *fs_info = eb->fs_info;  in btrfs_leak_debug_add_eb() local
    46  spin_lock_irqsave(&fs_info->eb_leak_lock, flags);  in btrfs_leak_debug_add_eb()
    47  list_add(&eb->leak_list, &fs_info->allocated_ebs);  in btrfs_leak_debug_add_eb()
    48  spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);  in btrfs_leak_debug_add_eb()
    53  struct btrfs_fs_info *fs_info = eb->fs_info;  in btrfs_leak_debug_del_eb() local
    56  spin_lock_irqsave(&fs_info->eb_leak_lock, flags);  in btrfs_leak_debug_del_eb()
    58  spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);  in btrfs_leak_debug_del_eb()
    61  void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)  in btrfs_extent_buffer_leak_debug_check() argument
    70  if (!fs_info->allocated_ebs.next)  in btrfs_extent_buffer_leak_debug_check()
    73  WARN_ON(!list_empty(&fs_info->allocated_ebs));  in btrfs_extent_buffer_leak_debug_check()
    [all …]
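
The extent_io.c hits are the extent-buffer leak debugging: every live eb is linked onto fs_info->allocated_ebs under an IRQ-safe spinlock, and teardown warns if the list is not empty. A stripped-down sketch of that bookkeeping, with a mutex and a singly linked list as stand-ins for the kernel primitives:

```c
#include <assert.h>
#include <pthread.h>
#include <stddef.h>

struct eb_sketch {
	struct eb_sketch *next;   /* stand-in for eb->leak_list */
};

struct leak_tracker {
	pthread_mutex_t lock;     /* stand-in for fs_info->eb_leak_lock */
	struct eb_sketch *head;   /* stand-in for fs_info->allocated_ebs */
};

/* Track a newly allocated buffer. */
static void leak_debug_add(struct leak_tracker *t, struct eb_sketch *eb)
{
	pthread_mutex_lock(&t->lock);
	eb->next = t->head;
	t->head = eb;
	pthread_mutex_unlock(&t->lock);
}

/* Untrack it when it is freed. */
static void leak_debug_del(struct leak_tracker *t, struct eb_sketch *eb)
{
	pthread_mutex_lock(&t->lock);
	for (struct eb_sketch **p = &t->head; *p; p = &(*p)->next) {
		if (*p == eb) {
			*p = eb->next;
			break;
		}
	}
	pthread_mutex_unlock(&t->lock);
}

/* At teardown, everything must have been removed again (cf. the WARN_ON above). */
static void leak_debug_check(struct leak_tracker *t)
{
	assert(t->head == NULL);
}
```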
|
| ref-verify.h |
    19  int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info);
    20  void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info);
    21  int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
    23  void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
    26  static inline void btrfs_init_ref_verify(struct btrfs_fs_info *fs_info)  in btrfs_init_ref_verify() argument
    28  spin_lock_init(&fs_info->ref_verify_lock);  in btrfs_init_ref_verify()
    29  fs_info->block_tree = RB_ROOT;  in btrfs_init_ref_verify()
    32  static inline int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)  in btrfs_build_ref_tree() argument
    37  static inline void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)  in btrfs_free_ref_cache() argument
    41  static inline int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,  in btrfs_ref_tree_mod() argument
    [all …]
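
ref-verify.h pairs the real prototypes with static-inline no-op stubs, so callers compile the same way whether the ref-verify debugging option is built in or not. A minimal sketch of that compile-time on/off header idiom (the config macro and function names here are made up for illustration):

```c
/* feature.h — sketch of the "real declaration vs. inline stub" idiom */
#ifndef FEATURE_SKETCH_H
#define FEATURE_SKETCH_H

struct fs_ctx;   /* opaque stand-in for struct btrfs_fs_info */

#ifdef CONFIG_MY_DEBUG_FEATURE
/* Real implementations live in a .c file that is only built with the option on. */
int  build_ref_tree(struct fs_ctx *fs);
void free_ref_cache(struct fs_ctx *fs);
#else
/* With the option off, callers still compile and the calls fold away entirely. */
static inline int build_ref_tree(struct fs_ctx *fs)
{
	return 0;
}
static inline void free_ref_cache(struct fs_ctx *fs)
{
}
#endif

#endif /* FEATURE_SKETCH_H */
```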
|
| tree-checker.c |
    59  const struct btrfs_fs_info *fs_info = eb->fs_info;  in generic_err() local
    69  btrfs_crit(fs_info,  in generic_err()
    85  const struct btrfs_fs_info *fs_info = eb->fs_info;  in file_extent_err() local
    97  btrfs_crit(fs_info,  in file_extent_err()
    129  end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);  in file_extent_end()
    146  const struct btrfs_fs_info *fs_info = eb->fs_info;  in dir_item_err() local
    158  btrfs_crit(fs_info,  in dir_item_err()
    211  struct btrfs_fs_info *fs_info = leaf->fs_info;  in check_extent_data_item() local
    213  u32 sectorsize = fs_info->sectorsize;  in check_extent_data_item()
    368  struct btrfs_fs_info *fs_info = leaf->fs_info;  in check_csum_item() local
    [all …]
|
| ioctl.c |
    221  static int check_fsflags_compatible(const struct btrfs_fs_info *fs_info,  in check_fsflags_compatible() argument
    224  if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL))  in check_fsflags_compatible()
    261  struct btrfs_fs_info *fs_info = root->fs_info;  in btrfs_fileattr_set() local
    280  ret = check_fsflags_compatible(fs_info, fsflags);  in btrfs_fileattr_set()
    361  comp = btrfs_compress_type2str(fs_info->compress_type);  in btrfs_fileattr_set()
    409  static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,  in btrfs_ioctl_fitrim() argument
    426  if (btrfs_is_zoned(fs_info))  in btrfs_ioctl_fitrim()
    436  if (btrfs_test_opt(fs_info, NOLOGREPLAY))  in btrfs_ioctl_fitrim()
    440  list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,  in btrfs_ioctl_fitrim()
    460  if (range.len < fs_info->sectorsize)  in btrfs_ioctl_fitrim()
    [all …]
|
| lzo.c |
    68  static u32 workspace_buf_length(const struct btrfs_fs_info *fs_info)  in workspace_buf_length() argument
    70  return lzo1x_worst_compress(fs_info->sectorsize);  in workspace_buf_length()
    72  static u32 workspace_cbuf_length(const struct btrfs_fs_info *fs_info)  in workspace_cbuf_length() argument
    74  return lzo1x_worst_compress(fs_info->sectorsize);  in workspace_cbuf_length()
    87  struct list_head *lzo_alloc_workspace(struct btrfs_fs_info *fs_info)  in lzo_alloc_workspace() argument
    96  workspace->buf = kvmalloc(workspace_buf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);  in lzo_alloc_workspace()
    97  workspace->cbuf = kvmalloc(workspace_cbuf_length(fs_info), GFP_KERNEL | __GFP_NOWARN);  in lzo_alloc_workspace()
    135  static int copy_compressed_data_to_page(struct btrfs_fs_info *fs_info,  in copy_compressed_data_to_page() argument
    142  const u32 sectorsize = fs_info->sectorsize;  in copy_compressed_data_to_page()
    143  const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;  in copy_compressed_data_to_page()
    [all …]
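
Both LZO workspace buffers are sized with lzo1x_worst_compress() applied to the sector size, i.e. the largest output LZO1X can produce for a sectorsize-byte input. Assuming the usual LZO worst-case bound of x + x/16 + 64 + 3 (an assumption; the macro body is not shown in the listing), the sizing reduces to:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed worst-case expansion bound for LZO1X. */
static uint32_t lzo1x_worst_compress_sketch(uint32_t x)
{
	return x + x / 16 + 64 + 3;
}

/* Both the staging buffer and the compressed buffer are sized for the worst case. */
static uint32_t workspace_buf_length_sketch(uint32_t sectorsize)
{
	return lzo1x_worst_compress_sketch(sectorsize);
}

int main(void)
{
	/* e.g. 4 KiB sectors need a 4419-byte workspace buffer in the worst case */
	printf("%u\n", workspace_buf_length_sketch(4096));
	return 0;
}
```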
|
| relocation.c |
    190  blocksize = rc->extent_root->fs_info->nodesize;  in mark_block_processed()
    292  root->fs_info->running_transaction->transid)  in btrfs_should_ignore_reloc_root()
    305  struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)  in find_reloc_root() argument
    307  struct reloc_control *rc = fs_info->reloc_ctl;  in find_reloc_root()
    418  iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);  in build_backref_tree()
    480  struct btrfs_fs_info *fs_info = root->fs_info;  in __add_reloc_root() local
    483  struct reloc_control *rc = fs_info->reloc_ctl;  in __add_reloc_root()
    496  btrfs_err(fs_info,  in __add_reloc_root()
    512  struct btrfs_fs_info *fs_info = root->fs_info;  in __del_reloc_root() local
    515  struct reloc_control *rc = fs_info->reloc_ctl;  in __del_reloc_root()
    [all …]
|
| extent_map.c |
    82  struct btrfs_fs_info *fs_info = inode->root->fs_info;  in remove_em() local
    87  if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(inode->root)))  in remove_em()
    88  percpu_counter_dec(&fs_info->evictable_extent_maps);  in remove_em()
    307  static void dump_extent_map(struct btrfs_fs_info *fs_info, const char *prefix,  in dump_extent_map() argument
    312  btrfs_crit(fs_info,  in dump_extent_map()
    320  static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map *em)  in validate_extent_map() argument
    326  dump_extent_map(fs_info, "zero disk_num_bytes", em);  in validate_extent_map()
    328  dump_extent_map(fs_info, "ram_bytes too small", em);  in validate_extent_map()
    331  dump_extent_map(fs_info, "disk_num_bytes too small", em);  in validate_extent_map()
    334  dump_extent_map(fs_info,  in validate_extent_map()
    [all …]
|
| compression.c |
    222  struct folio *btrfs_alloc_compr_folio(struct btrfs_fs_info *fs_info)  in btrfs_alloc_compr_folio() argument
    227  if (fs_info->block_min_order)  in btrfs_alloc_compr_folio()
    242  return folio_alloc(GFP_NOFS, fs_info->block_min_order);  in btrfs_alloc_compr_folio()
    290  struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);  in end_compressed_writeback() local
    312  btrfs_folio_clamp_clear_writeback(fs_info, folio,  in end_compressed_writeback()
    376  struct btrfs_fs_info *fs_info = inode->root->fs_info;  in btrfs_submit_compressed_write() local
    379  ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));  in btrfs_submit_compressed_write()
    380  ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));  in btrfs_submit_compressed_write()
    414  struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);  in add_ra_bio_pages() local
    440  if (fs_info->sectorsize < PAGE_SIZE)  in add_ra_bio_pages()
    [all …]
|
| delayed-ref.h |
    309  static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info,  in btrfs_calc_delayed_ref_bytes() argument
    314  num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);  in btrfs_calc_delayed_ref_bytes()
    324  if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))  in btrfs_calc_delayed_ref_bytes()
    330  static inline u64 btrfs_calc_delayed_ref_csum_bytes(const struct btrfs_fs_info *fs_info,  in btrfs_calc_delayed_ref_csum_bytes() argument
    338  return btrfs_calc_metadata_size(fs_info, num_csum_items);  in btrfs_calc_delayed_ref_csum_bytes()
    386  void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
    391  btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
    398  void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
    403  const struct btrfs_fs_info *fs_info,
    409  int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
    [all …]
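
btrfs_calc_delayed_ref_bytes() converts a count of delayed refs into a byte reservation via the generic insert-metadata-size helper and then enlarges it when the free-space-tree option is active (the exact adjustment is truncated in the listing). A hedged sketch of that shape, with the per-item cost treated as a plain parameter and the doubling used purely as an illustrative placeholder rather than the kernel's formula:

```c
#include <stdbool.h>
#include <stdint.h>

struct fs_ctx {
	uint64_t insert_metadata_size_per_item;   /* assumed per-item insertion cost */
	bool free_space_tree;                     /* stand-in for the FREE_SPACE_TREE mount option */
};

static uint64_t calc_delayed_ref_bytes_sketch(const struct fs_ctx *fs, int num_delayed_refs)
{
	uint64_t num_bytes = fs->insert_metadata_size_per_item * num_delayed_refs;

	/*
	 * Assumption: with the free space tree enabled, each extent change also
	 * has to update that tree, so the reservation is enlarged.  The listing
	 * truncates the exact factor; doubling here is only an illustration.
	 */
	if (fs->free_space_tree)
		num_bytes *= 2;

	return num_bytes;
}
```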
|
| inode.c |
    96  struct btrfs_fs_info *fs_info;  member
    132  struct btrfs_fs_info *fs_info = warn->fs_info;  in data_reloc_print_warning_inode() local
    142  local_root = btrfs_get_fs_root(fs_info, root, true);  in data_reloc_print_warning_inode()
    176  btrfs_warn(fs_info,  in data_reloc_print_warning_inode()
    192  btrfs_warn(fs_info,  in data_reloc_print_warning_inode()
    195  fs_info->sectorsize, nlink,  in data_reloc_print_warning_inode()
    203  btrfs_warn(fs_info,  in data_reloc_print_warning_inode()
    220  struct btrfs_fs_info *fs_info = inode->root->fs_info;  in print_data_reloc_error() local
    225  const u32 csum_size = fs_info->csum_size;  in print_data_reloc_error()
    231  mutex_lock(&fs_info->reloc_mutex);  in print_data_reloc_error()
    [all …]
|
| qgroup.h |
    331  enum btrfs_qgroup_mode btrfs_qgroup_mode(const struct btrfs_fs_info *fs_info);
    332  bool btrfs_qgroup_enabled(const struct btrfs_fs_info *fs_info);
    333  bool btrfs_qgroup_full_accounting(const struct btrfs_fs_info *fs_info);
    334  int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
    336  int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
    337  int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
    338  void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
    339  int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
    347  int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 subvolid);
    350  int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
    [all …]
|
| free-space-tree.c |
    33  if (btrfs_fs_incompat(block_group->fs_info, EXTENT_TREE_V2))  in btrfs_free_space_root()
    35  return btrfs_global_root(block_group->fs_info, &key);  in btrfs_free_space_root()
    45  btrfs_warn(cache->fs_info, "block group %llu length is zero",  in btrfs_set_free_space_tree_thresholds()
    52  bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;  in btrfs_set_free_space_tree_thresholds()
    102  struct btrfs_fs_info *fs_info = block_group->fs_info;  in btrfs_search_free_space_info() local
    115  btrfs_warn(fs_info, "missing free space info for %llu",  in btrfs_search_free_space_info()
    154  static inline u32 free_space_bitmap_size(const struct btrfs_fs_info *fs_info,  in free_space_bitmap_size() argument
    157  return DIV_ROUND_UP(size >> fs_info->sectorsize_bits, BITS_PER_BYTE);  in free_space_bitmap_size()
    203  struct btrfs_fs_info *fs_info = trans->fs_info;  in btrfs_convert_free_space_to_bitmaps() local
    217  bitmap_size = free_space_bitmap_size(fs_info, block_group->length);  in btrfs_convert_free_space_to_bitmaps()
    [all …]
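
free_space_bitmap_size() converts a byte length into a bitmap size: shift by sectorsize_bits to get the number of sectors (one bit each), then round up to whole bytes. The same arithmetic as a standalone helper:

```c
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* DIV_ROUND_UP(size >> sectorsize_bits, BITS_PER_BYTE), as in free_space_bitmap_size(). */
static uint32_t free_space_bitmap_size_sketch(uint64_t size, uint32_t sectorsize_bits)
{
	uint64_t nr_sectors = size >> sectorsize_bits;

	return (uint32_t)((nr_sectors + BITS_PER_BYTE - 1) / BITS_PER_BYTE);
}

int main(void)
{
	/* A 1 GiB block group with 4 KiB sectors needs a 32 KiB bitmap. */
	printf("%u\n", free_space_bitmap_size_sketch(1ULL << 30, 12));
	return 0;
}
```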
|
| delayed-inode.c |
    260  delayed_root = node->root->fs_info->delayed_root;  in btrfs_next_delayed_node()
    290  delayed_root = delayed_node->root->fs_info->delayed_root;  in __btrfs_release_delayed_node()
    428  atomic_inc(&delayed_node->root->fs_info->delayed_root->items);  in __btrfs_add_delayed_item()
    455  delayed_root = delayed_node->root->fs_info->delayed_root;  in __btrfs_remove_delayed_item()
    507  struct btrfs_fs_info *fs_info = trans->fs_info;  in btrfs_delayed_item_reserve_metadata() local
    515  dst_rsv = &fs_info->delayed_block_rsv;  in btrfs_delayed_item_reserve_metadata()
    517  num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);  in btrfs_delayed_item_reserve_metadata()
    526  trace_btrfs_space_reservation(fs_info, "delayed_item",  in btrfs_delayed_item_reserve_metadata()
    545  struct btrfs_fs_info *fs_info = root->fs_info;  in btrfs_delayed_item_release_metadata() local
    550  rsv = &fs_info->delayed_block_rsv;  in btrfs_delayed_item_release_metadata()
    [all …]
|
| discard.c |
    85  struct btrfs_fs_info *fs_info = container_of(discard_ctl,  in btrfs_run_discard_work() local
    89  return (!(fs_info->sb->s_flags & SB_RDONLY) &&  in btrfs_run_discard_work()
    90  test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));  in btrfs_run_discard_work()
    301  !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))  in btrfs_discard_check_filter()
    304  discard_ctl = &block_group->fs_info->discard_ctl;  in btrfs_discard_check_filter()
    373  if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))  in btrfs_discard_queue_work()
    657  !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) ||  in btrfs_discard_update_discardable()
    662  discard_ctl = &block_group->fs_info->discard_ctl;  in btrfs_discard_update_discardable()
    693  void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)  in btrfs_discard_punt_unused_bgs_list() argument
    697  spin_lock(&fs_info->unused_bgs_lock);  in btrfs_discard_punt_unused_bgs_list()
    [all …]
|
| /linux/fs/btrfs/tests/ |
| raid-stripe-tree-tests.c |
    40  struct btrfs_fs_info *fs_info = trans->fs_info;  in test_punch_hole_3extents() local
    54  bioc = alloc_btrfs_io_context(fs_info, logical1, RST_TEST_NUM_DEVICES);  in test_punch_hole_3extents()
    61  io_stripe.dev = btrfs_device_by_devid(fs_info->fs_devices, 0);  in test_punch_hole_3extents()
    70  stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);  in test_punch_hole_3extents()
    91  stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);  in test_punch_hole_3extents()
    112  stripe->dev = btrfs_device_by_devid(fs_info->fs_devices, i);  in test_punch_hole_3extents()
    141  ret = btrfs_get_raid_extent_offset(fs_info, logical1, &len1, map_type,  in test_punch_hole_3extents()
    164  ret = btrfs_get_raid_extent_offset(fs_info, logical2, &len2, map_type,  in test_punch_hole_3extents()
    175  ret = btrfs_get_raid_extent_offset(fs_info, logical3, &len3, map_type,  in test_punch_hole_3extents()
    218  struct btrfs_fs_info *fs_info = trans->fs_info;  in test_delete_two_extents() local
    [all …]
|
| free-space-tree-tests.c |
    21  struct btrfs_fs_info *fs_info,  in __check_free_space_extents() argument
    71  offset += fs_info->sectorsize;  in __check_free_space_extents()
    108  struct btrfs_fs_info *fs_info,  in check_free_space_extents() argument
    127  ret = __check_free_space_extents(trans, fs_info, cache, path, extents,  in check_free_space_extents()
    146  return __check_free_space_extents(trans, fs_info, cache, path, extents,  in check_free_space_extents()
    151  struct btrfs_fs_info *fs_info,  in test_empty_block_group() argument
    160  return check_free_space_extents(trans, fs_info, cache, path,  in test_empty_block_group()
    165  struct btrfs_fs_info *fs_info,  in test_remove_all() argument
    180  return check_free_space_extents(trans, fs_info, cache, path,  in test_remove_all()
    185  struct btrfs_fs_info *fs_info,  in test_remove_beginning() argument
    [all …]
|
| /linux/include/trace/events/ |
| btrfs.h |
    163  #define TP_fast_assign_fsid(fs_info) \  argument
    165  if (fs_info) \
    166  memcpy(__entry->fsid, fs_info->fs_devices->fsid, \
    176  #define TP_fast_assign_btrfs(fs_info, args...) \  argument
    178  TP_fast_assign_fsid(fs_info); \
    185  TP_PROTO(const struct btrfs_fs_info *fs_info),
    187  TP_ARGS(fs_info),
    194  TP_fast_assign_btrfs(fs_info,
    195  __entry->generation = fs_info->generation;
    299  TP_fast_assign_btrfs(root->fs_info,
    [all …]
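
TP_fast_assign_fsid() copies the filesystem UUID into every trace record, guarding against a NULL fs_info (the else branch is cut off in the listing). A plain-C sketch of that defensive copy, assuming a 16-byte fsid and zero-fill as the fallback:

```c
#include <stdint.h>
#include <string.h>

#define FSID_SIZE 16   /* assumption: UUID-sized fsid, like BTRFS_FSID_SIZE */

struct fs_ctx {
	uint8_t fsid[FSID_SIZE];   /* stand-in for fs_info->fs_devices->fsid */
};

/* Copy the fsid into a trace record if we have an fs context, otherwise zero it. */
static void assign_fsid_sketch(uint8_t dst[FSID_SIZE], const struct fs_ctx *fs)
{
	if (fs)
		memcpy(dst, fs->fsid, FSID_SIZE);
	else
		memset(dst, 0, FSID_SIZE);   /* assumed fallback; truncated in the listing */
}
```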
|