Searched refs:delayed_refs (Results 1 – 9 of 9) sorted by relevance
/linux/fs/btrfs/
delayed-ref.c
    358  static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,  in btrfs_delayed_ref_lock() argument
    361  lockdep_assert_held(&delayed_refs->lock);  in btrfs_delayed_ref_lock()
    366  spin_unlock(&delayed_refs->lock);  in btrfs_delayed_ref_lock()
    369  spin_lock(&delayed_refs->lock);  in btrfs_delayed_ref_lock()
    380  struct btrfs_delayed_ref_root *delayed_refs,  in drop_delayed_ref() argument
    394  struct btrfs_delayed_ref_root *delayed_refs,  in merge_ref() argument
    423  drop_delayed_ref(fs_info, delayed_refs, head, next);  in merge_ref()
    426  drop_delayed_ref(fs_info, delayed_refs, head, ref);  in merge_ref()
    441  struct btrfs_delayed_ref_root *delayed_refs,  in btrfs_merge_delayed_refs() argument
    464  if (merge_ref(fs_info, delayed_refs, head, ref, seq))  in btrfs_merge_delayed_refs()
    [all …]
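Lines 361-369 above show the lock-ordering rule inside btrfs_delayed_ref_lock(): delayed_refs->lock is a spinlock, so it has to be dropped before blocking on the head's mutex and re-taken afterwards, at which point the state must be revalidated. Below is a minimal sketch of that pattern, not the btrfs implementation: the struct names and the still_queued flag are hypothetical, and only the spinlock/mutex handoff mirrors the hits above (the kernel code additionally pins the head with a refcount across the unlocked window).

/*
 * Sketch only, not the btrfs code: "drop the outer spinlock before
 * blocking, then re-take it and revalidate", as suggested by
 * delayed-ref.c lines 361-369.  Struct names and still_queued are
 * hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>
#include <linux/types.h>

struct ref_root_sketch {
        spinlock_t lock;                /* stands in for delayed_refs->lock */
};

struct ref_head_sketch {
        struct mutex mutex;             /* per-head sleeping lock */
        bool still_queued;              /* hypothetical revalidation flag */
};

static bool ref_head_lock_sketch(struct ref_root_sketch *root,
                                 struct ref_head_sketch *head)
{
        lockdep_assert_held(&root->lock);

        if (mutex_trylock(&head->mutex))
                return true;

        /* A spinlock must not be held across a sleeping mutex_lock(). */
        spin_unlock(&root->lock);
        mutex_lock(&head->mutex);
        spin_lock(&root->lock);

        /* The head may have been processed while the lock was dropped. */
        if (!head->still_queued) {
                mutex_unlock(&head->mutex);
                return false;
        }
        return true;
}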
transaction.h
    113  struct btrfs_delayed_ref_root delayed_refs;  member
    214  struct btrfs_delayed_ref_root *delayed_refs;  in btrfs_set_skip_qgroup() local
    216  delayed_refs = &trans->transaction->delayed_refs;  in btrfs_set_skip_qgroup()
    217  WARN_ON(delayed_refs->qgroup_to_skip);  in btrfs_set_skip_qgroup()
    218  delayed_refs->qgroup_to_skip = qgroupid;  in btrfs_set_skip_qgroup()
    223  struct btrfs_delayed_ref_root *delayed_refs;  in btrfs_clear_skip_qgroup() local
    225  delayed_refs = &trans->transaction->delayed_refs;  in btrfs_clear_skip_qgroup()
    226  WARN_ON(!delayed_refs->qgroup_to_skip);  in btrfs_clear_skip_qgroup()
    227  delayed_refs->qgroup_to_skip = 0;  in btrfs_clear_skip_qgroup()
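btrfs_set_skip_qgroup() and btrfs_clear_skip_qgroup() (lines 214-227) stash and clear a single qgroup id in the transaction's delayed_refs, with WARN_ON() guarding against unbalanced use. A hypothetical usage sketch follows, assuming an open transaction handle; do_unaccounted_work() is a made-up placeholder, not a btrfs function.

/* Hypothetical placeholder for work that should not be accounted. */
static int do_unaccounted_work(struct btrfs_trans_handle *trans);

static int run_without_qgroup_accounting(struct btrfs_trans_handle *trans,
                                         u64 qgroupid)
{
        int ret;

        btrfs_set_skip_qgroup(trans, qgroupid); /* records qgroup_to_skip */
        ret = do_unaccounted_work(trans);
        btrfs_clear_skip_qgroup(trans);         /* must balance the set */

        return ret;
}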
delayed-ref.h
    385  struct btrfs_delayed_ref_root *delayed_refs,
    390  struct btrfs_delayed_ref_root *delayed_refs,
    397  struct btrfs_delayed_ref_root *delayed_refs,
    402  struct btrfs_delayed_ref_root *delayed_refs);
    403  void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
extent-tree.c
    105  struct btrfs_delayed_ref_root *delayed_refs;  in btrfs_lookup_extent_info() local
    183  delayed_refs = &trans->transaction->delayed_refs;  in btrfs_lookup_extent_info()
    184  spin_lock(&delayed_refs->lock);  in btrfs_lookup_extent_info()
    185  head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);  in btrfs_lookup_extent_info()
    189  spin_unlock(&delayed_refs->lock);  in btrfs_lookup_extent_info()
    210  spin_unlock(&delayed_refs->lock);  in btrfs_lookup_extent_info()
    1863 struct btrfs_delayed_ref_root *delayed_refs,  in btrfs_cleanup_ref_head_accounting() argument
    1875 spin_lock(&delayed_refs->lock);  in btrfs_cleanup_ref_head_accounting()
    1876 delayed_refs->pending_csums -= head->num_bytes;  in btrfs_cleanup_ref_head_accounting()
    1877 spin_unlock(&delayed_refs->lock);  in btrfs_cleanup_ref_head_accounting()
    [all …]
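The btrfs_lookup_extent_info() hits (lines 183-210) show the standard way to consult the in-memory delayed-ref state: take delayed_refs->lock, look up the head for a byte number with btrfs_find_delayed_ref_head(), and drop the lock when done. A condensed sketch of that access pattern, assuming the in-tree delayed-ref.h/transaction.h declarations; adjust_counts_for_head() is a hypothetical stand-in, and the real function additionally locks the head's mutex and handles retries.

/*
 * Condensed sketch of the lookup pattern above, not the full
 * btrfs_lookup_extent_info().  adjust_counts_for_head() is hypothetical.
 */
static void peek_delayed_ref_head(struct btrfs_trans_handle *trans,
                                  struct btrfs_fs_info *fs_info,
                                  u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;

        delayed_refs = &trans->transaction->delayed_refs;

        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
        if (head)
                adjust_counts_for_head(head);   /* hypothetical */
        spin_unlock(&delayed_refs->lock);
}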
transaction.c
    144  WARN_ON(!xa_empty(&transaction->delayed_refs.head_refs));  in btrfs_put_transaction()
    145  WARN_ON(!xa_empty(&transaction->delayed_refs.dirty_extents));  in btrfs_put_transaction()
    146  if (transaction->delayed_refs.pending_csums)  in btrfs_put_transaction()
    149  transaction->delayed_refs.pending_csums);  in btrfs_put_transaction()
    349  memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));  in join_transaction()
    351  xa_init(&cur_trans->delayed_refs.head_refs);  in join_transaction()
    352  xa_init(&cur_trans->delayed_refs.dirty_extents);  in join_transaction()
    365  spin_lock_init(&cur_trans->delayed_refs.lock);  in join_transaction()
    1004 test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))  in btrfs_should_end_transaction()
    2190 &cur_trans->delayed_refs.flags)) {  in btrfs_commit_transaction()
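join_transaction() and btrfs_put_transaction() (lines 144-365) show the lifecycle of the two xarrays inside delayed_refs: the struct is zeroed and the xarrays xa_init()'d when the transaction is set up, and both are expected to be empty by the time the last transaction reference is dropped. A standalone sketch of that discipline using a hypothetical container struct:

/*
 * Sketch of the init/teardown discipline visible in transaction.c above.
 * struct demo_ref_root is hypothetical; the xarray and spinlock calls
 * are the real kernel APIs used at lines 144-365.
 */
#include <linux/xarray.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/bug.h>

struct demo_ref_root {
        struct xarray head_refs;
        struct xarray dirty_extents;
        spinlock_t lock;
};

static void demo_ref_root_init(struct demo_ref_root *root)
{
        memset(root, 0, sizeof(*root));
        xa_init(&root->head_refs);
        xa_init(&root->dirty_extents);
        spin_lock_init(&root->lock);
}

static void demo_ref_root_destroy(struct demo_ref_root *root)
{
        /* Everything must have been consumed before teardown. */
        WARN_ON(!xa_empty(&root->head_refs));
        WARN_ON(!xa_empty(&root->dirty_extents));
        /* xa_destroy() is not in the hits above; shown as the usual cleanup. */
        xa_destroy(&root->head_refs);
        xa_destroy(&root->dirty_extents);
}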
qgroup.c
    2002 struct btrfs_delayed_ref_root *delayed_refs,  in btrfs_qgroup_trace_extent_nolock() argument
    2024 xa_lock(&delayed_refs->dirty_extents);  in btrfs_qgroup_trace_extent_nolock()
    2025 existing = xa_load(&delayed_refs->dirty_extents, index);  in btrfs_qgroup_trace_extent_nolock()
    2031 xa_unlock(&delayed_refs->dirty_extents);  in btrfs_qgroup_trace_extent_nolock()
    2035 ret = __xa_store(&delayed_refs->dirty_extents, index, record, GFP_ATOMIC);  in btrfs_qgroup_trace_extent_nolock()
    2036 xa_unlock(&delayed_refs->dirty_extents);  in btrfs_qgroup_trace_extent_nolock()
    2141 struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;  in btrfs_qgroup_trace_extent() local
    2151 if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {  in btrfs_qgroup_trace_extent()
    2158 ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record, bytenr);  in btrfs_qgroup_trace_extent()
    2161 xa_release(&delayed_refs->dirty_extents, index);  in btrfs_qgroup_trace_extent()
    [all …]
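The qgroup.c hits outline a two-step insertion into the dirty_extents xarray: btrfs_qgroup_trace_extent() pre-reserves the slot with xa_reserve(..., GFP_NOFS) while sleeping allocations are still allowed, and btrfs_qgroup_trace_extent_nolock() later fills it under xa_lock() with __xa_store(..., GFP_ATOMIC), releasing the reservation with xa_release() if it turns out to be unneeded. A simplified sketch of that reserve-then-store pattern, collapsed into one hypothetical function:

/*
 * Simplified sketch of the reserve-then-store pattern from qgroup.c
 * above, collapsed into one function.  struct demo_record and
 * record_extent_sketch() are hypothetical; the xarray calls are the
 * real APIs used at lines 2024-2161.
 */
#include <linux/xarray.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct demo_record {
        u64 bytenr;
        u64 num_bytes;
};

/*
 * Returns 0 if @record was inserted, 1 if an entry already existed
 * (the caller keeps ownership of @record), or a negative errno.
 */
static int record_extent_sketch(struct xarray *dirty_extents,
                                unsigned long index,
                                struct demo_record *record)
{
        struct demo_record *existing;
        int ret;

        /* Step 1: reserve the slot while sleeping (GFP_NOFS) allocation is allowed. */
        ret = xa_reserve(dirty_extents, index, GFP_NOFS);
        if (ret)
                return ret;

        /* Step 2: fill or abandon the slot under the xarray's internal lock. */
        xa_lock(dirty_extents);
        existing = xa_load(dirty_extents, index);
        if (existing) {
                xa_unlock(dirty_extents);
                xa_release(dirty_extents, index);
                return 1;
        }
        ret = xa_err(__xa_store(dirty_extents, index, record, GFP_ATOMIC));
        xa_unlock(dirty_extents);

        return ret;
}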
extent-tree.h
    107  struct btrfs_delayed_ref_root *delayed_refs,
qgroup.h
    352  struct btrfs_delayed_ref_root *delayed_refs,
backref.c
    1385 struct btrfs_delayed_ref_root *delayed_refs = NULL;  in find_parent_nodes() local
    1443 delayed_refs = &ctx->trans->transaction->delayed_refs;  in find_parent_nodes()
    1444 spin_lock(&delayed_refs->lock);  in find_parent_nodes()
    1445 head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs,  in find_parent_nodes()
    1450 spin_unlock(&delayed_refs->lock);  in find_parent_nodes()
    1463 spin_unlock(&delayed_refs->lock);  in find_parent_nodes()
    1470 spin_unlock(&delayed_refs->lock);  in find_parent_nodes()