/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H

#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"

enum btrfs_trans_state {
	TRANS_STATE_RUNNING,
	TRANS_STATE_BLOCKED,
	TRANS_STATE_COMMIT_START,
	TRANS_STATE_COMMIT_DOING,
	TRANS_STATE_UNBLOCKED,
	TRANS_STATE_COMPLETED,
	TRANS_STATE_MAX,
};

#define BTRFS_TRANS_HAVE_FREE_BGS	0
#define BTRFS_TRANS_DIRTY_BG_RUN	1
#define BTRFS_TRANS_CACHE_ENOSPC	2

struct btrfs_transaction {
	u64 transid;
	/*
	 * Total number of external writers (USERSPACE/START/ATTACH) in this
	 * transaction; it must be zero before the transaction can be
	 * committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total number of writers in this transaction; it must be zero before
	 * the transaction can end.
	 */
	atomic_t num_writers;
	refcount_t use_count;

	unsigned long flags;

	/* Protected by fs_info->trans_lock when we want to change it. */
	enum btrfs_trans_state state;
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	struct list_head pending_snapshots;
	struct list_head pending_chunks;
	struct list_head switch_commits;
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - This function can only ever be
	 *   run by one of the transaction committers. Refer to
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
	 *
	 * - btrfs_write_dirty_block_groups - this is called by
	 *   commit_cowonly_roots from the transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING)
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;

	/*
	 * We need to make sure block group deletion doesn't race with
	 * free space cache writeout. This mutex keeps them from stomping
	 * on each other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	unsigned int num_dirty_bgs;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;
};

#define __TRANS_FREEZABLE	(1U << 0)

#define __TRANS_START		(1U << 9)
#define __TRANS_ATTACH		(1U << 10)
#define __TRANS_JOIN		(1U << 11)
#define __TRANS_JOIN_NOLOCK	(1U << 12)
#define __TRANS_DUMMY		(1U << 13)

#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH		(__TRANS_ATTACH)
#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)

#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)

#define BTRFS_SEND_TRANS_STUB	((void *)1)

struct btrfs_trans_handle {
	u64 transid;
	u64 bytes_reserved;
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	refcount_t use_count;
	unsigned int type;
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool can_flush_pending_bgs;
	bool reloc_reserved;
	bool sync;
	bool dirty;
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
};
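
/*
 * Illustrative sketch (not part of the btrfs API): the typical lifecycle of a
 * transaction handle for callers of this header. The function name
 * some_btrfs_operation() and the reserved item count are made up for the
 * example; the start/end calls are the ones declared later in this file.
 *
 *	static int some_btrfs_operation(struct btrfs_root *root)
 *	{
 *		struct btrfs_trans_handle *trans;
 *
 *		// Reserve space for one metadata item and get a handle on the
 *		// running (or a newly started) transaction.
 *		trans = btrfs_start_transaction(root, 1);
 *		if (IS_ERR(trans))
 *			return PTR_ERR(trans);
 *
 *		// ... modify btree items under this handle ...
 *
 *		// Drop the handle; the transaction commits later, or call
 *		// btrfs_commit_transaction(trans) to commit synchronously.
 *		return btrfs_end_transaction(trans);
 *	}
 */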

struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct inode *dir;
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* block reservation for the operation */
	struct btrfs_block_rsv block_rsv;
	/* extra metadata reservation for relocation */
	int error;
	bool readonly;
	struct list_head list;
};

static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
					      struct inode *inode)
{
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->last_trans = trans->transaction->transid;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * Make the qgroup code skip the given qgroupid, meaning the old_roots/new_roots
 * collected for qgroup accounting won't contain that qgroupid.
 */
static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(!delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = 0;
}
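
/*
 * Illustrative usage sketch (not lifted from btrfs code): the WARN_ON()s in
 * btrfs_set_skip_qgroup() and btrfs_clear_skip_qgroup() above assume the two
 * are used as a pair around the work whose delayed refs should be excluded
 * from accounting for the given qgroup:
 *
 *	btrfs_set_skip_qgroup(trans, qgroupid);
 *	// ... queue delayed refs that must not be accounted to 'qgroupid' ...
 *	btrfs_clear_skip_qgroup(trans);
 */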

int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items,
					int min_factor);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);

void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock);

/*
 * Try to commit the transaction asynchronously, so this is safe to call
 * even while holding a spinlock.
 *
 * This works by asking transaction_kthread to commit the transaction
 * without waiting for the commit interval.
 */
static inline void btrfs_commit_transaction_locksafe(
		struct btrfs_fs_info *fs_info)
{
	set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
	wake_up_process(fs_info->transaction_kthread);
}
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
		       struct extent_io_tree *dirty_pages);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);

#endif