/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF 1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF 2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

/*
 * XXX: Qu: I really hate the design that ref_head and tree/data refs share
 * the same ref_node structure.
 * Ref_head is at a higher logical level than tree/data refs, and duplicating
 * bytenr/num_bytes in ref_node is really a waste of memory; they should be
 * referenced from ref_head.
 * This gets more disgusting now that we use a list to store tree/data refs
 * in ref_head.  Must clean this mess up later.
 */
struct btrfs_delayed_ref_node {
	/*
	 * ref_heads use an rb tree, stored in ref_root->href_root and
	 * indexed by bytenr.
	 */
	struct rb_node rb_node;

	/* data/tree refs use a list, stored in ref_head->ref_list */
	struct list_head list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	unsigned int no_quota:1;
	/* is this node a head ref? */
	unsigned int is_head:1;
	/* is this node still in the rbtree/list? */
	unsigned int in_tree:1;
};
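
/*
 * Illustrative sketch (hypothetical, not part of this header): the net
 * effect of the refs queued under one head is the sum of their signed
 * ref_mods, which is roughly what the head's ref_mod/total_ref_mod track.
 * With 'head' being a struct btrfs_delayed_ref_head (declared below),
 * conceptually:
 *
 *	struct btrfs_delayed_ref_node *ref;
 *	int total = 0;
 *
 *	list_for_each_entry(ref, &head->ref_list, list) {
 *		if (ref->action == BTRFS_DROP_DELAYED_REF)
 *			total -= ref->ref_mod;
 *		else
 *			total += ref->ref_mod;
 *	}
 */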
struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u64 flags_to_set;
	int level;
	unsigned int update_key:1;
	unsigned int update_flags:1;
	unsigned int is_data:1;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	spinlock_t lock;
	struct list_head ref_list;

	struct rb_node href_node;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs
	 * associated with this head ref; it is not adjusted as delayed refs
	 * are run.  It is meant to track whether we need to do the csum
	 * accounting or not.
	 */
	int total_ref_mod;

	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-memory accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int processing:1;
};

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;

	/*
	 * Used to make qgroup skip a given root.
	 * This is for snapshots, as btrfs_qgroup_inherit() will manually
	 * modify the counters for a snapshot and its source, so we should
	 * skip the snapshot in new_root/old_roots or it will get accounted
	 * twice.
	 */
	u64 qgroup_to_skip;
};
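
/*
 * Illustrative sketch (hypothetical caller, not the actual ref-running
 * loop): only one task runs the delayed refs for a given extent at a time,
 * serialized on head->mutex via the helpers declared below (this sketch
 * assumes, as is typical for kernel locking helpers, that a zero return
 * from btrfs_delayed_ref_lock() means the mutex was acquired):
 *
 *	struct btrfs_delayed_ref_head *head;
 *
 *	head = btrfs_select_ref_head(trans);
 *	if (head && !btrfs_delayed_ref_lock(trans, head)) {
 *		... run and remove the refs queued on head->ref_list ...
 *		btrfs_delayed_ref_unlock(head);
 *	}
 */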
extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		case 0:
			/* head refs carry no backref type, so type is 0 */
			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int no_quota);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int no_quota);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

/*
 * a node might live in a head or a regular ref, this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
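
/*
 * Illustrative example (hypothetical caller, not part of this header):
 * dispatching on a node before casting it to its container, mirroring the
 * type check done in btrfs_put_delayed_ref() above:
 *
 *	if (btrfs_delayed_ref_is_head(node)) {
 *		struct btrfs_delayed_ref_head *head;
 *
 *		head = btrfs_delayed_node_to_head(node);
 *	} else if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
 *		   node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
 *		struct btrfs_delayed_tree_ref *tree_ref;
 *
 *		tree_ref = btrfs_delayed_node_to_tree_ref(node);
 *	} else {
 *		struct btrfs_delayed_data_ref *data_ref;
 *
 *		data_ref = btrfs_delayed_node_to_data_ref(node);
 *	}
 */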
#endif