#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 *
 * Initialize the extent map tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	atomic_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		WARN_ON(!list_empty(&em->list));
		kmem_cache_free(extent_map_cache, em);
	}
}

/*
 * insert @node at @offset in @root.  Returns the existing node that
 * overlaps the requested offset, or NULL if the insert succeeded.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
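/*
 * Illustrative sketch, not part of the original file: the intended
 * lifecycle of an extent_map object.  alloc_extent_map() hands back a
 * structure with a reference count of one; every free_extent_map()
 * drops one reference, and the memory is returned to extent_map_cache
 * only when the count reaches zero.  The function name below is a
 * hypothetical caller invented for this example.
 */
static void example_em_lifecycle(void)
{
	struct extent_map *em;

	em = alloc_extent_map();	/* refs == 1, caller's reference */
	if (!em)
		return;			/* allocation can fail under GFP_NOFS */

	atomic_inc(&em->refs);		/* a second user takes a reference */

	free_extent_map(em);		/* refs 2 -> 1, structure survives */
	free_extent_map(em);		/* refs 1 -> 0, kmem_cache_free() runs */
}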
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
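/*
 * Illustrative sketch, not part of the original file: a pair of
 * mappings that mergable_maps() would accept.  The maps must be
 * byte-adjacent in the file, carry identical flags and bdev, and be
 * physically contiguous on disk (or both be the same special type:
 * hole, inline, delalloc).  The function name and all offsets below
 * are invented for the example.
 */
static void example_mergeable_pair(struct extent_map *prev,
				   struct extent_map *next)
{
	prev->start = 0;
	prev->len = 4096;
	prev->block_start = 8192;
	prev->block_len = 4096;
	prev->flags = 0;			/* no PINNED/COMPRESSED/LOGGING */

	next->start = 4096;			/* == extent_map_end(prev) */
	next->len = 4096;
	next->block_start = 12288;		/* == extent_map_block_end(prev) */
	next->block_len = 4096;
	next->flags = 0;
	next->bdev = prev->bdev;		/* same block device */

	WARN_ON(!mergable_maps(prev, next));	/* adjacent and contiguous */
}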
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/* merge backward with an adjacent previous extent */
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			list_move(&em->list, &tree->modified_extents);

			list_del_init(&merge->list);
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	/* merge forward with an adjacent next extent */
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		list_del_init(&merge->list);
		free_extent_map(merge);
	}
}

/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_move(&em->list, &tree->modified_extents);
	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (em->in_tree)
		try_merge_map(tree, em);
}
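/*
 * Illustrative sketch, not part of the original file: the pin/unpin
 * flow around writeback.  A mapping is kept pinned so it cannot be
 * merged away while a write is in flight; once the file item hits
 * disk, unpin_extent_cache() clears the pin, records the generation
 * for fsync, and lets the map merge with its neighbours.  The function
 * name and the generation value are invented for the example.
 */
static void example_unpin_flow(struct extent_map_tree *tree,
			       struct extent_map *em)
{
	u64 gen = 1234;		/* hypothetical transaction generation */

	set_bit(EXTENT_FLAG_PINNED, &em->flags);	/* while I/O is pending */

	/* ... the extent's data is written to disk here ... */

	unpin_extent_cache(tree, em->start, em->len, gen);
}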
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);

	em->mod_start = em->start;
	em->mod_len = em->len;

	try_merge_map(tree, em);
out:
	return ret;
}

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

static struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
						  u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	em->in_tree = 0;
	return ret;
}
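/*
 * Illustrative sketch, not part of the original file: putting the
 * pieces together.  The tree is protected by tree->lock, which the
 * caller must take; lookup returns a referenced map that must be
 * dropped with free_extent_map(), and remove_extent_mapping() drops
 * no references itself.  The function name and all values below are
 * invented for the example.
 */
static void example_add_lookup_remove(struct extent_map_tree *tree)
{
	struct extent_map *em, *found;
	int ret;

	em = alloc_extent_map();		/* refs == 1, caller's reference */
	if (!em)
		return;
	em->start = 0;
	em->len = 4096;
	em->block_start = EXTENT_MAP_HOLE;	/* a hole mapping */

	write_lock(&tree->lock);
	ret = add_extent_mapping(tree, em);	/* tree takes its own reference */
	write_unlock(&tree->lock);
	if (ret) {				/* -EEXIST on overlap */
		free_extent_map(em);
		return;
	}

	read_lock(&tree->lock);
	found = lookup_extent_mapping(tree, 0, 4096);
	read_unlock(&tree->lock);
	if (found)
		free_extent_map(found);		/* drop the lookup reference */

	write_lock(&tree->lock);
	remove_extent_mapping(tree, em);	/* drops no references */
	write_unlock(&tree->lock);

	free_extent_map(em);			/* drop the tree's reference */
	free_extent_map(em);			/* drop the caller's reference */
}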