xref: /linux/fs/btrfs/extent-tree.c (revision e5c5d22e8dcf7c2d430336cbf8e180bd38e8daf1)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include "compat.h"
28 #include "hash.h"
29 #include "ctree.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "transaction.h"
33 #include "volumes.h"
34 #include "raid56.h"
35 #include "locking.h"
36 #include "free-space-cache.h"
37 #include "math.h"
38 
39 #undef SCRAMBLE_DELAYED_REFS
40 
41 /*
42  * Control flags for do_chunk_alloc's force field.
43  *
44  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45  * if we really need one.
46  *
47  * CHUNK_ALLOC_LIMITED means to only try to allocate one
48  * if we have very few chunks already allocated.  This is
49  * used as part of the clustering code to help make sure
50  * we have a good pool of storage to cluster in, without
51  * filling the FS with empty chunks.
52  *
53  * CHUNK_ALLOC_FORCE means it must try to allocate one.
54  */
55 enum {
56 	CHUNK_ALLOC_NO_FORCE = 0,
57 	CHUNK_ALLOC_LIMITED = 1,
58 	CHUNK_ALLOC_FORCE = 2,
59 };
60 
61 /*
62  * Control how reservations are dealt with.
63  *
64  * RESERVE_FREE - freeing a reservation.
65  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
66  *   ENOSPC accounting
67  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
68  *   bytes_may_use as the ENOSPC accounting is done elsewhere
69  */
70 enum {
71 	RESERVE_FREE = 0,
72 	RESERVE_ALLOC = 1,
73 	RESERVE_ALLOC_NO_ACCOUNT = 2,
74 };
75 
76 static int update_block_group(struct btrfs_root *root,
77 			      u64 bytenr, u64 num_bytes, int alloc);
78 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
79 				struct btrfs_root *root,
80 				u64 bytenr, u64 num_bytes, u64 parent,
81 				u64 root_objectid, u64 owner_objectid,
82 				u64 owner_offset, int refs_to_drop,
83 				struct btrfs_delayed_extent_op *extra_op);
84 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
85 				    struct extent_buffer *leaf,
86 				    struct btrfs_extent_item *ei);
87 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
88 				      struct btrfs_root *root,
89 				      u64 parent, u64 root_objectid,
90 				      u64 flags, u64 owner, u64 offset,
91 				      struct btrfs_key *ins, int ref_mod);
92 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
93 				     struct btrfs_root *root,
94 				     u64 parent, u64 root_objectid,
95 				     u64 flags, struct btrfs_disk_key *key,
96 				     int level, struct btrfs_key *ins);
97 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
98 			  struct btrfs_root *extent_root, u64 flags,
99 			  int force);
100 static int find_next_key(struct btrfs_path *path, int level,
101 			 struct btrfs_key *key);
102 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
103 			    int dump_block_groups);
104 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
105 				       u64 num_bytes, int reserve);
106 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
107 			       u64 num_bytes);
108 
109 static noinline int
110 block_group_cache_done(struct btrfs_block_group_cache *cache)
111 {
112 	smp_mb();
113 	return cache->cached == BTRFS_CACHE_FINISHED;
114 }
115 
116 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
117 {
118 	return (cache->flags & bits) == bits;
119 }
120 
121 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
122 {
123 	atomic_inc(&cache->count);
124 }
125 
126 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
127 {
128 	if (atomic_dec_and_test(&cache->count)) {
129 		WARN_ON(cache->pinned > 0);
130 		WARN_ON(cache->reserved > 0);
131 		kfree(cache->free_space_ctl);
132 		kfree(cache);
133 	}
134 }
135 
136 /*
137  * this adds the block group to the fs_info rb tree for the block group
138  * cache
139  */
140 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
141 				struct btrfs_block_group_cache *block_group)
142 {
143 	struct rb_node **p;
144 	struct rb_node *parent = NULL;
145 	struct btrfs_block_group_cache *cache;
146 
147 	spin_lock(&info->block_group_cache_lock);
148 	p = &info->block_group_cache_tree.rb_node;
149 
150 	while (*p) {
151 		parent = *p;
152 		cache = rb_entry(parent, struct btrfs_block_group_cache,
153 				 cache_node);
154 		if (block_group->key.objectid < cache->key.objectid) {
155 			p = &(*p)->rb_left;
156 		} else if (block_group->key.objectid > cache->key.objectid) {
157 			p = &(*p)->rb_right;
158 		} else {
159 			spin_unlock(&info->block_group_cache_lock);
160 			return -EEXIST;
161 		}
162 	}
163 
164 	rb_link_node(&block_group->cache_node, parent, p);
165 	rb_insert_color(&block_group->cache_node,
166 			&info->block_group_cache_tree);
167 
168 	if (info->first_logical_byte > block_group->key.objectid)
169 		info->first_logical_byte = block_group->key.objectid;
170 
171 	spin_unlock(&info->block_group_cache_lock);
172 
173 	return 0;
174 }
175 
176 /*
177  * This will return the block group at or after bytenr if contains is 0, else
178  * it will return the block group that contains the bytenr
179  */
180 static struct btrfs_block_group_cache *
181 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
182 			      int contains)
183 {
184 	struct btrfs_block_group_cache *cache, *ret = NULL;
185 	struct rb_node *n;
186 	u64 end, start;
187 
188 	spin_lock(&info->block_group_cache_lock);
189 	n = info->block_group_cache_tree.rb_node;
190 
191 	while (n) {
192 		cache = rb_entry(n, struct btrfs_block_group_cache,
193 				 cache_node);
194 		end = cache->key.objectid + cache->key.offset - 1;
195 		start = cache->key.objectid;
196 
197 		if (bytenr < start) {
198 			if (!contains && (!ret || start < ret->key.objectid))
199 				ret = cache;
200 			n = n->rb_left;
201 		} else if (bytenr > start) {
202 			if (contains && bytenr <= end) {
203 				ret = cache;
204 				break;
205 			}
206 			n = n->rb_right;
207 		} else {
208 			ret = cache;
209 			break;
210 		}
211 	}
212 	if (ret) {
213 		btrfs_get_block_group(ret);
214 		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
215 			info->first_logical_byte = ret->key.objectid;
216 	}
217 	spin_unlock(&info->block_group_cache_lock);
218 
219 	return ret;
220 }
221 
222 static int add_excluded_extent(struct btrfs_root *root,
223 			       u64 start, u64 num_bytes)
224 {
225 	u64 end = start + num_bytes - 1;
226 	set_extent_bits(&root->fs_info->freed_extents[0],
227 			start, end, EXTENT_UPTODATE, GFP_NOFS);
228 	set_extent_bits(&root->fs_info->freed_extents[1],
229 			start, end, EXTENT_UPTODATE, GFP_NOFS);
230 	return 0;
231 }
232 
233 static void free_excluded_extents(struct btrfs_root *root,
234 				  struct btrfs_block_group_cache *cache)
235 {
236 	u64 start, end;
237 
238 	start = cache->key.objectid;
239 	end = start + cache->key.offset - 1;
240 
241 	clear_extent_bits(&root->fs_info->freed_extents[0],
242 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
243 	clear_extent_bits(&root->fs_info->freed_extents[1],
244 			  start, end, EXTENT_UPTODATE, GFP_NOFS);
245 }
246 
247 static int exclude_super_stripes(struct btrfs_root *root,
248 				 struct btrfs_block_group_cache *cache)
249 {
250 	u64 bytenr;
251 	u64 *logical;
252 	int stripe_len;
253 	int i, nr, ret;
254 
255 	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
256 		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
257 		cache->bytes_super += stripe_len;
258 		ret = add_excluded_extent(root, cache->key.objectid,
259 					  stripe_len);
260 		BUG_ON(ret); /* -ENOMEM */
261 	}
262 
263 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
264 		bytenr = btrfs_sb_offset(i);
265 		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
266 				       cache->key.objectid, bytenr,
267 				       0, &logical, &nr, &stripe_len);
268 		BUG_ON(ret); /* -ENOMEM */
269 
270 		while (nr--) {
271 			cache->bytes_super += stripe_len;
272 			ret = add_excluded_extent(root, logical[nr],
273 						  stripe_len);
274 			BUG_ON(ret); /* -ENOMEM */
275 		}
276 
277 		kfree(logical);
278 	}
279 	return 0;
280 }
281 
282 static struct btrfs_caching_control *
283 get_caching_control(struct btrfs_block_group_cache *cache)
284 {
285 	struct btrfs_caching_control *ctl;
286 
287 	spin_lock(&cache->lock);
288 	if (cache->cached != BTRFS_CACHE_STARTED) {
289 		spin_unlock(&cache->lock);
290 		return NULL;
291 	}
292 
293 	/* We're loading it the fast way, so we don't have a caching_ctl. */
294 	if (!cache->caching_ctl) {
295 		spin_unlock(&cache->lock);
296 		return NULL;
297 	}
298 
299 	ctl = cache->caching_ctl;
300 	atomic_inc(&ctl->count);
301 	spin_unlock(&cache->lock);
302 	return ctl;
303 }
304 
305 static void put_caching_control(struct btrfs_caching_control *ctl)
306 {
307 	if (atomic_dec_and_test(&ctl->count))
308 		kfree(ctl);
309 }
310 
311 /*
312  * This is only called by cache_block_group.  Since we could have freed
313  * extents, we need to check the pinned_extents for any extents that can't
314  * be used yet, since their free space won't be released until the transaction commits.
315  */
316 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
317 			      struct btrfs_fs_info *info, u64 start, u64 end)
318 {
319 	u64 extent_start, extent_end, size, total_added = 0;
320 	int ret;
321 
322 	while (start < end) {
323 		ret = find_first_extent_bit(info->pinned_extents, start,
324 					    &extent_start, &extent_end,
325 					    EXTENT_DIRTY | EXTENT_UPTODATE,
326 					    NULL);
327 		if (ret)
328 			break;
329 
330 		if (extent_start <= start) {
331 			start = extent_end + 1;
332 		} else if (extent_start > start && extent_start < end) {
333 			size = extent_start - start;
334 			total_added += size;
335 			ret = btrfs_add_free_space(block_group, start,
336 						   size);
337 			BUG_ON(ret); /* -ENOMEM or logic error */
338 			start = extent_end + 1;
339 		} else {
340 			break;
341 		}
342 	}
343 
344 	if (start < end) {
345 		size = end - start;
346 		total_added += size;
347 		ret = btrfs_add_free_space(block_group, start, size);
348 		BUG_ON(ret); /* -ENOMEM or logic error */
349 	}
350 
351 	return total_added;
352 }
353 
354 static noinline void caching_thread(struct btrfs_work *work)
355 {
356 	struct btrfs_block_group_cache *block_group;
357 	struct btrfs_fs_info *fs_info;
358 	struct btrfs_caching_control *caching_ctl;
359 	struct btrfs_root *extent_root;
360 	struct btrfs_path *path;
361 	struct extent_buffer *leaf;
362 	struct btrfs_key key;
363 	u64 total_found = 0;
364 	u64 last = 0;
365 	u32 nritems;
366 	int ret = 0;
367 
368 	caching_ctl = container_of(work, struct btrfs_caching_control, work);
369 	block_group = caching_ctl->block_group;
370 	fs_info = block_group->fs_info;
371 	extent_root = fs_info->extent_root;
372 
373 	path = btrfs_alloc_path();
374 	if (!path)
375 		goto out;
376 
377 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
378 
379 	/*
380 	 * We don't want to deadlock with somebody trying to allocate a new
381 	 * extent for the extent root while also trying to search the extent
382 	 * root to add free space.  So we skip locking and search the commit
383 	 * root, since it's read-only.
384 	 */
385 	path->skip_locking = 1;
386 	path->search_commit_root = 1;
387 	path->reada = 1;
388 
389 	key.objectid = last;
390 	key.offset = 0;
391 	key.type = BTRFS_EXTENT_ITEM_KEY;
392 again:
393 	mutex_lock(&caching_ctl->mutex);
394 	/* need to make sure the commit_root doesn't disappear */
395 	down_read(&fs_info->extent_commit_sem);
396 
397 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
398 	if (ret < 0)
399 		goto err;
400 
401 	leaf = path->nodes[0];
402 	nritems = btrfs_header_nritems(leaf);
403 
404 	while (1) {
405 		if (btrfs_fs_closing(fs_info) > 1) {
406 			last = (u64)-1;
407 			break;
408 		}
409 
410 		if (path->slots[0] < nritems) {
411 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
412 		} else {
413 			ret = find_next_key(path, 0, &key);
414 			if (ret)
415 				break;
416 
417 			if (need_resched() ||
418 			    btrfs_next_leaf(extent_root, path)) {
419 				caching_ctl->progress = last;
420 				btrfs_release_path(path);
421 				up_read(&fs_info->extent_commit_sem);
422 				mutex_unlock(&caching_ctl->mutex);
423 				cond_resched();
424 				goto again;
425 			}
426 			leaf = path->nodes[0];
427 			nritems = btrfs_header_nritems(leaf);
428 			continue;
429 		}
430 
431 		if (key.objectid < block_group->key.objectid) {
432 			path->slots[0]++;
433 			continue;
434 		}
435 
436 		if (key.objectid >= block_group->key.objectid +
437 		    block_group->key.offset)
438 			break;
439 
440 		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
441 			total_found += add_new_free_space(block_group,
442 							  fs_info, last,
443 							  key.objectid);
444 			last = key.objectid + key.offset;
445 
446 			if (total_found > (1024 * 1024 * 2)) {
447 				total_found = 0;
448 				wake_up(&caching_ctl->wait);
449 			}
450 		}
451 		path->slots[0]++;
452 	}
453 	ret = 0;
454 
455 	total_found += add_new_free_space(block_group, fs_info, last,
456 					  block_group->key.objectid +
457 					  block_group->key.offset);
458 	caching_ctl->progress = (u64)-1;
459 
460 	spin_lock(&block_group->lock);
461 	block_group->caching_ctl = NULL;
462 	block_group->cached = BTRFS_CACHE_FINISHED;
463 	spin_unlock(&block_group->lock);
464 
465 err:
466 	btrfs_free_path(path);
467 	up_read(&fs_info->extent_commit_sem);
468 
469 	free_excluded_extents(extent_root, block_group);
470 
471 	mutex_unlock(&caching_ctl->mutex);
472 out:
473 	wake_up(&caching_ctl->wait);
474 
475 	put_caching_control(caching_ctl);
476 	btrfs_put_block_group(block_group);
477 }
478 
479 static int cache_block_group(struct btrfs_block_group_cache *cache,
480 			     int load_cache_only)
481 {
482 	DEFINE_WAIT(wait);
483 	struct btrfs_fs_info *fs_info = cache->fs_info;
484 	struct btrfs_caching_control *caching_ctl;
485 	int ret = 0;
486 
487 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
488 	if (!caching_ctl)
489 		return -ENOMEM;
490 
491 	INIT_LIST_HEAD(&caching_ctl->list);
492 	mutex_init(&caching_ctl->mutex);
493 	init_waitqueue_head(&caching_ctl->wait);
494 	caching_ctl->block_group = cache;
495 	caching_ctl->progress = cache->key.objectid;
496 	atomic_set(&caching_ctl->count, 1);
497 	caching_ctl->work.func = caching_thread;
498 
499 	spin_lock(&cache->lock);
500 	/*
501 	 * This should be a rare occasion, but it could happen, I think, in the
502 	 * case where one thread starts to load the space cache info, and then
503 	 * some other thread starts a transaction commit which tries to do an
504 	 * allocation while the other thread is still loading the space cache
505 	 * info.  The previous loop should have kept us from choosing this block
506 	 * group, but if we've moved to the state where we will wait on caching
507 	 * block groups we need to first check if we're doing a fast load here,
508 	 * so we can wait for it to finish, otherwise we could end up allocating
509 	 * from a block group whose cache gets evicted for one reason or
510 	 * another.
511 	 */
512 	while (cache->cached == BTRFS_CACHE_FAST) {
513 		struct btrfs_caching_control *ctl;
514 
515 		ctl = cache->caching_ctl;
516 		atomic_inc(&ctl->count);
517 		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
518 		spin_unlock(&cache->lock);
519 
520 		schedule();
521 
522 		finish_wait(&ctl->wait, &wait);
523 		put_caching_control(ctl);
524 		spin_lock(&cache->lock);
525 	}
526 
527 	if (cache->cached != BTRFS_CACHE_NO) {
528 		spin_unlock(&cache->lock);
529 		kfree(caching_ctl);
530 		return 0;
531 	}
532 	WARN_ON(cache->caching_ctl);
533 	cache->caching_ctl = caching_ctl;
534 	cache->cached = BTRFS_CACHE_FAST;
535 	spin_unlock(&cache->lock);
536 
537 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
538 		ret = load_free_space_cache(fs_info, cache);
539 
540 		spin_lock(&cache->lock);
541 		if (ret == 1) {
542 			cache->caching_ctl = NULL;
543 			cache->cached = BTRFS_CACHE_FINISHED;
544 			cache->last_byte_to_unpin = (u64)-1;
545 		} else {
546 			if (load_cache_only) {
547 				cache->caching_ctl = NULL;
548 				cache->cached = BTRFS_CACHE_NO;
549 			} else {
550 				cache->cached = BTRFS_CACHE_STARTED;
551 			}
552 		}
553 		spin_unlock(&cache->lock);
554 		wake_up(&caching_ctl->wait);
555 		if (ret == 1) {
556 			put_caching_control(caching_ctl);
557 			free_excluded_extents(fs_info->extent_root, cache);
558 			return 0;
559 		}
560 	} else {
561 		/*
562 		 * We are not going to do the fast caching, set cached to the
563 		 * appropriate value and wake up any waiters.
564 		 */
565 		spin_lock(&cache->lock);
566 		if (load_cache_only) {
567 			cache->caching_ctl = NULL;
568 			cache->cached = BTRFS_CACHE_NO;
569 		} else {
570 			cache->cached = BTRFS_CACHE_STARTED;
571 		}
572 		spin_unlock(&cache->lock);
573 		wake_up(&caching_ctl->wait);
574 	}
575 
576 	if (load_cache_only) {
577 		put_caching_control(caching_ctl);
578 		return 0;
579 	}
580 
581 	down_write(&fs_info->extent_commit_sem);
582 	atomic_inc(&caching_ctl->count);
583 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
584 	up_write(&fs_info->extent_commit_sem);
585 
586 	btrfs_get_block_group(cache);
587 
588 	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
589 
590 	return ret;
591 }
592 
593 /*
594  * return the block group that starts at or after bytenr
595  */
596 static struct btrfs_block_group_cache *
597 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
598 {
599 	struct btrfs_block_group_cache *cache;
600 
601 	cache = block_group_cache_tree_search(info, bytenr, 0);
602 
603 	return cache;
604 }
605 
606 /*
607  * return the block group that contains the given bytenr
608  */
609 struct btrfs_block_group_cache *btrfs_lookup_block_group(
610 						 struct btrfs_fs_info *info,
611 						 u64 bytenr)
612 {
613 	struct btrfs_block_group_cache *cache;
614 
615 	cache = block_group_cache_tree_search(info, bytenr, 1);
616 
617 	return cache;
618 }
619 
620 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
621 						  u64 flags)
622 {
623 	struct list_head *head = &info->space_info;
624 	struct btrfs_space_info *found;
625 
626 	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
627 
628 	rcu_read_lock();
629 	list_for_each_entry_rcu(found, head, list) {
630 		if (found->flags & flags) {
631 			rcu_read_unlock();
632 			return found;
633 		}
634 	}
635 	rcu_read_unlock();
636 	return NULL;
637 }
638 
639 /*
640  * after adding space to the filesystem, we need to clear the full flags
641  * on all the space infos.
642  */
643 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
644 {
645 	struct list_head *head = &info->space_info;
646 	struct btrfs_space_info *found;
647 
648 	rcu_read_lock();
649 	list_for_each_entry_rcu(found, head, list)
650 		found->full = 0;
651 	rcu_read_unlock();
652 }
653 
654 u64 btrfs_find_block_group(struct btrfs_root *root,
655 			   u64 search_start, u64 search_hint, int owner)
656 {
657 	struct btrfs_block_group_cache *cache;
658 	u64 used;
659 	u64 last = max(search_hint, search_start);
660 	u64 group_start = 0;
661 	int full_search = 0;
662 	int factor = 9;
663 	int wrapped = 0;
664 again:
665 	while (1) {
666 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
667 		if (!cache)
668 			break;
669 
670 		spin_lock(&cache->lock);
671 		last = cache->key.objectid + cache->key.offset;
672 		used = btrfs_block_group_used(&cache->item);
673 
674 		if ((full_search || !cache->ro) &&
675 		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
676 			if (used + cache->pinned + cache->reserved <
677 			    div_factor(cache->key.offset, factor)) {
678 				group_start = cache->key.objectid;
679 				spin_unlock(&cache->lock);
680 				btrfs_put_block_group(cache);
681 				goto found;
682 			}
683 		}
684 		spin_unlock(&cache->lock);
685 		btrfs_put_block_group(cache);
686 		cond_resched();
687 	}
688 	if (!wrapped) {
689 		last = search_start;
690 		wrapped = 1;
691 		goto again;
692 	}
693 	if (!full_search && factor < 10) {
694 		last = search_start;
695 		full_search = 1;
696 		factor = 10;
697 		goto again;
698 	}
699 found:
700 	return group_start;
701 }
702 
703 /* simple helper to search for an existing extent at a given offset */
704 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
705 {
706 	int ret;
707 	struct btrfs_key key;
708 	struct btrfs_path *path;
709 
710 	path = btrfs_alloc_path();
711 	if (!path)
712 		return -ENOMEM;
713 
714 	key.objectid = start;
715 	key.offset = len;
716 	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
717 	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
718 				0, 0);
719 	btrfs_free_path(path);
720 	return ret;
721 }
722 
723 /*
724  * Helper function to look up the reference count and flags of an extent.
725  *
726  * The head node for a delayed ref is used to store the sum of all the
727  * reference count modifications queued up in the rbtree.  The head
728  * node may also store the extent flags to set.  This way you can check
729  * what the reference count and extent flags would be once all of the
730  * queued delayed refs have been processed, without running them.
731  */
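/*
 * Illustrative example (hypothetical numbers, added for clarity): if the
 * committed extent item for this bytenr records refs == 2 and a delayed ref
 * head is queued with node.ref_mod == +1, the function below reports
 * *refs == 3, i.e. the count the extent tree will show once those delayed
 * refs are run.
 */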
732 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
733 			     struct btrfs_root *root, u64 bytenr,
734 			     u64 num_bytes, u64 *refs, u64 *flags)
735 {
736 	struct btrfs_delayed_ref_head *head;
737 	struct btrfs_delayed_ref_root *delayed_refs;
738 	struct btrfs_path *path;
739 	struct btrfs_extent_item *ei;
740 	struct extent_buffer *leaf;
741 	struct btrfs_key key;
742 	u32 item_size;
743 	u64 num_refs;
744 	u64 extent_flags;
745 	int ret;
746 
747 	path = btrfs_alloc_path();
748 	if (!path)
749 		return -ENOMEM;
750 
751 	key.objectid = bytenr;
752 	key.type = BTRFS_EXTENT_ITEM_KEY;
753 	key.offset = num_bytes;
754 	if (!trans) {
755 		path->skip_locking = 1;
756 		path->search_commit_root = 1;
757 	}
758 again:
759 	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
760 				&key, path, 0, 0);
761 	if (ret < 0)
762 		goto out_free;
763 
764 	if (ret == 0) {
765 		leaf = path->nodes[0];
766 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
767 		if (item_size >= sizeof(*ei)) {
768 			ei = btrfs_item_ptr(leaf, path->slots[0],
769 					    struct btrfs_extent_item);
770 			num_refs = btrfs_extent_refs(leaf, ei);
771 			extent_flags = btrfs_extent_flags(leaf, ei);
772 		} else {
773 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
774 			struct btrfs_extent_item_v0 *ei0;
775 			BUG_ON(item_size != sizeof(*ei0));
776 			ei0 = btrfs_item_ptr(leaf, path->slots[0],
777 					     struct btrfs_extent_item_v0);
778 			num_refs = btrfs_extent_refs_v0(leaf, ei0);
779 			/* FIXME: this isn't correct for data */
780 			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
781 #else
782 			BUG();
783 #endif
784 		}
785 		BUG_ON(num_refs == 0);
786 	} else {
787 		num_refs = 0;
788 		extent_flags = 0;
789 		ret = 0;
790 	}
791 
792 	if (!trans)
793 		goto out;
794 
795 	delayed_refs = &trans->transaction->delayed_refs;
796 	spin_lock(&delayed_refs->lock);
797 	head = btrfs_find_delayed_ref_head(trans, bytenr);
798 	if (head) {
799 		if (!mutex_trylock(&head->mutex)) {
800 			atomic_inc(&head->node.refs);
801 			spin_unlock(&delayed_refs->lock);
802 
803 			btrfs_release_path(path);
804 
805 			/*
806 			 * Mutex was contended, block until it's released and try
807 			 * again
808 			 */
809 			mutex_lock(&head->mutex);
810 			mutex_unlock(&head->mutex);
811 			btrfs_put_delayed_ref(&head->node);
812 			goto again;
813 		}
814 		if (head->extent_op && head->extent_op->update_flags)
815 			extent_flags |= head->extent_op->flags_to_set;
816 		else
817 			BUG_ON(num_refs == 0);
818 
819 		num_refs += head->node.ref_mod;
820 		mutex_unlock(&head->mutex);
821 	}
822 	spin_unlock(&delayed_refs->lock);
823 out:
824 	WARN_ON(num_refs == 0);
825 	if (refs)
826 		*refs = num_refs;
827 	if (flags)
828 		*flags = extent_flags;
829 out_free:
830 	btrfs_free_path(path);
831 	return ret;
832 }
833 
834 /*
835  * Back reference rules.  Back refs have three main goals:
836  *
837  * 1) differentiate between all holders of references to an extent so that
838  *    when a reference is dropped we can make sure it was a valid reference
839  *    before freeing the extent.
840  *
841  * 2) Provide enough information to quickly find the holders of an extent
842  *    if we notice a given block is corrupted or bad.
843  *
844  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
845  *    maintenance.  This is actually the same as #2, but with a slightly
846  *    different use case.
847  *
848  * There are two kinds of back refs. Implicit back refs are optimized
849  * for pointers in non-shared tree blocks. For a given pointer in a block,
850  * back refs of this kind provide information about the block's owner tree
851  * and the pointer's key. This information allows us to find the block by
852  * b-tree searching. Full back refs are for pointers in tree blocks not
853  * referenced by their owner trees. The location of the tree block is
854  * recorded in the back refs. Actually full back refs are generic and can
855  * be used in all cases where implicit back refs are used. The major
856  * shortcoming of full back refs is their overhead: every time a tree block
857  * gets COWed, we have to update the back ref entries for all pointers in it.
858  *
859  * For a newly allocated tree block, we use implicit back refs for
860  * pointers in it. This means most tree related operations only involve
861  * implicit back refs. For a tree block created in an old transaction, the
862  * only way to drop a reference to it is to COW it. So we can detect the
863  * event that a tree block loses its owner tree's reference and do the
864  * back refs conversion.
865  *
866  * When a tree block is COW'd through a tree, there are four cases:
867  *
868  * The reference count of the block is one and the tree is the block's
869  * owner tree. Nothing to do in this case.
870  *
871  * The reference count of the block is one and the tree is not the
872  * block's owner tree. In this case, full back refs are used for pointers
873  * in the block. Remove these full back refs, add implicit back refs for
874  * every pointer in the new block.
875  *
876  * The reference count of the block is greater than one and the tree is
877  * the block's owner tree. In this case, implicit back refs are used for
878  * pointers in the block. Add full back refs for every pointer in the
879  * block, increase lower level extents' reference counts. The original
880  * implicit back refs are carried over to the new block.
881  *
882  * The reference count of the block is greater than one and the tree is
883  * not the block's owner tree. Add implicit back refs for every pointer in
884  * the new block, increase lower level extents' reference count.
885  *
886  * Back Reference Key composing:
887  *
888  * The key objectid corresponds to the first byte in the extent.
889  * The key type is used to differentiate between types of back refs.
890  * There are different meanings of the key offset for different types
891  * of back refs.
892  *
893  * File extents can be referenced by:
894  *
895  * - multiple snapshots, subvolumes, or different generations in one subvol
896  * - different files inside a single subvolume
897  * - different offsets inside a file (bookend extents in file.c)
898  *
899  * The extent ref structure for the implicit back refs has fields for:
900  *
901  * - Objectid of the subvolume root
902  * - objectid of the file holding the reference
903  * - original offset in the file
904  * - how many bookend extents
905  *
906  * The key offset for the implicit back refs is hash of the first
907  * three fields.
908  *
909  * The extent ref structure for the full back refs has a field for:
910  *
911  * - number of pointers in the tree leaf
912  *
913  * The key offset for the full back refs is the first byte of
914  * the tree leaf.
915  *
916  * When a file extent is allocated, the implicit back refs are used.
917  * The fields are filled in:
918  *
919  *     (root_key.objectid, inode objectid, offset in file, 1)
920  *
921  * When a file extent is removed by file truncation, we find the
922  * corresponding implicit back refs and check the following fields:
923  *
924  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
925  *
926  * Btree extents can be referenced by:
927  *
928  * - Different subvolumes
929  *
930  * Both the implicit back refs and the full back refs for tree blocks
931  * only consist of a key. The key offset for the implicit back refs is
932  * the objectid of the block's owner tree. The key offset for the full
933  * back refs is the first byte of the parent block.
934  *
935  * When implicit back refs are used, information about the lowest key and
936  * level of the tree block is required. This information is stored in the
937  * tree block info structure.
938  */
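/*
 * Worked example of the key composition above (hypothetical byte numbers,
 * added for illustration): a data extent at bytenr 136708096, written by
 * inode 257 at file offset 0 in subvolume 5, carries an implicit back ref
 * keyed as
 *
 *     (136708096, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while a full (shared) back ref from a leaf starting at bytenr 30408704
 * would instead be keyed as
 *
 *     (136708096, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 */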
939 
940 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
941 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
942 				  struct btrfs_root *root,
943 				  struct btrfs_path *path,
944 				  u64 owner, u32 extra_size)
945 {
946 	struct btrfs_extent_item *item;
947 	struct btrfs_extent_item_v0 *ei0;
948 	struct btrfs_extent_ref_v0 *ref0;
949 	struct btrfs_tree_block_info *bi;
950 	struct extent_buffer *leaf;
951 	struct btrfs_key key;
952 	struct btrfs_key found_key;
953 	u32 new_size = sizeof(*item);
954 	u64 refs;
955 	int ret;
956 
957 	leaf = path->nodes[0];
958 	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
959 
960 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
961 	ei0 = btrfs_item_ptr(leaf, path->slots[0],
962 			     struct btrfs_extent_item_v0);
963 	refs = btrfs_extent_refs_v0(leaf, ei0);
964 
965 	if (owner == (u64)-1) {
966 		while (1) {
967 			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
968 				ret = btrfs_next_leaf(root, path);
969 				if (ret < 0)
970 					return ret;
971 				BUG_ON(ret > 0); /* Corruption */
972 				leaf = path->nodes[0];
973 			}
974 			btrfs_item_key_to_cpu(leaf, &found_key,
975 					      path->slots[0]);
976 			BUG_ON(key.objectid != found_key.objectid);
977 			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
978 				path->slots[0]++;
979 				continue;
980 			}
981 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
982 					      struct btrfs_extent_ref_v0);
983 			owner = btrfs_ref_objectid_v0(leaf, ref0);
984 			break;
985 		}
986 	}
987 	btrfs_release_path(path);
988 
989 	if (owner < BTRFS_FIRST_FREE_OBJECTID)
990 		new_size += sizeof(*bi);
991 
992 	new_size -= sizeof(*ei0);
993 	ret = btrfs_search_slot(trans, root, &key, path,
994 				new_size + extra_size, 1);
995 	if (ret < 0)
996 		return ret;
997 	BUG_ON(ret); /* Corruption */
998 
999 	btrfs_extend_item(trans, root, path, new_size);
1000 
1001 	leaf = path->nodes[0];
1002 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1003 	btrfs_set_extent_refs(leaf, item, refs);
1004 	/* FIXME: get real generation */
1005 	btrfs_set_extent_generation(leaf, item, 0);
1006 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1007 		btrfs_set_extent_flags(leaf, item,
1008 				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
1009 				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
1010 		bi = (struct btrfs_tree_block_info *)(item + 1);
1011 		/* FIXME: get first key of the block */
1012 		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1013 		btrfs_set_tree_block_level(leaf, bi, (int)owner);
1014 	} else {
1015 		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1016 	}
1017 	btrfs_mark_buffer_dirty(leaf);
1018 	return 0;
1019 }
1020 #endif
1021 
1022 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1023 {
1024 	u32 high_crc = ~(u32)0;
1025 	u32 low_crc = ~(u32)0;
1026 	__le64 lenum;
1027 
1028 	lenum = cpu_to_le64(root_objectid);
1029 	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
1030 	lenum = cpu_to_le64(owner);
1031 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1032 	lenum = cpu_to_le64(offset);
1033 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1034 
1035 	return ((u64)high_crc << 31) ^ (u64)low_crc;
1036 }
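/*
 * Note (added for illustration): this hash supplies the key offset for
 * EXTENT_DATA_REF items, e.g. key.offset = hash_extent_data_ref(5, 257, 0)
 * for a ref from subvolume 5, inode 257, file offset 0.  Since different
 * (root, objectid, offset) triples can hash to the same value,
 * insert_extent_data_ref() below probes forward with key.offset++ until it
 * finds either the matching ref or a free slot.
 */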
1037 
1038 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1039 				     struct btrfs_extent_data_ref *ref)
1040 {
1041 	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1042 				    btrfs_extent_data_ref_objectid(leaf, ref),
1043 				    btrfs_extent_data_ref_offset(leaf, ref));
1044 }
1045 
1046 static int match_extent_data_ref(struct extent_buffer *leaf,
1047 				 struct btrfs_extent_data_ref *ref,
1048 				 u64 root_objectid, u64 owner, u64 offset)
1049 {
1050 	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1051 	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1052 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
1053 		return 0;
1054 	return 1;
1055 }
1056 
1057 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1058 					   struct btrfs_root *root,
1059 					   struct btrfs_path *path,
1060 					   u64 bytenr, u64 parent,
1061 					   u64 root_objectid,
1062 					   u64 owner, u64 offset)
1063 {
1064 	struct btrfs_key key;
1065 	struct btrfs_extent_data_ref *ref;
1066 	struct extent_buffer *leaf;
1067 	u32 nritems;
1068 	int ret;
1069 	int recow;
1070 	int err = -ENOENT;
1071 
1072 	key.objectid = bytenr;
1073 	if (parent) {
1074 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1075 		key.offset = parent;
1076 	} else {
1077 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1078 		key.offset = hash_extent_data_ref(root_objectid,
1079 						  owner, offset);
1080 	}
1081 again:
1082 	recow = 0;
1083 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1084 	if (ret < 0) {
1085 		err = ret;
1086 		goto fail;
1087 	}
1088 
1089 	if (parent) {
1090 		if (!ret)
1091 			return 0;
1092 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1093 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1094 		btrfs_release_path(path);
1095 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1096 		if (ret < 0) {
1097 			err = ret;
1098 			goto fail;
1099 		}
1100 		if (!ret)
1101 			return 0;
1102 #endif
1103 		goto fail;
1104 	}
1105 
1106 	leaf = path->nodes[0];
1107 	nritems = btrfs_header_nritems(leaf);
1108 	while (1) {
1109 		if (path->slots[0] >= nritems) {
1110 			ret = btrfs_next_leaf(root, path);
1111 			if (ret < 0)
1112 				err = ret;
1113 			if (ret)
1114 				goto fail;
1115 
1116 			leaf = path->nodes[0];
1117 			nritems = btrfs_header_nritems(leaf);
1118 			recow = 1;
1119 		}
1120 
1121 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1122 		if (key.objectid != bytenr ||
1123 		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
1124 			goto fail;
1125 
1126 		ref = btrfs_item_ptr(leaf, path->slots[0],
1127 				     struct btrfs_extent_data_ref);
1128 
1129 		if (match_extent_data_ref(leaf, ref, root_objectid,
1130 					  owner, offset)) {
1131 			if (recow) {
1132 				btrfs_release_path(path);
1133 				goto again;
1134 			}
1135 			err = 0;
1136 			break;
1137 		}
1138 		path->slots[0]++;
1139 	}
1140 fail:
1141 	return err;
1142 }
1143 
1144 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1145 					   struct btrfs_root *root,
1146 					   struct btrfs_path *path,
1147 					   u64 bytenr, u64 parent,
1148 					   u64 root_objectid, u64 owner,
1149 					   u64 offset, int refs_to_add)
1150 {
1151 	struct btrfs_key key;
1152 	struct extent_buffer *leaf;
1153 	u32 size;
1154 	u32 num_refs;
1155 	int ret;
1156 
1157 	key.objectid = bytenr;
1158 	if (parent) {
1159 		key.type = BTRFS_SHARED_DATA_REF_KEY;
1160 		key.offset = parent;
1161 		size = sizeof(struct btrfs_shared_data_ref);
1162 	} else {
1163 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
1164 		key.offset = hash_extent_data_ref(root_objectid,
1165 						  owner, offset);
1166 		size = sizeof(struct btrfs_extent_data_ref);
1167 	}
1168 
1169 	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1170 	if (ret && ret != -EEXIST)
1171 		goto fail;
1172 
1173 	leaf = path->nodes[0];
1174 	if (parent) {
1175 		struct btrfs_shared_data_ref *ref;
1176 		ref = btrfs_item_ptr(leaf, path->slots[0],
1177 				     struct btrfs_shared_data_ref);
1178 		if (ret == 0) {
1179 			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1180 		} else {
1181 			num_refs = btrfs_shared_data_ref_count(leaf, ref);
1182 			num_refs += refs_to_add;
1183 			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1184 		}
1185 	} else {
1186 		struct btrfs_extent_data_ref *ref;
1187 		while (ret == -EEXIST) {
1188 			ref = btrfs_item_ptr(leaf, path->slots[0],
1189 					     struct btrfs_extent_data_ref);
1190 			if (match_extent_data_ref(leaf, ref, root_objectid,
1191 						  owner, offset))
1192 				break;
1193 			btrfs_release_path(path);
1194 			key.offset++;
1195 			ret = btrfs_insert_empty_item(trans, root, path, &key,
1196 						      size);
1197 			if (ret && ret != -EEXIST)
1198 				goto fail;
1199 
1200 			leaf = path->nodes[0];
1201 		}
1202 		ref = btrfs_item_ptr(leaf, path->slots[0],
1203 				     struct btrfs_extent_data_ref);
1204 		if (ret == 0) {
1205 			btrfs_set_extent_data_ref_root(leaf, ref,
1206 						       root_objectid);
1207 			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1208 			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1209 			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1210 		} else {
1211 			num_refs = btrfs_extent_data_ref_count(leaf, ref);
1212 			num_refs += refs_to_add;
1213 			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1214 		}
1215 	}
1216 	btrfs_mark_buffer_dirty(leaf);
1217 	ret = 0;
1218 fail:
1219 	btrfs_release_path(path);
1220 	return ret;
1221 }
1222 
1223 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1224 					   struct btrfs_root *root,
1225 					   struct btrfs_path *path,
1226 					   int refs_to_drop)
1227 {
1228 	struct btrfs_key key;
1229 	struct btrfs_extent_data_ref *ref1 = NULL;
1230 	struct btrfs_shared_data_ref *ref2 = NULL;
1231 	struct extent_buffer *leaf;
1232 	u32 num_refs = 0;
1233 	int ret = 0;
1234 
1235 	leaf = path->nodes[0];
1236 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1237 
1238 	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1239 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1240 				      struct btrfs_extent_data_ref);
1241 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1242 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1243 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1244 				      struct btrfs_shared_data_ref);
1245 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1246 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1247 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1248 		struct btrfs_extent_ref_v0 *ref0;
1249 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1250 				      struct btrfs_extent_ref_v0);
1251 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1252 #endif
1253 	} else {
1254 		BUG();
1255 	}
1256 
1257 	BUG_ON(num_refs < refs_to_drop);
1258 	num_refs -= refs_to_drop;
1259 
1260 	if (num_refs == 0) {
1261 		ret = btrfs_del_item(trans, root, path);
1262 	} else {
1263 		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1264 			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1265 		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1266 			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1267 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1268 		else {
1269 			struct btrfs_extent_ref_v0 *ref0;
1270 			ref0 = btrfs_item_ptr(leaf, path->slots[0],
1271 					struct btrfs_extent_ref_v0);
1272 			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1273 		}
1274 #endif
1275 		btrfs_mark_buffer_dirty(leaf);
1276 	}
1277 	return ret;
1278 }
1279 
1280 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1281 					  struct btrfs_path *path,
1282 					  struct btrfs_extent_inline_ref *iref)
1283 {
1284 	struct btrfs_key key;
1285 	struct extent_buffer *leaf;
1286 	struct btrfs_extent_data_ref *ref1;
1287 	struct btrfs_shared_data_ref *ref2;
1288 	u32 num_refs = 0;
1289 
1290 	leaf = path->nodes[0];
1291 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1292 	if (iref) {
1293 		if (btrfs_extent_inline_ref_type(leaf, iref) ==
1294 		    BTRFS_EXTENT_DATA_REF_KEY) {
1295 			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1296 			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1297 		} else {
1298 			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1299 			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1300 		}
1301 	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1302 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
1303 				      struct btrfs_extent_data_ref);
1304 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1305 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1306 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
1307 				      struct btrfs_shared_data_ref);
1308 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1309 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1310 	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1311 		struct btrfs_extent_ref_v0 *ref0;
1312 		ref0 = btrfs_item_ptr(leaf, path->slots[0],
1313 				      struct btrfs_extent_ref_v0);
1314 		num_refs = btrfs_ref_count_v0(leaf, ref0);
1315 #endif
1316 	} else {
1317 		WARN_ON(1);
1318 	}
1319 	return num_refs;
1320 }
1321 
1322 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1323 					  struct btrfs_root *root,
1324 					  struct btrfs_path *path,
1325 					  u64 bytenr, u64 parent,
1326 					  u64 root_objectid)
1327 {
1328 	struct btrfs_key key;
1329 	int ret;
1330 
1331 	key.objectid = bytenr;
1332 	if (parent) {
1333 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1334 		key.offset = parent;
1335 	} else {
1336 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1337 		key.offset = root_objectid;
1338 	}
1339 
1340 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1341 	if (ret > 0)
1342 		ret = -ENOENT;
1343 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1344 	if (ret == -ENOENT && parent) {
1345 		btrfs_release_path(path);
1346 		key.type = BTRFS_EXTENT_REF_V0_KEY;
1347 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1348 		if (ret > 0)
1349 			ret = -ENOENT;
1350 	}
1351 #endif
1352 	return ret;
1353 }
1354 
1355 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1356 					  struct btrfs_root *root,
1357 					  struct btrfs_path *path,
1358 					  u64 bytenr, u64 parent,
1359 					  u64 root_objectid)
1360 {
1361 	struct btrfs_key key;
1362 	int ret;
1363 
1364 	key.objectid = bytenr;
1365 	if (parent) {
1366 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1367 		key.offset = parent;
1368 	} else {
1369 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
1370 		key.offset = root_objectid;
1371 	}
1372 
1373 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1374 	btrfs_release_path(path);
1375 	return ret;
1376 }
1377 
1378 static inline int extent_ref_type(u64 parent, u64 owner)
1379 {
1380 	int type;
1381 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1382 		if (parent > 0)
1383 			type = BTRFS_SHARED_BLOCK_REF_KEY;
1384 		else
1385 			type = BTRFS_TREE_BLOCK_REF_KEY;
1386 	} else {
1387 		if (parent > 0)
1388 			type = BTRFS_SHARED_DATA_REF_KEY;
1389 		else
1390 			type = BTRFS_EXTENT_DATA_REF_KEY;
1391 	}
1392 	return type;
1393 }
1394 
1395 static int find_next_key(struct btrfs_path *path, int level,
1396 			 struct btrfs_key *key)
1397 
1398 {
1399 	for (; level < BTRFS_MAX_LEVEL; level++) {
1400 		if (!path->nodes[level])
1401 			break;
1402 		if (path->slots[level] + 1 >=
1403 		    btrfs_header_nritems(path->nodes[level]))
1404 			continue;
1405 		if (level == 0)
1406 			btrfs_item_key_to_cpu(path->nodes[level], key,
1407 					      path->slots[level] + 1);
1408 		else
1409 			btrfs_node_key_to_cpu(path->nodes[level], key,
1410 					      path->slots[level] + 1);
1411 		return 0;
1412 	}
1413 	return 1;
1414 }
1415 
1416 /*
1417  * look for inline back ref. if back ref is found, *ref_ret is set
1418  * to the address of inline back ref, and 0 is returned.
1419  *
1420  * if back ref isn't found, *ref_ret is set to the address where it
1421  * should be inserted, and -ENOENT is returned.
1422  *
1423  * if insert is true and there are too many inline back refs, the path
1424  * points to the extent item, and -EAGAIN is returned.
1425  *
1426  * NOTE: inline back refs are ordered in the same way that back ref
1427  *	 items in the tree are ordered.
1428  */
1429 static noinline_for_stack
1430 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1431 				 struct btrfs_root *root,
1432 				 struct btrfs_path *path,
1433 				 struct btrfs_extent_inline_ref **ref_ret,
1434 				 u64 bytenr, u64 num_bytes,
1435 				 u64 parent, u64 root_objectid,
1436 				 u64 owner, u64 offset, int insert)
1437 {
1438 	struct btrfs_key key;
1439 	struct extent_buffer *leaf;
1440 	struct btrfs_extent_item *ei;
1441 	struct btrfs_extent_inline_ref *iref;
1442 	u64 flags;
1443 	u64 item_size;
1444 	unsigned long ptr;
1445 	unsigned long end;
1446 	int extra_size;
1447 	int type;
1448 	int want;
1449 	int ret;
1450 	int err = 0;
1451 
1452 	key.objectid = bytenr;
1453 	key.type = BTRFS_EXTENT_ITEM_KEY;
1454 	key.offset = num_bytes;
1455 
1456 	want = extent_ref_type(parent, owner);
1457 	if (insert) {
1458 		extra_size = btrfs_extent_inline_ref_size(want);
1459 		path->keep_locks = 1;
1460 	} else
1461 		extra_size = -1;
1462 	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1463 	if (ret < 0) {
1464 		err = ret;
1465 		goto out;
1466 	}
1467 	if (ret && !insert) {
1468 		err = -ENOENT;
1469 		goto out;
1470 	} else if (ret) {
1471 		err = -EIO;
1472 		WARN_ON(1);
1473 		goto out;
1474 	}
1475 
1476 	leaf = path->nodes[0];
1477 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1478 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1479 	if (item_size < sizeof(*ei)) {
1480 		if (!insert) {
1481 			err = -ENOENT;
1482 			goto out;
1483 		}
1484 		ret = convert_extent_item_v0(trans, root, path, owner,
1485 					     extra_size);
1486 		if (ret < 0) {
1487 			err = ret;
1488 			goto out;
1489 		}
1490 		leaf = path->nodes[0];
1491 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1492 	}
1493 #endif
1494 	BUG_ON(item_size < sizeof(*ei));
1495 
1496 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1497 	flags = btrfs_extent_flags(leaf, ei);
1498 
1499 	ptr = (unsigned long)(ei + 1);
1500 	end = (unsigned long)ei + item_size;
1501 
1502 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1503 		ptr += sizeof(struct btrfs_tree_block_info);
1504 		BUG_ON(ptr > end);
1505 	} else {
1506 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1507 	}
1508 
1509 	err = -ENOENT;
1510 	while (1) {
1511 		if (ptr >= end) {
1512 			WARN_ON(ptr > end);
1513 			break;
1514 		}
1515 		iref = (struct btrfs_extent_inline_ref *)ptr;
1516 		type = btrfs_extent_inline_ref_type(leaf, iref);
1517 		if (want < type)
1518 			break;
1519 		if (want > type) {
1520 			ptr += btrfs_extent_inline_ref_size(type);
1521 			continue;
1522 		}
1523 
1524 		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1525 			struct btrfs_extent_data_ref *dref;
1526 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1527 			if (match_extent_data_ref(leaf, dref, root_objectid,
1528 						  owner, offset)) {
1529 				err = 0;
1530 				break;
1531 			}
1532 			if (hash_extent_data_ref_item(leaf, dref) <
1533 			    hash_extent_data_ref(root_objectid, owner, offset))
1534 				break;
1535 		} else {
1536 			u64 ref_offset;
1537 			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1538 			if (parent > 0) {
1539 				if (parent == ref_offset) {
1540 					err = 0;
1541 					break;
1542 				}
1543 				if (ref_offset < parent)
1544 					break;
1545 			} else {
1546 				if (root_objectid == ref_offset) {
1547 					err = 0;
1548 					break;
1549 				}
1550 				if (ref_offset < root_objectid)
1551 					break;
1552 			}
1553 		}
1554 		ptr += btrfs_extent_inline_ref_size(type);
1555 	}
1556 	if (err == -ENOENT && insert) {
1557 		if (item_size + extra_size >=
1558 		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1559 			err = -EAGAIN;
1560 			goto out;
1561 		}
1562 		/*
1563 		 * To add a new inline back ref, we have to make sure
1564 		 * there is no corresponding back ref item.
1565 		 * For simplicity, we just do not add a new inline back
1566 		 * ref if there is any kind of item for this block.
1567 		 */
1568 		if (find_next_key(path, 0, &key) == 0 &&
1569 		    key.objectid == bytenr &&
1570 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1571 			err = -EAGAIN;
1572 			goto out;
1573 		}
1574 	}
1575 	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1576 out:
1577 	if (insert) {
1578 		path->keep_locks = 0;
1579 		btrfs_unlock_up_safe(path, 1);
1580 	}
1581 	return err;
1582 }
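/*
 * Usage note (added for illustration): insert_inline_extent_backref() below
 * shows the typical caller pattern: a 0 return means the existing inline
 * ref at *ref_ret is updated in place, while -ENOENT means a new inline ref
 * is set up at the returned insertion point.
 */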
1583 
1584 /*
1585  * helper to add new inline back ref
1586  */
1587 static noinline_for_stack
1588 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1589 				 struct btrfs_root *root,
1590 				 struct btrfs_path *path,
1591 				 struct btrfs_extent_inline_ref *iref,
1592 				 u64 parent, u64 root_objectid,
1593 				 u64 owner, u64 offset, int refs_to_add,
1594 				 struct btrfs_delayed_extent_op *extent_op)
1595 {
1596 	struct extent_buffer *leaf;
1597 	struct btrfs_extent_item *ei;
1598 	unsigned long ptr;
1599 	unsigned long end;
1600 	unsigned long item_offset;
1601 	u64 refs;
1602 	int size;
1603 	int type;
1604 
1605 	leaf = path->nodes[0];
1606 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1607 	item_offset = (unsigned long)iref - (unsigned long)ei;
1608 
1609 	type = extent_ref_type(parent, owner);
1610 	size = btrfs_extent_inline_ref_size(type);
1611 
1612 	btrfs_extend_item(trans, root, path, size);
1613 
1614 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1615 	refs = btrfs_extent_refs(leaf, ei);
1616 	refs += refs_to_add;
1617 	btrfs_set_extent_refs(leaf, ei, refs);
1618 	if (extent_op)
1619 		__run_delayed_extent_op(extent_op, leaf, ei);
1620 
1621 	ptr = (unsigned long)ei + item_offset;
1622 	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1623 	if (ptr < end - size)
1624 		memmove_extent_buffer(leaf, ptr + size, ptr,
1625 				      end - size - ptr);
1626 
1627 	iref = (struct btrfs_extent_inline_ref *)ptr;
1628 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1629 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1630 		struct btrfs_extent_data_ref *dref;
1631 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1632 		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1633 		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1634 		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1635 		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1636 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1637 		struct btrfs_shared_data_ref *sref;
1638 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1639 		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1640 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1641 	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1642 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1643 	} else {
1644 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1645 	}
1646 	btrfs_mark_buffer_dirty(leaf);
1647 }
1648 
1649 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1650 				 struct btrfs_root *root,
1651 				 struct btrfs_path *path,
1652 				 struct btrfs_extent_inline_ref **ref_ret,
1653 				 u64 bytenr, u64 num_bytes, u64 parent,
1654 				 u64 root_objectid, u64 owner, u64 offset)
1655 {
1656 	int ret;
1657 
1658 	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1659 					   bytenr, num_bytes, parent,
1660 					   root_objectid, owner, offset, 0);
1661 	if (ret != -ENOENT)
1662 		return ret;
1663 
1664 	btrfs_release_path(path);
1665 	*ref_ret = NULL;
1666 
1667 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1668 		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1669 					    root_objectid);
1670 	} else {
1671 		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1672 					     root_objectid, owner, offset);
1673 	}
1674 	return ret;
1675 }
1676 
1677 /*
1678  * helper to update/remove inline back ref
1679  */
1680 static noinline_for_stack
1681 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1682 				  struct btrfs_root *root,
1683 				  struct btrfs_path *path,
1684 				  struct btrfs_extent_inline_ref *iref,
1685 				  int refs_to_mod,
1686 				  struct btrfs_delayed_extent_op *extent_op)
1687 {
1688 	struct extent_buffer *leaf;
1689 	struct btrfs_extent_item *ei;
1690 	struct btrfs_extent_data_ref *dref = NULL;
1691 	struct btrfs_shared_data_ref *sref = NULL;
1692 	unsigned long ptr;
1693 	unsigned long end;
1694 	u32 item_size;
1695 	int size;
1696 	int type;
1697 	u64 refs;
1698 
1699 	leaf = path->nodes[0];
1700 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1701 	refs = btrfs_extent_refs(leaf, ei);
1702 	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1703 	refs += refs_to_mod;
1704 	btrfs_set_extent_refs(leaf, ei, refs);
1705 	if (extent_op)
1706 		__run_delayed_extent_op(extent_op, leaf, ei);
1707 
1708 	type = btrfs_extent_inline_ref_type(leaf, iref);
1709 
1710 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1711 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1712 		refs = btrfs_extent_data_ref_count(leaf, dref);
1713 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1714 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1715 		refs = btrfs_shared_data_ref_count(leaf, sref);
1716 	} else {
1717 		refs = 1;
1718 		BUG_ON(refs_to_mod != -1);
1719 	}
1720 
1721 	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1722 	refs += refs_to_mod;
1723 
1724 	if (refs > 0) {
1725 		if (type == BTRFS_EXTENT_DATA_REF_KEY)
1726 			btrfs_set_extent_data_ref_count(leaf, dref, refs);
1727 		else
1728 			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1729 	} else {
1730 		size =  btrfs_extent_inline_ref_size(type);
1731 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1732 		ptr = (unsigned long)iref;
1733 		end = (unsigned long)ei + item_size;
1734 		if (ptr + size < end)
1735 			memmove_extent_buffer(leaf, ptr, ptr + size,
1736 					      end - ptr - size);
1737 		item_size -= size;
1738 		btrfs_truncate_item(trans, root, path, item_size, 1);
1739 	}
1740 	btrfs_mark_buffer_dirty(leaf);
1741 }
1742 
1743 static noinline_for_stack
1744 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1745 				 struct btrfs_root *root,
1746 				 struct btrfs_path *path,
1747 				 u64 bytenr, u64 num_bytes, u64 parent,
1748 				 u64 root_objectid, u64 owner,
1749 				 u64 offset, int refs_to_add,
1750 				 struct btrfs_delayed_extent_op *extent_op)
1751 {
1752 	struct btrfs_extent_inline_ref *iref;
1753 	int ret;
1754 
1755 	ret = lookup_inline_extent_backref(trans, root, path, &iref,
1756 					   bytenr, num_bytes, parent,
1757 					   root_objectid, owner, offset, 1);
1758 	if (ret == 0) {
1759 		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1760 		update_inline_extent_backref(trans, root, path, iref,
1761 					     refs_to_add, extent_op);
1762 	} else if (ret == -ENOENT) {
1763 		setup_inline_extent_backref(trans, root, path, iref, parent,
1764 					    root_objectid, owner, offset,
1765 					    refs_to_add, extent_op);
1766 		ret = 0;
1767 	}
1768 	return ret;
1769 }
1770 
1771 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1772 				 struct btrfs_root *root,
1773 				 struct btrfs_path *path,
1774 				 u64 bytenr, u64 parent, u64 root_objectid,
1775 				 u64 owner, u64 offset, int refs_to_add)
1776 {
1777 	int ret;
1778 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1779 		BUG_ON(refs_to_add != 1);
1780 		ret = insert_tree_block_ref(trans, root, path, bytenr,
1781 					    parent, root_objectid);
1782 	} else {
1783 		ret = insert_extent_data_ref(trans, root, path, bytenr,
1784 					     parent, root_objectid,
1785 					     owner, offset, refs_to_add);
1786 	}
1787 	return ret;
1788 }
1789 
1790 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1791 				 struct btrfs_root *root,
1792 				 struct btrfs_path *path,
1793 				 struct btrfs_extent_inline_ref *iref,
1794 				 int refs_to_drop, int is_data)
1795 {
1796 	int ret = 0;
1797 
1798 	BUG_ON(!is_data && refs_to_drop != 1);
1799 	if (iref) {
1800 		update_inline_extent_backref(trans, root, path, iref,
1801 					     -refs_to_drop, NULL);
1802 	} else if (is_data) {
1803 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1804 	} else {
1805 		ret = btrfs_del_item(trans, root, path);
1806 	}
1807 	return ret;
1808 }
1809 
1810 static int btrfs_issue_discard(struct block_device *bdev,
1811 				u64 start, u64 len)
1812 {
1813 	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1814 }
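
/*
 * Note: blkdev_issue_discard() takes its start and count in 512-byte
 * sectors, which is why the byte values above are shifted right by 9
 * (divided by 512).  For example, a 1MiB discard at byte offset 1MiB
 * would be issued as start sector 2048, length 2048 sectors.
 */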
1815 
1816 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1817 				u64 num_bytes, u64 *actual_bytes)
1818 {
1819 	int ret;
1820 	u64 discarded_bytes = 0;
1821 	struct btrfs_bio *bbio = NULL;
1822 
1823 
1824 	/* Tell the block device(s) that the sectors can be discarded */
1825 	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1826 			      bytenr, &num_bytes, &bbio, 0);
1827 	/* Error condition is -ENOMEM */
1828 	if (!ret) {
1829 		struct btrfs_bio_stripe *stripe = bbio->stripes;
1830 		int i;
1831 
1832 
1833 		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1834 			if (!stripe->dev->can_discard)
1835 				continue;
1836 
1837 			ret = btrfs_issue_discard(stripe->dev->bdev,
1838 						  stripe->physical,
1839 						  stripe->length);
1840 			if (!ret)
1841 				discarded_bytes += stripe->length;
1842 			else if (ret != -EOPNOTSUPP)
1843 				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1844 
1845 			/*
1846 			 * If we get back EOPNOTSUPP for some reason, just
1847 			 * ignore the return value so we don't screw up
1848 			 * callers of discard_extent.
1849 			 */
1850 			ret = 0;
1851 		}
1852 		kfree(bbio);
1853 	}
1854 
1855 	if (actual_bytes)
1856 		*actual_bytes = discarded_bytes;
1857 
1858 
1859 	if (ret == -EOPNOTSUPP)
1860 		ret = 0;
1861 	return ret;
1862 }
1863 
1864 /* Can return -ENOMEM */
1865 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1866 			 struct btrfs_root *root,
1867 			 u64 bytenr, u64 num_bytes, u64 parent,
1868 			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
1869 {
1870 	int ret;
1871 	struct btrfs_fs_info *fs_info = root->fs_info;
1872 
1873 	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1874 	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
1875 
1876 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1877 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1878 					num_bytes,
1879 					parent, root_objectid, (int)owner,
1880 					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1881 	} else {
1882 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1883 					num_bytes,
1884 					parent, root_objectid, owner, offset,
1885 					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1886 	}
1887 	return ret;
1888 }
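
/*
 * Note the owner convention above: owner values below
 * BTRFS_FIRST_FREE_OBJECTID are tree levels, so the ref is queued as a
 * delayed tree ref; anything else is an inode number and is queued as a
 * delayed data ref.  Either way the extent tree itself is only touched
 * later, when the delayed refs are run.
 */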
1889 
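/*
 * Roughly, the flow below is: try to add the reference inline in the
 * extent item first (insert_inline_extent_backref).  If that returns
 * -EAGAIN the ref could not be added inline (for example, the extent
 * item would grow too large), so bump the extent item's ref count here
 * and then insert a separate keyed backref item instead
 * (insert_extent_backref).
 */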
1890 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1891 				  struct btrfs_root *root,
1892 				  u64 bytenr, u64 num_bytes,
1893 				  u64 parent, u64 root_objectid,
1894 				  u64 owner, u64 offset, int refs_to_add,
1895 				  struct btrfs_delayed_extent_op *extent_op)
1896 {
1897 	struct btrfs_path *path;
1898 	struct extent_buffer *leaf;
1899 	struct btrfs_extent_item *item;
1900 	u64 refs;
1901 	int ret;
1902 	int err = 0;
1903 
1904 	path = btrfs_alloc_path();
1905 	if (!path)
1906 		return -ENOMEM;
1907 
1908 	path->reada = 1;
1909 	path->leave_spinning = 1;
1910 	/* this will set up the path even if it fails to insert the back ref */
1911 	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1912 					   path, bytenr, num_bytes, parent,
1913 					   root_objectid, owner, offset,
1914 					   refs_to_add, extent_op);
1915 	if (ret == 0)
1916 		goto out;
1917 
1918 	if (ret != -EAGAIN) {
1919 		err = ret;
1920 		goto out;
1921 	}
1922 
1923 	leaf = path->nodes[0];
1924 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1925 	refs = btrfs_extent_refs(leaf, item);
1926 	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1927 	if (extent_op)
1928 		__run_delayed_extent_op(extent_op, leaf, item);
1929 
1930 	btrfs_mark_buffer_dirty(leaf);
1931 	btrfs_release_path(path);
1932 
1933 	path->reada = 1;
1934 	path->leave_spinning = 1;
1935 
1936 	/* now insert the actual backref */
1937 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
1938 				    path, bytenr, parent, root_objectid,
1939 				    owner, offset, refs_to_add);
1940 	if (ret)
1941 		btrfs_abort_transaction(trans, root, ret);
1942 out:
1943 	btrfs_free_path(path);
1944 	return err;
1945 }
1946 
1947 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1948 				struct btrfs_root *root,
1949 				struct btrfs_delayed_ref_node *node,
1950 				struct btrfs_delayed_extent_op *extent_op,
1951 				int insert_reserved)
1952 {
1953 	int ret = 0;
1954 	struct btrfs_delayed_data_ref *ref;
1955 	struct btrfs_key ins;
1956 	u64 parent = 0;
1957 	u64 ref_root = 0;
1958 	u64 flags = 0;
1959 
1960 	ins.objectid = node->bytenr;
1961 	ins.offset = node->num_bytes;
1962 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1963 
1964 	ref = btrfs_delayed_node_to_data_ref(node);
1965 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1966 		parent = ref->parent;
1967 	else
1968 		ref_root = ref->root;
1969 
1970 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1971 		if (extent_op) {
1972 			BUG_ON(extent_op->update_key);
1973 			flags |= extent_op->flags_to_set;
1974 		}
1975 		ret = alloc_reserved_file_extent(trans, root,
1976 						 parent, ref_root, flags,
1977 						 ref->objectid, ref->offset,
1978 						 &ins, node->ref_mod);
1979 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
1980 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1981 					     node->num_bytes, parent,
1982 					     ref_root, ref->objectid,
1983 					     ref->offset, node->ref_mod,
1984 					     extent_op);
1985 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
1986 		ret = __btrfs_free_extent(trans, root, node->bytenr,
1987 					  node->num_bytes, parent,
1988 					  ref_root, ref->objectid,
1989 					  ref->offset, node->ref_mod,
1990 					  extent_op);
1991 	} else {
1992 		BUG();
1993 	}
1994 	return ret;
1995 }
1996 
1997 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1998 				    struct extent_buffer *leaf,
1999 				    struct btrfs_extent_item *ei)
2000 {
2001 	u64 flags = btrfs_extent_flags(leaf, ei);
2002 	if (extent_op->update_flags) {
2003 		flags |= extent_op->flags_to_set;
2004 		btrfs_set_extent_flags(leaf, ei, flags);
2005 	}
2006 
2007 	if (extent_op->update_key) {
2008 		struct btrfs_tree_block_info *bi;
2009 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2010 		bi = (struct btrfs_tree_block_info *)(ei + 1);
2011 		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2012 	}
2013 }
2014 
2015 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2016 				 struct btrfs_root *root,
2017 				 struct btrfs_delayed_ref_node *node,
2018 				 struct btrfs_delayed_extent_op *extent_op)
2019 {
2020 	struct btrfs_key key;
2021 	struct btrfs_path *path;
2022 	struct btrfs_extent_item *ei;
2023 	struct extent_buffer *leaf;
2024 	u32 item_size;
2025 	int ret;
2026 	int err = 0;
2027 
2028 	if (trans->aborted)
2029 		return 0;
2030 
2031 	path = btrfs_alloc_path();
2032 	if (!path)
2033 		return -ENOMEM;
2034 
2035 	key.objectid = node->bytenr;
2036 	key.type = BTRFS_EXTENT_ITEM_KEY;
2037 	key.offset = node->num_bytes;
2038 
2039 	path->reada = 1;
2040 	path->leave_spinning = 1;
2041 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2042 				path, 0, 1);
2043 	if (ret < 0) {
2044 		err = ret;
2045 		goto out;
2046 	}
2047 	if (ret > 0) {
2048 		err = -EIO;
2049 		goto out;
2050 	}
2051 
2052 	leaf = path->nodes[0];
2053 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2054 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2055 	if (item_size < sizeof(*ei)) {
2056 		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2057 					     path, (u64)-1, 0);
2058 		if (ret < 0) {
2059 			err = ret;
2060 			goto out;
2061 		}
2062 		leaf = path->nodes[0];
2063 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2064 	}
2065 #endif
2066 	BUG_ON(item_size < sizeof(*ei));
2067 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2068 	__run_delayed_extent_op(extent_op, leaf, ei);
2069 
2070 	btrfs_mark_buffer_dirty(leaf);
2071 out:
2072 	btrfs_free_path(path);
2073 	return err;
2074 }
2075 
2076 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2077 				struct btrfs_root *root,
2078 				struct btrfs_delayed_ref_node *node,
2079 				struct btrfs_delayed_extent_op *extent_op,
2080 				int insert_reserved)
2081 {
2082 	int ret = 0;
2083 	struct btrfs_delayed_tree_ref *ref;
2084 	struct btrfs_key ins;
2085 	u64 parent = 0;
2086 	u64 ref_root = 0;
2087 
2088 	ins.objectid = node->bytenr;
2089 	ins.offset = node->num_bytes;
2090 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2091 
2092 	ref = btrfs_delayed_node_to_tree_ref(node);
2093 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2094 		parent = ref->parent;
2095 	else
2096 		ref_root = ref->root;
2097 
2098 	BUG_ON(node->ref_mod != 1);
2099 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2100 		BUG_ON(!extent_op || !extent_op->update_flags ||
2101 		       !extent_op->update_key);
2102 		ret = alloc_reserved_tree_block(trans, root,
2103 						parent, ref_root,
2104 						extent_op->flags_to_set,
2105 						&extent_op->key,
2106 						ref->level, &ins);
2107 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2108 		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2109 					     node->num_bytes, parent, ref_root,
2110 					     ref->level, 0, 1, extent_op);
2111 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2112 		ret = __btrfs_free_extent(trans, root, node->bytenr,
2113 					  node->num_bytes, parent, ref_root,
2114 					  ref->level, 0, 1, extent_op);
2115 	} else {
2116 		BUG();
2117 	}
2118 	return ret;
2119 }
2120 
2121 /* helper function to actually process a single delayed ref entry */
2122 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2123 			       struct btrfs_root *root,
2124 			       struct btrfs_delayed_ref_node *node,
2125 			       struct btrfs_delayed_extent_op *extent_op,
2126 			       int insert_reserved)
2127 {
2128 	int ret = 0;
2129 
2130 	if (trans->aborted)
2131 		return 0;
2132 
2133 	if (btrfs_delayed_ref_is_head(node)) {
2134 		struct btrfs_delayed_ref_head *head;
2135 		/*
2136 		 * we've hit the end of the chain and we were supposed
2137 		 * to insert this extent into the tree.  But it got
2138 		 * deleted before we ever needed to insert it, so all
2139 		 * we have to do is clean up the accounting.
2140 		 */
2141 		BUG_ON(extent_op);
2142 		head = btrfs_delayed_node_to_head(node);
2143 		if (insert_reserved) {
2144 			btrfs_pin_extent(root, node->bytenr,
2145 					 node->num_bytes, 1);
2146 			if (head->is_data) {
2147 				ret = btrfs_del_csums(trans, root,
2148 						      node->bytenr,
2149 						      node->num_bytes);
2150 			}
2151 		}
2152 		return ret;
2153 	}
2154 
2155 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2156 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2157 		ret = run_delayed_tree_ref(trans, root, node, extent_op,
2158 					   insert_reserved);
2159 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2160 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
2161 		ret = run_delayed_data_ref(trans, root, node, extent_op,
2162 					   insert_reserved);
2163 	else
2164 		BUG();
2165 	return ret;
2166 }
2167 
2168 static noinline struct btrfs_delayed_ref_node *
2169 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2170 {
2171 	struct rb_node *node;
2172 	struct btrfs_delayed_ref_node *ref;
2173 	int action = BTRFS_ADD_DELAYED_REF;
2174 again:
2175 	/*
2176 	 * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2177 	 * This prevents the ref count from going down to zero while
2178 	 * there are still pending delayed refs.
2179 	 */
2180 	node = rb_prev(&head->node.rb_node);
2181 	while (1) {
2182 		if (!node)
2183 			break;
2184 		ref = rb_entry(node, struct btrfs_delayed_ref_node,
2185 				rb_node);
2186 		if (ref->bytenr != head->node.bytenr)
2187 			break;
2188 		if (ref->action == action)
2189 			return ref;
2190 		node = rb_prev(node);
2191 	}
2192 	if (action == BTRFS_ADD_DELAYED_REF) {
2193 		action = BTRFS_DROP_DELAYED_REF;
2194 		goto again;
2195 	}
2196 	return NULL;
2197 }
2198 
2199 /*
2200  * Returns 0 on success or if called with an already aborted transaction.
2201  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2202  */
2203 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2204 				       struct btrfs_root *root,
2205 				       struct list_head *cluster)
2206 {
2207 	struct btrfs_delayed_ref_root *delayed_refs;
2208 	struct btrfs_delayed_ref_node *ref;
2209 	struct btrfs_delayed_ref_head *locked_ref = NULL;
2210 	struct btrfs_delayed_extent_op *extent_op;
2211 	struct btrfs_fs_info *fs_info = root->fs_info;
2212 	int ret;
2213 	int count = 0;
2214 	int must_insert_reserved = 0;
2215 
2216 	delayed_refs = &trans->transaction->delayed_refs;
2217 	while (1) {
2218 		if (!locked_ref) {
2219 			/* pick a new head ref from the cluster list */
2220 			if (list_empty(cluster))
2221 				break;
2222 
2223 			locked_ref = list_entry(cluster->next,
2224 				     struct btrfs_delayed_ref_head, cluster);
2225 
2226 			/* grab the lock that says we are going to process
2227 			 * all the refs for this head */
2228 			ret = btrfs_delayed_ref_lock(trans, locked_ref);
2229 
2230 			/*
2231 			 * we may have dropped the spin lock to get the head
2232 			 * mutex lock, and that might have given someone else
2233 			 * time to free the head.  If that's true, it has been
2234 			 * removed from our list and we can move on.
2235 			 */
2236 			if (ret == -EAGAIN) {
2237 				locked_ref = NULL;
2238 				count++;
2239 				continue;
2240 			}
2241 		}
2242 
2243 		/*
2244 		 * We need to try and merge add/drops of the same ref since we
2245 		 * can run into issues with relocate dropping the implicit ref
2246 		 * and then it being added back again before the drop can
2247 		 * finish.  If we merged anything we need to re-loop so we can
2248 		 * get a good ref.
2249 		 */
2250 		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2251 					 locked_ref);
2252 
2253 		/*
2254 		 * locked_ref is the head node, so we have to go one
2255 		 * node back for any delayed ref updates
2256 		 */
2257 		ref = select_delayed_ref(locked_ref);
2258 
2259 		if (ref && ref->seq &&
2260 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2261 			/*
2262 			 * there are still refs with lower seq numbers in the
2263 			 * process of being added. Don't run this ref yet.
2264 			 */
2265 			list_del_init(&locked_ref->cluster);
2266 			btrfs_delayed_ref_unlock(locked_ref);
2267 			locked_ref = NULL;
2268 			delayed_refs->num_heads_ready++;
2269 			spin_unlock(&delayed_refs->lock);
2270 			cond_resched();
2271 			spin_lock(&delayed_refs->lock);
2272 			continue;
2273 		}
2274 
2275 		/*
2276 		 * record the must insert reserved flag before we
2277 		 * drop the spin lock.
2278 		 */
2279 		must_insert_reserved = locked_ref->must_insert_reserved;
2280 		locked_ref->must_insert_reserved = 0;
2281 
2282 		extent_op = locked_ref->extent_op;
2283 		locked_ref->extent_op = NULL;
2284 
2285 		if (!ref) {
2286 			/* All delayed refs have been processed; go ahead
2287 			 * and send the head node to run_one_delayed_ref
2288 			 * so that any accounting fixes can happen.
2289 			 */
2290 			ref = &locked_ref->node;
2291 
2292 			if (extent_op && must_insert_reserved) {
2293 				btrfs_free_delayed_extent_op(extent_op);
2294 				extent_op = NULL;
2295 			}
2296 
2297 			if (extent_op) {
2298 				spin_unlock(&delayed_refs->lock);
2299 
2300 				ret = run_delayed_extent_op(trans, root,
2301 							    ref, extent_op);
2302 				btrfs_free_delayed_extent_op(extent_op);
2303 
2304 				if (ret) {
2305 					printk(KERN_DEBUG
2306 					       "btrfs: run_delayed_extent_op "
2307 					       "returned %d\n", ret);
2308 					spin_lock(&delayed_refs->lock);
2309 					btrfs_delayed_ref_unlock(locked_ref);
2310 					return ret;
2311 				}
2312 
2313 				goto next;
2314 			}
2315 		}
2316 
2317 		ref->in_tree = 0;
2318 		rb_erase(&ref->rb_node, &delayed_refs->root);
2319 		delayed_refs->num_entries--;
2320 		if (!btrfs_delayed_ref_is_head(ref)) {
2321 			/*
2322 			 * when we play the delayed ref, also correct the
2323 			 * ref_mod on head
2324 			 */
2325 			switch (ref->action) {
2326 			case BTRFS_ADD_DELAYED_REF:
2327 			case BTRFS_ADD_DELAYED_EXTENT:
2328 				locked_ref->node.ref_mod -= ref->ref_mod;
2329 				break;
2330 			case BTRFS_DROP_DELAYED_REF:
2331 				locked_ref->node.ref_mod += ref->ref_mod;
2332 				break;
2333 			default:
2334 				WARN_ON(1);
2335 			}
2336 		}
2337 		spin_unlock(&delayed_refs->lock);
2338 
2339 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
2340 					  must_insert_reserved);
2341 
2342 		btrfs_free_delayed_extent_op(extent_op);
2343 		if (ret) {
2344 			btrfs_delayed_ref_unlock(locked_ref);
2345 			btrfs_put_delayed_ref(ref);
2346 			printk(KERN_DEBUG
2347 			       "btrfs: run_one_delayed_ref returned %d\n", ret);
2348 			spin_lock(&delayed_refs->lock);
2349 			return ret;
2350 		}
2351 
2352 		/*
2353 		 * If this node is a head, that means all the refs in this head
2354 		 * have been dealt with, and we will pick the next head to deal
2355 		 * with, so we must unlock the head and drop it from the cluster
2356 		 * list before we release it.
2357 		 */
2358 		if (btrfs_delayed_ref_is_head(ref)) {
2359 			list_del_init(&locked_ref->cluster);
2360 			btrfs_delayed_ref_unlock(locked_ref);
2361 			locked_ref = NULL;
2362 		}
2363 		btrfs_put_delayed_ref(ref);
2364 		count++;
2365 next:
2366 		cond_resched();
2367 		spin_lock(&delayed_refs->lock);
2368 	}
2369 	return count;
2370 }
2371 
2372 #ifdef SCRAMBLE_DELAYED_REFS
2373 /*
2374  * Normally delayed refs get processed in ascending bytenr order. This
2375  * correlates in most cases to the order added. To expose dependencies on this
2376  * order, we start to process the tree in the middle instead of the beginning.
2377  */
2378 static u64 find_middle(struct rb_root *root)
2379 {
2380 	struct rb_node *n = root->rb_node;
2381 	struct btrfs_delayed_ref_node *entry;
2382 	int alt = 1;
2383 	u64 middle;
2384 	u64 first = 0, last = 0;
2385 
2386 	n = rb_first(root);
2387 	if (n) {
2388 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2389 		first = entry->bytenr;
2390 	}
2391 	n = rb_last(root);
2392 	if (n) {
2393 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2394 		last = entry->bytenr;
2395 	}
2396 	n = root->rb_node;
2397 
2398 	while (n) {
2399 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2400 		WARN_ON(!entry->in_tree);
2401 
2402 		middle = entry->bytenr;
2403 
2404 		if (alt)
2405 			n = n->rb_left;
2406 		else
2407 			n = n->rb_right;
2408 
2409 		alt = 1 - alt;
2410 	}
2411 	return middle;
2412 }
2413 #endif
2414 
2415 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2416 					 struct btrfs_fs_info *fs_info)
2417 {
2418 	struct qgroup_update *qgroup_update;
2419 	int ret = 0;
2420 
2421 	if (list_empty(&trans->qgroup_ref_list) !=
2422 	    !trans->delayed_ref_elem.seq) {
2423 		/* list without seq or seq without list */
2424 		printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
2425 			list_empty(&trans->qgroup_ref_list) ? "" : " not",
2426 			trans->delayed_ref_elem.seq);
2427 		BUG();
2428 	}
2429 
2430 	if (!trans->delayed_ref_elem.seq)
2431 		return 0;
2432 
2433 	while (!list_empty(&trans->qgroup_ref_list)) {
2434 		qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2435 						 struct qgroup_update, list);
2436 		list_del(&qgroup_update->list);
2437 		if (!ret)
2438 			ret = btrfs_qgroup_account_ref(
2439 					trans, fs_info, qgroup_update->node,
2440 					qgroup_update->extent_op);
2441 		kfree(qgroup_update);
2442 	}
2443 
2444 	btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2445 
2446 	return ret;
2447 }
2448 
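/*
 * Roughly: return 1 once delayed_refs->ref_seq has moved outside the
 * window [seq, seq + count), i.e. at least 'count' more refs have been
 * run since the caller sampled 'seq'; return 0 while it is still inside
 * that window.
 */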
2449 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2450 		      int count)
2451 {
2452 	int val = atomic_read(&delayed_refs->ref_seq);
2453 
2454 	if (val < seq || val >= seq + count)
2455 		return 1;
2456 	return 0;
2457 }
2458 
2459 /*
2460  * this starts processing the delayed reference count updates and
2461  * extent insertions we have queued up so far.  count can be
2462  * 0, which means to process everything in the tree at the start
2463  * of the run (but not newly added entries), or it can be some target
2464  * number you'd like to process.
2465  *
2466  * Returns 0 on success or if called with an aborted transaction
2467  * Returns <0 on error and aborts the transaction
2468  */
2469 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2470 			   struct btrfs_root *root, unsigned long count)
2471 {
2472 	struct rb_node *node;
2473 	struct btrfs_delayed_ref_root *delayed_refs;
2474 	struct btrfs_delayed_ref_node *ref;
2475 	struct list_head cluster;
2476 	int ret;
2477 	u64 delayed_start;
2478 	int run_all = count == (unsigned long)-1;
2479 	int run_most = 0;
2480 	int loops;
2481 
2482 	/* We'll clean this up in btrfs_cleanup_transaction */
2483 	if (trans->aborted)
2484 		return 0;
2485 
2486 	if (root == root->fs_info->extent_root)
2487 		root = root->fs_info->tree_root;
2488 
2489 	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2490 
2491 	delayed_refs = &trans->transaction->delayed_refs;
2492 	INIT_LIST_HEAD(&cluster);
2493 	if (count == 0) {
2494 		count = delayed_refs->num_entries * 2;
2495 		run_most = 1;
2496 	}
2497 
2498 	if (!run_all && !run_most) {
2499 		int old;
2500 		int seq = atomic_read(&delayed_refs->ref_seq);
2501 
2502 progress:
2503 		old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2504 		if (old) {
2505 			DEFINE_WAIT(__wait);
2506 			if (delayed_refs->num_entries < 16348)
2507 				return 0;
2508 
2509 			prepare_to_wait(&delayed_refs->wait, &__wait,
2510 					TASK_UNINTERRUPTIBLE);
2511 
2512 			old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2513 			if (old) {
2514 				schedule();
2515 				finish_wait(&delayed_refs->wait, &__wait);
2516 
2517 				if (!refs_newer(delayed_refs, seq, 256))
2518 					goto progress;
2519 				else
2520 					return 0;
2521 			} else {
2522 				finish_wait(&delayed_refs->wait, &__wait);
2523 				goto again;
2524 			}
2525 		}
2526 
2527 	} else {
2528 		atomic_inc(&delayed_refs->procs_running_refs);
2529 	}
2530 
2531 again:
2532 	loops = 0;
2533 	spin_lock(&delayed_refs->lock);
2534 
2535 #ifdef SCRAMBLE_DELAYED_REFS
2536 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2537 #endif
2538 
2539 	while (1) {
2540 		if (!(run_all || run_most) &&
2541 		    delayed_refs->num_heads_ready < 64)
2542 			break;
2543 
2544 		/*
2545 		 * go find something we can process in the rbtree.  We start at
2546 		 * the beginning of the tree, and then build a cluster
2547 		 * of refs to process starting at the first one we are able to
2548 		 * lock
2549 		 */
2550 		delayed_start = delayed_refs->run_delayed_start;
2551 		ret = btrfs_find_ref_cluster(trans, &cluster,
2552 					     delayed_refs->run_delayed_start);
2553 		if (ret)
2554 			break;
2555 
2556 		ret = run_clustered_refs(trans, root, &cluster);
2557 		if (ret < 0) {
2558 			btrfs_release_ref_cluster(&cluster);
2559 			spin_unlock(&delayed_refs->lock);
2560 			btrfs_abort_transaction(trans, root, ret);
2561 			atomic_dec(&delayed_refs->procs_running_refs);
2562 			return ret;
2563 		}
2564 
2565 		atomic_add(ret, &delayed_refs->ref_seq);
2566 
2567 		count -= min_t(unsigned long, ret, count);
2568 
2569 		if (count == 0)
2570 			break;
2571 
2572 		if (delayed_start >= delayed_refs->run_delayed_start) {
2573 			if (loops == 0) {
2574 				/*
2575 				 * btrfs_find_ref_cluster looped.  Do one more
2576 				 * cycle; if we don't run any delayed refs
2577 				 * during that cycle (because all of them are
2578 				 * blocked), bail out.
2579 				 */
2580 				loops = 1;
2581 			} else {
2582 				/*
2583 				 * no runnable refs left, stop trying
2584 				 */
2585 				BUG_ON(run_all);
2586 				break;
2587 			}
2588 		}
2589 		if (ret) {
2590 			/* refs were run, let's reset staleness detection */
2591 			loops = 0;
2592 		}
2593 	}
2594 
2595 	if (run_all) {
2596 		if (!list_empty(&trans->new_bgs)) {
2597 			spin_unlock(&delayed_refs->lock);
2598 			btrfs_create_pending_block_groups(trans, root);
2599 			spin_lock(&delayed_refs->lock);
2600 		}
2601 
2602 		node = rb_first(&delayed_refs->root);
2603 		if (!node)
2604 			goto out;
2605 		count = (unsigned long)-1;
2606 
2607 		while (node) {
2608 			ref = rb_entry(node, struct btrfs_delayed_ref_node,
2609 				       rb_node);
2610 			if (btrfs_delayed_ref_is_head(ref)) {
2611 				struct btrfs_delayed_ref_head *head;
2612 
2613 				head = btrfs_delayed_node_to_head(ref);
2614 				atomic_inc(&ref->refs);
2615 
2616 				spin_unlock(&delayed_refs->lock);
2617 				/*
2618 				 * Mutex was contended, block until it's
2619 				 * released and try again
2620 				 */
2621 				mutex_lock(&head->mutex);
2622 				mutex_unlock(&head->mutex);
2623 
2624 				btrfs_put_delayed_ref(ref);
2625 				cond_resched();
2626 				goto again;
2627 			}
2628 			node = rb_next(node);
2629 		}
2630 		spin_unlock(&delayed_refs->lock);
2631 		schedule_timeout(1);
2632 		goto again;
2633 	}
2634 out:
2635 	atomic_dec(&delayed_refs->procs_running_refs);
2636 	smp_mb();
2637 	if (waitqueue_active(&delayed_refs->wait))
2638 		wake_up(&delayed_refs->wait);
2639 
2640 	spin_unlock(&delayed_refs->lock);
2641 	assert_qgroups_uptodate(trans);
2642 	return 0;
2643 }
2644 
2645 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2646 				struct btrfs_root *root,
2647 				u64 bytenr, u64 num_bytes, u64 flags,
2648 				int is_data)
2649 {
2650 	struct btrfs_delayed_extent_op *extent_op;
2651 	int ret;
2652 
2653 	extent_op = btrfs_alloc_delayed_extent_op();
2654 	if (!extent_op)
2655 		return -ENOMEM;
2656 
2657 	extent_op->flags_to_set = flags;
2658 	extent_op->update_flags = 1;
2659 	extent_op->update_key = 0;
2660 	extent_op->is_data = is_data ? 1 : 0;
2661 
2662 	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2663 					  num_bytes, extent_op);
2664 	if (ret)
2665 		btrfs_free_delayed_extent_op(extent_op);
2666 	return ret;
2667 }
2668 
2669 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2670 				      struct btrfs_root *root,
2671 				      struct btrfs_path *path,
2672 				      u64 objectid, u64 offset, u64 bytenr)
2673 {
2674 	struct btrfs_delayed_ref_head *head;
2675 	struct btrfs_delayed_ref_node *ref;
2676 	struct btrfs_delayed_data_ref *data_ref;
2677 	struct btrfs_delayed_ref_root *delayed_refs;
2678 	struct rb_node *node;
2679 	int ret = 0;
2680 
2681 	ret = -ENOENT;
2682 	delayed_refs = &trans->transaction->delayed_refs;
2683 	spin_lock(&delayed_refs->lock);
2684 	head = btrfs_find_delayed_ref_head(trans, bytenr);
2685 	if (!head)
2686 		goto out;
2687 
2688 	if (!mutex_trylock(&head->mutex)) {
2689 		atomic_inc(&head->node.refs);
2690 		spin_unlock(&delayed_refs->lock);
2691 
2692 		btrfs_release_path(path);
2693 
2694 		/*
2695 		 * Mutex was contended, block until it's released and let
2696 		 * caller try again
2697 		 */
2698 		mutex_lock(&head->mutex);
2699 		mutex_unlock(&head->mutex);
2700 		btrfs_put_delayed_ref(&head->node);
2701 		return -EAGAIN;
2702 	}
2703 
2704 	node = rb_prev(&head->node.rb_node);
2705 	if (!node)
2706 		goto out_unlock;
2707 
2708 	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2709 
2710 	if (ref->bytenr != bytenr)
2711 		goto out_unlock;
2712 
2713 	ret = 1;
2714 	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2715 		goto out_unlock;
2716 
2717 	data_ref = btrfs_delayed_node_to_data_ref(ref);
2718 
2719 	node = rb_prev(node);
2720 	if (node) {
2721 		int seq = ref->seq;
2722 
2723 		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2724 		if (ref->bytenr == bytenr && ref->seq == seq)
2725 			goto out_unlock;
2726 	}
2727 
2728 	if (data_ref->root != root->root_key.objectid ||
2729 	    data_ref->objectid != objectid || data_ref->offset != offset)
2730 		goto out_unlock;
2731 
2732 	ret = 0;
2733 out_unlock:
2734 	mutex_unlock(&head->mutex);
2735 out:
2736 	spin_unlock(&delayed_refs->lock);
2737 	return ret;
2738 }
2739 
2740 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2741 					struct btrfs_root *root,
2742 					struct btrfs_path *path,
2743 					u64 objectid, u64 offset, u64 bytenr)
2744 {
2745 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2746 	struct extent_buffer *leaf;
2747 	struct btrfs_extent_data_ref *ref;
2748 	struct btrfs_extent_inline_ref *iref;
2749 	struct btrfs_extent_item *ei;
2750 	struct btrfs_key key;
2751 	u32 item_size;
2752 	int ret;
2753 
2754 	key.objectid = bytenr;
2755 	key.offset = (u64)-1;
2756 	key.type = BTRFS_EXTENT_ITEM_KEY;
2757 
2758 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2759 	if (ret < 0)
2760 		goto out;
2761 	BUG_ON(ret == 0); /* Corruption */
2762 
2763 	ret = -ENOENT;
2764 	if (path->slots[0] == 0)
2765 		goto out;
2766 
2767 	path->slots[0]--;
2768 	leaf = path->nodes[0];
2769 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2770 
2771 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2772 		goto out;
2773 
2774 	ret = 1;
2775 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2776 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2777 	if (item_size < sizeof(*ei)) {
2778 		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2779 		goto out;
2780 	}
2781 #endif
2782 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2783 
2784 	if (item_size != sizeof(*ei) +
2785 	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2786 		goto out;
2787 
2788 	if (btrfs_extent_generation(leaf, ei) <=
2789 	    btrfs_root_last_snapshot(&root->root_item))
2790 		goto out;
2791 
2792 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2793 	if (btrfs_extent_inline_ref_type(leaf, iref) !=
2794 	    BTRFS_EXTENT_DATA_REF_KEY)
2795 		goto out;
2796 
2797 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2798 	if (btrfs_extent_refs(leaf, ei) !=
2799 	    btrfs_extent_data_ref_count(leaf, ref) ||
2800 	    btrfs_extent_data_ref_root(leaf, ref) !=
2801 	    root->root_key.objectid ||
2802 	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2803 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
2804 		goto out;
2805 
2806 	ret = 0;
2807 out:
2808 	return ret;
2809 }
2810 
2811 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2812 			  struct btrfs_root *root,
2813 			  u64 objectid, u64 offset, u64 bytenr)
2814 {
2815 	struct btrfs_path *path;
2816 	int ret;
2817 	int ret2;
2818 
2819 	path = btrfs_alloc_path();
2820 	if (!path)
2821 		return -ENOENT;
2822 
2823 	do {
2824 		ret = check_committed_ref(trans, root, path, objectid,
2825 					  offset, bytenr);
2826 		if (ret && ret != -ENOENT)
2827 			goto out;
2828 
2829 		ret2 = check_delayed_ref(trans, root, path, objectid,
2830 					 offset, bytenr);
2831 	} while (ret2 == -EAGAIN);
2832 
2833 	if (ret2 && ret2 != -ENOENT) {
2834 		ret = ret2;
2835 		goto out;
2836 	}
2837 
2838 	if (ret != -ENOENT || ret2 != -ENOENT)
2839 		ret = 0;
2840 out:
2841 	btrfs_free_path(path);
2842 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2843 		WARN_ON(ret > 0);
2844 	return ret;
2845 }
2846 
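/*
 * Walk the items in a tree block and add or drop one reference on each
 * extent they reference: file extents for a leaf (level 0), child blocks
 * for an internal node.  'inc' picks btrfs_inc_extent_ref vs
 * btrfs_free_extent, and 'full_backref' makes the new refs parent-based
 * (shared) by passing buf->start as the parent instead of 0.
 */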
2847 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2848 			   struct btrfs_root *root,
2849 			   struct extent_buffer *buf,
2850 			   int full_backref, int inc, int for_cow)
2851 {
2852 	u64 bytenr;
2853 	u64 num_bytes;
2854 	u64 parent;
2855 	u64 ref_root;
2856 	u32 nritems;
2857 	struct btrfs_key key;
2858 	struct btrfs_file_extent_item *fi;
2859 	int i;
2860 	int level;
2861 	int ret = 0;
2862 	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2863 			    u64, u64, u64, u64, u64, u64, int);
2864 
2865 	ref_root = btrfs_header_owner(buf);
2866 	nritems = btrfs_header_nritems(buf);
2867 	level = btrfs_header_level(buf);
2868 
2869 	if (!root->ref_cows && level == 0)
2870 		return 0;
2871 
2872 	if (inc)
2873 		process_func = btrfs_inc_extent_ref;
2874 	else
2875 		process_func = btrfs_free_extent;
2876 
2877 	if (full_backref)
2878 		parent = buf->start;
2879 	else
2880 		parent = 0;
2881 
2882 	for (i = 0; i < nritems; i++) {
2883 		if (level == 0) {
2884 			btrfs_item_key_to_cpu(buf, &key, i);
2885 			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2886 				continue;
2887 			fi = btrfs_item_ptr(buf, i,
2888 					    struct btrfs_file_extent_item);
2889 			if (btrfs_file_extent_type(buf, fi) ==
2890 			    BTRFS_FILE_EXTENT_INLINE)
2891 				continue;
2892 			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2893 			if (bytenr == 0)
2894 				continue;
2895 
2896 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2897 			key.offset -= btrfs_file_extent_offset(buf, fi);
2898 			ret = process_func(trans, root, bytenr, num_bytes,
2899 					   parent, ref_root, key.objectid,
2900 					   key.offset, for_cow);
2901 			if (ret)
2902 				goto fail;
2903 		} else {
2904 			bytenr = btrfs_node_blockptr(buf, i);
2905 			num_bytes = btrfs_level_size(root, level - 1);
2906 			ret = process_func(trans, root, bytenr, num_bytes,
2907 					   parent, ref_root, level - 1, 0,
2908 					   for_cow);
2909 			if (ret)
2910 				goto fail;
2911 		}
2912 	}
2913 	return 0;
2914 fail:
2915 	return ret;
2916 }
2917 
2918 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2919 		  struct extent_buffer *buf, int full_backref, int for_cow)
2920 {
2921 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2922 }
2923 
2924 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2925 		  struct extent_buffer *buf, int full_backref, int for_cow)
2926 {
2927 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2928 }
2929 
2930 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2931 				 struct btrfs_root *root,
2932 				 struct btrfs_path *path,
2933 				 struct btrfs_block_group_cache *cache)
2934 {
2935 	int ret;
2936 	struct btrfs_root *extent_root = root->fs_info->extent_root;
2937 	unsigned long bi;
2938 	struct extent_buffer *leaf;
2939 
2940 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2941 	if (ret < 0)
2942 		goto fail;
2943 	BUG_ON(ret); /* Corruption */
2944 
2945 	leaf = path->nodes[0];
2946 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2947 	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2948 	btrfs_mark_buffer_dirty(leaf);
2949 	btrfs_release_path(path);
2950 fail:
2951 	if (ret) {
2952 		btrfs_abort_transaction(trans, root, ret);
2953 		return ret;
2954 	}
2955 	return 0;
2956 
2957 }
2958 
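/*
 * Rbtree iterator helper: drop the caller's reference on 'cache' and
 * return the next block group in the cache rbtree with a fresh reference
 * held (or NULL at the end), all under block_group_cache_lock.
 */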
2959 static struct btrfs_block_group_cache *
2960 next_block_group(struct btrfs_root *root,
2961 		 struct btrfs_block_group_cache *cache)
2962 {
2963 	struct rb_node *node;
2964 	spin_lock(&root->fs_info->block_group_cache_lock);
2965 	node = rb_next(&cache->cache_node);
2966 	btrfs_put_block_group(cache);
2967 	if (node) {
2968 		cache = rb_entry(node, struct btrfs_block_group_cache,
2969 				 cache_node);
2970 		btrfs_get_block_group(cache);
2971 	} else
2972 		cache = NULL;
2973 	spin_unlock(&root->fs_info->block_group_cache_lock);
2974 	return cache;
2975 }
2976 
2977 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2978 			    struct btrfs_trans_handle *trans,
2979 			    struct btrfs_path *path)
2980 {
2981 	struct btrfs_root *root = block_group->fs_info->tree_root;
2982 	struct inode *inode = NULL;
2983 	u64 alloc_hint = 0;
2984 	int dcs = BTRFS_DC_ERROR;
2985 	int num_pages = 0;
2986 	int retries = 0;
2987 	int ret = 0;
2988 
2989 	/*
2990 	 * If this block group is smaller than 100 megs, don't bother caching the
2991 	 * block group.
2992 	 */
2993 	if (block_group->key.offset < (100 * 1024 * 1024)) {
2994 		spin_lock(&block_group->lock);
2995 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2996 		spin_unlock(&block_group->lock);
2997 		return 0;
2998 	}
2999 
3000 again:
3001 	inode = lookup_free_space_inode(root, block_group, path);
3002 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3003 		ret = PTR_ERR(inode);
3004 		btrfs_release_path(path);
3005 		goto out;
3006 	}
3007 
3008 	if (IS_ERR(inode)) {
3009 		BUG_ON(retries);
3010 		retries++;
3011 
3012 		if (block_group->ro)
3013 			goto out_free;
3014 
3015 		ret = create_free_space_inode(root, trans, block_group, path);
3016 		if (ret)
3017 			goto out_free;
3018 		goto again;
3019 	}
3020 
3021 	/* We've already set up this transaction, go ahead and exit */
3022 	if (block_group->cache_generation == trans->transid &&
3023 	    i_size_read(inode)) {
3024 		dcs = BTRFS_DC_SETUP;
3025 		goto out_put;
3026 	}
3027 
3028 	/*
3029 	 * We want to set the generation to 0 so that if anything goes wrong
3030 	 * from here on out we know not to trust this cache the next time we
3031 	 * load it.
3032 	 */
3033 	BTRFS_I(inode)->generation = 0;
3034 	ret = btrfs_update_inode(trans, root, inode);
3035 	WARN_ON(ret);
3036 
3037 	if (i_size_read(inode) > 0) {
3038 		ret = btrfs_truncate_free_space_cache(root, trans, path,
3039 						      inode);
3040 		if (ret)
3041 			goto out_put;
3042 	}
3043 
3044 	spin_lock(&block_group->lock);
3045 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
3046 	    !btrfs_test_opt(root, SPACE_CACHE)) {
3047 		/*
3048 		 * don't bother trying to write stuff out _if_
3049 		 * a) we're not cached,
3050 		 * b) we're mounted with the nospace_cache option.
3051 		 */
3052 		dcs = BTRFS_DC_WRITTEN;
3053 		spin_unlock(&block_group->lock);
3054 		goto out_put;
3055 	}
3056 	spin_unlock(&block_group->lock);
3057 
3058 	/*
3059 	 * Try to preallocate enough space based on how big the block group is.
3060 	 * Keep in mind this has to include any pinned space which could end up
3061 	 * taking up quite a bit since it's not folded into the other space
3062 	 * cache.
3063 	 */
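	/*
	 * For example, with these numbers a 1GiB block group gives
	 * div64_u64(1GiB, 256MiB) = 4, times 16 is 64 pages, i.e. 256KiB of
	 * preallocated cache space with 4KiB pages.
	 */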
3064 	num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3065 	if (!num_pages)
3066 		num_pages = 1;
3067 
3068 	num_pages *= 16;
3069 	num_pages *= PAGE_CACHE_SIZE;
3070 
3071 	ret = btrfs_check_data_free_space(inode, num_pages);
3072 	if (ret)
3073 		goto out_put;
3074 
3075 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3076 					      num_pages, num_pages,
3077 					      &alloc_hint);
3078 	if (!ret)
3079 		dcs = BTRFS_DC_SETUP;
3080 	btrfs_free_reserved_data_space(inode, num_pages);
3081 
3082 out_put:
3083 	iput(inode);
3084 out_free:
3085 	btrfs_release_path(path);
3086 out:
3087 	spin_lock(&block_group->lock);
3088 	if (!ret && dcs == BTRFS_DC_SETUP)
3089 		block_group->cache_generation = trans->transid;
3090 	block_group->disk_cache_state = dcs;
3091 	spin_unlock(&block_group->lock);
3092 
3093 	return ret;
3094 }
3095 
3096 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3097 				   struct btrfs_root *root)
3098 {
3099 	struct btrfs_block_group_cache *cache;
3100 	int err = 0;
3101 	struct btrfs_path *path;
3102 	u64 last = 0;
3103 
3104 	path = btrfs_alloc_path();
3105 	if (!path)
3106 		return -ENOMEM;
3107 
3108 again:
3109 	while (1) {
3110 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3111 		while (cache) {
3112 			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3113 				break;
3114 			cache = next_block_group(root, cache);
3115 		}
3116 		if (!cache) {
3117 			if (last == 0)
3118 				break;
3119 			last = 0;
3120 			continue;
3121 		}
3122 		err = cache_save_setup(cache, trans, path);
3123 		last = cache->key.objectid + cache->key.offset;
3124 		btrfs_put_block_group(cache);
3125 	}
3126 
3127 	while (1) {
3128 		if (last == 0) {
3129 			err = btrfs_run_delayed_refs(trans, root,
3130 						     (unsigned long)-1);
3131 			if (err) /* File system offline */
3132 				goto out;
3133 		}
3134 
3135 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3136 		while (cache) {
3137 			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3138 				btrfs_put_block_group(cache);
3139 				goto again;
3140 			}
3141 
3142 			if (cache->dirty)
3143 				break;
3144 			cache = next_block_group(root, cache);
3145 		}
3146 		if (!cache) {
3147 			if (last == 0)
3148 				break;
3149 			last = 0;
3150 			continue;
3151 		}
3152 
3153 		if (cache->disk_cache_state == BTRFS_DC_SETUP)
3154 			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3155 		cache->dirty = 0;
3156 		last = cache->key.objectid + cache->key.offset;
3157 
3158 		err = write_one_cache_group(trans, root, path, cache);
3159 		if (err) /* File system offline */
3160 			goto out;
3161 
3162 		btrfs_put_block_group(cache);
3163 	}
3164 
3165 	while (1) {
3166 		/*
3167 		 * I don't think this is needed since we're just marking our
3168 		 * preallocated extent as written, but it can't hurt, just in
3169 		 * case.
3170 		 */
3171 		if (last == 0) {
3172 			err = btrfs_run_delayed_refs(trans, root,
3173 						     (unsigned long)-1);
3174 			if (err) /* File system offline */
3175 				goto out;
3176 		}
3177 
3178 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
3179 		while (cache) {
3180 			/*
3181 			 * Really this shouldn't happen, but it could if we
3182 			 * couldn't write the entire preallocated extent and
3183 			 * splitting the extent resulted in a new block.
3184 			 */
3185 			if (cache->dirty) {
3186 				btrfs_put_block_group(cache);
3187 				goto again;
3188 			}
3189 			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3190 				break;
3191 			cache = next_block_group(root, cache);
3192 		}
3193 		if (!cache) {
3194 			if (last == 0)
3195 				break;
3196 			last = 0;
3197 			continue;
3198 		}
3199 
3200 		err = btrfs_write_out_cache(root, trans, cache, path);
3201 
3202 		/*
3203 		 * If we didn't have an error then the cache state is still
3204 		 * NEED_WRITE, so we can set it to WRITTEN.
3205 		 */
3206 		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3207 			cache->disk_cache_state = BTRFS_DC_WRITTEN;
3208 		last = cache->key.objectid + cache->key.offset;
3209 		btrfs_put_block_group(cache);
3210 	}
3211 out:
3212 
3213 	btrfs_free_path(path);
3214 	return err;
3215 }
3216 
3217 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3218 {
3219 	struct btrfs_block_group_cache *block_group;
3220 	int readonly = 0;
3221 
3222 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3223 	if (!block_group || block_group->ro)
3224 		readonly = 1;
3225 	if (block_group)
3226 		btrfs_put_block_group(block_group);
3227 	return readonly;
3228 }
3229 
3230 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3231 			     u64 total_bytes, u64 bytes_used,
3232 			     struct btrfs_space_info **space_info)
3233 {
3234 	struct btrfs_space_info *found;
3235 	int i;
3236 	int factor;
3237 
3238 	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3239 		     BTRFS_BLOCK_GROUP_RAID10))
3240 		factor = 2;
3241 	else
3242 		factor = 1;
3243 
3244 	found = __find_space_info(info, flags);
3245 	if (found) {
3246 		spin_lock(&found->lock);
3247 		found->total_bytes += total_bytes;
3248 		found->disk_total += total_bytes * factor;
3249 		found->bytes_used += bytes_used;
3250 		found->disk_used += bytes_used * factor;
3251 		found->full = 0;
3252 		spin_unlock(&found->lock);
3253 		*space_info = found;
3254 		return 0;
3255 	}
3256 	found = kzalloc(sizeof(*found), GFP_NOFS);
3257 	if (!found)
3258 		return -ENOMEM;
3259 
3260 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3261 		INIT_LIST_HEAD(&found->block_groups[i]);
3262 	init_rwsem(&found->groups_sem);
3263 	spin_lock_init(&found->lock);
3264 	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3265 	found->total_bytes = total_bytes;
3266 	found->disk_total = total_bytes * factor;
3267 	found->bytes_used = bytes_used;
3268 	found->disk_used = bytes_used * factor;
3269 	found->bytes_pinned = 0;
3270 	found->bytes_reserved = 0;
3271 	found->bytes_readonly = 0;
3272 	found->bytes_may_use = 0;
3273 	found->full = 0;
3274 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3275 	found->chunk_alloc = 0;
3276 	found->flush = 0;
3277 	init_waitqueue_head(&found->wait);
3278 	*space_info = found;
3279 	list_add_rcu(&found->list, &info->space_info);
3280 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3281 		info->data_sinfo = found;
3282 	return 0;
3283 }
3284 
3285 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3286 {
3287 	u64 extra_flags = chunk_to_extended(flags) &
3288 				BTRFS_EXTENDED_PROFILE_MASK;
3289 
3290 	write_seqlock(&fs_info->profiles_lock);
3291 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3292 		fs_info->avail_data_alloc_bits |= extra_flags;
3293 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
3294 		fs_info->avail_metadata_alloc_bits |= extra_flags;
3295 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3296 		fs_info->avail_system_alloc_bits |= extra_flags;
3297 	write_sequnlock(&fs_info->profiles_lock);
3298 }
3299 
3300 /*
3301  * returns target flags in extended format or 0 if restripe for this
3302  * chunk_type is not in progress
3303  *
3304  * should be called with either volume_mutex or balance_lock held
3305  */
3306 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3307 {
3308 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3309 	u64 target = 0;
3310 
3311 	if (!bctl)
3312 		return 0;
3313 
3314 	if (flags & BTRFS_BLOCK_GROUP_DATA &&
3315 	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3316 		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3317 	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3318 		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3319 		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3320 	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3321 		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3322 		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3323 	}
3324 
3325 	return target;
3326 }
3327 
3328 /*
3329  * @flags: available profiles in extended format (see ctree.h)
3330  *
3331  * Returns reduced profile in chunk format.  If profile changing is in
3332  * progress (either running or paused) picks the target profile (if it's
3333  * already available), otherwise falls back to plain reducing.
3334  */
3335 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3336 {
3337 	/*
3338 	 * we add in the count of missing devices because we want
3339 	 * to make sure that any RAID levels on a degraded FS
3340 	 * continue to be honored.
3341 	 */
3342 	u64 num_devices = root->fs_info->fs_devices->rw_devices +
3343 		root->fs_info->fs_devices->missing_devices;
3344 	u64 target;
3345 	u64 tmp;
3346 
3347 	/*
3348 	 * See if a restripe for this chunk_type is in progress; if so,
3349 	 * try to reduce to the target profile.
3350 	 */
3351 	spin_lock(&root->fs_info->balance_lock);
3352 	target = get_restripe_target(root->fs_info, flags);
3353 	if (target) {
3354 		/* pick target profile only if it's already available */
3355 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3356 			spin_unlock(&root->fs_info->balance_lock);
3357 			return extended_to_chunk(target);
3358 		}
3359 	}
3360 	spin_unlock(&root->fs_info->balance_lock);
3361 
3362 	/* First, mask out the RAID levels which aren't possible */
3363 	if (num_devices == 1)
3364 		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3365 			   BTRFS_BLOCK_GROUP_RAID5);
3366 	if (num_devices < 3)
3367 		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3368 	if (num_devices < 4)
3369 		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3370 
3371 	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3372 		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3373 		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3374 	flags &= ~tmp;
3375 
3376 	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3377 		tmp = BTRFS_BLOCK_GROUP_RAID6;
3378 	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3379 		tmp = BTRFS_BLOCK_GROUP_RAID5;
3380 	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3381 		tmp = BTRFS_BLOCK_GROUP_RAID10;
3382 	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3383 		tmp = BTRFS_BLOCK_GROUP_RAID1;
3384 	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3385 		tmp = BTRFS_BLOCK_GROUP_RAID0;
3386 
3387 	return extended_to_chunk(flags | tmp);
3388 }
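
/*
 * For example, with two rw devices and no restripe in progress, available
 * profiles of RAID10|RAID1 reduce to RAID1: RAID10 is masked out because
 * it needs at least four devices, and RAID1 then wins in the priority
 * chain above before the result is converted back to chunk format.
 */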
3389 
3390 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3391 {
3392 	unsigned seq;
3393 
3394 	do {
3395 		seq = read_seqbegin(&root->fs_info->profiles_lock);
3396 
3397 		if (flags & BTRFS_BLOCK_GROUP_DATA)
3398 			flags |= root->fs_info->avail_data_alloc_bits;
3399 		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3400 			flags |= root->fs_info->avail_system_alloc_bits;
3401 		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3402 			flags |= root->fs_info->avail_metadata_alloc_bits;
3403 	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
3404 
3405 	return btrfs_reduce_alloc_profile(root, flags);
3406 }
3407 
3408 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3409 {
3410 	u64 flags;
3411 	u64 ret;
3412 
3413 	if (data)
3414 		flags = BTRFS_BLOCK_GROUP_DATA;
3415 	else if (root == root->fs_info->chunk_root)
3416 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
3417 	else
3418 		flags = BTRFS_BLOCK_GROUP_METADATA;
3419 
3420 	ret = get_alloc_profile(root, flags);
3421 	return ret;
3422 }
3423 
3424 /*
3425  * This will check the space that the inode allocates from to make sure we
3426  * have enough space for 'bytes'.
3427  */
3428 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3429 {
3430 	struct btrfs_space_info *data_sinfo;
3431 	struct btrfs_root *root = BTRFS_I(inode)->root;
3432 	struct btrfs_fs_info *fs_info = root->fs_info;
3433 	u64 used;
3434 	int ret = 0, committed = 0, alloc_chunk = 1;
3435 
3436 	/* make sure bytes are sectorsize aligned */
3437 	bytes = ALIGN(bytes, root->sectorsize);
3438 
3439 	if (root == root->fs_info->tree_root ||
3440 	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3441 		alloc_chunk = 0;
3442 		committed = 1;
3443 	}
3444 
3445 	data_sinfo = fs_info->data_sinfo;
3446 	if (!data_sinfo)
3447 		goto alloc;
3448 
3449 again:
3450 	/* make sure we have enough space to handle the data first */
3451 	spin_lock(&data_sinfo->lock);
3452 	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3453 		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3454 		data_sinfo->bytes_may_use;
3455 
3456 	if (used + bytes > data_sinfo->total_bytes) {
3457 		struct btrfs_trans_handle *trans;
3458 
3459 		/*
3460 		 * if we don't have enough free bytes in this space then we need
3461 		 * to alloc a new chunk.
3462 		 */
3463 		if (!data_sinfo->full && alloc_chunk) {
3464 			u64 alloc_target;
3465 
3466 			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3467 			spin_unlock(&data_sinfo->lock);
3468 alloc:
3469 			alloc_target = btrfs_get_alloc_profile(root, 1);
3470 			trans = btrfs_join_transaction(root);
3471 			if (IS_ERR(trans))
3472 				return PTR_ERR(trans);
3473 
3474 			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3475 					     alloc_target,
3476 					     CHUNK_ALLOC_NO_FORCE);
3477 			btrfs_end_transaction(trans, root);
3478 			if (ret < 0) {
3479 				if (ret != -ENOSPC)
3480 					return ret;
3481 				else
3482 					goto commit_trans;
3483 			}
3484 
3485 			if (!data_sinfo)
3486 				data_sinfo = fs_info->data_sinfo;
3487 
3488 			goto again;
3489 		}
3490 
3491 		/*
3492 		 * If we have fewer pinned bytes than we want to allocate, then
3493 		 * don't bother committing the transaction; it won't help us.
3494 		 */
3495 		if (data_sinfo->bytes_pinned < bytes)
3496 			committed = 1;
3497 		spin_unlock(&data_sinfo->lock);
3498 
3499 		/* commit the current transaction and try again */
3500 commit_trans:
3501 		if (!committed &&
3502 		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
3503 			committed = 1;
3504 			trans = btrfs_join_transaction(root);
3505 			if (IS_ERR(trans))
3506 				return PTR_ERR(trans);
3507 			ret = btrfs_commit_transaction(trans, root);
3508 			if (ret)
3509 				return ret;
3510 			goto again;
3511 		}
3512 
3513 		return -ENOSPC;
3514 	}
3515 	data_sinfo->bytes_may_use += bytes;
3516 	trace_btrfs_space_reservation(root->fs_info, "space_info",
3517 				      data_sinfo->flags, bytes, 1);
3518 	spin_unlock(&data_sinfo->lock);
3519 
3520 	return 0;
3521 }
3522 
3523 /*
3524  * Called if we need to clear a data reservation for this inode.
3525  */
3526 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3527 {
3528 	struct btrfs_root *root = BTRFS_I(inode)->root;
3529 	struct btrfs_space_info *data_sinfo;
3530 
3531 	/* make sure bytes are sectorsize aligned */
3532 	bytes = ALIGN(bytes, root->sectorsize);
3533 
3534 	data_sinfo = root->fs_info->data_sinfo;
3535 	spin_lock(&data_sinfo->lock);
3536 	data_sinfo->bytes_may_use -= bytes;
3537 	trace_btrfs_space_reservation(root->fs_info, "space_info",
3538 				      data_sinfo->flags, bytes, 0);
3539 	spin_unlock(&data_sinfo->lock);
3540 }
3541 
3542 static void force_metadata_allocation(struct btrfs_fs_info *info)
3543 {
3544 	struct list_head *head = &info->space_info;
3545 	struct btrfs_space_info *found;
3546 
3547 	rcu_read_lock();
3548 	list_for_each_entry_rcu(found, head, list) {
3549 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3550 			found->force_alloc = CHUNK_ALLOC_FORCE;
3551 	}
3552 	rcu_read_unlock();
3553 }
3554 
3555 static int should_alloc_chunk(struct btrfs_root *root,
3556 			      struct btrfs_space_info *sinfo, int force)
3557 {
3558 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3559 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3560 	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3561 	u64 thresh;
3562 
3563 	if (force == CHUNK_ALLOC_FORCE)
3564 		return 1;
3565 
3566 	/*
3567 	 * We need to take into account the global rsv because for all intents
3568 	 * and purposes it's used space.  Don't worry about locking the
3569 	 * global_rsv, it doesn't change except when the transaction commits.
3570 	 */
3571 	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3572 		num_allocated += global_rsv->size;
3573 
3574 	/*
3575 	 * in limited mode, we want to have some free space up to
3576 	 * about 1% of the FS size.
3577 	 */
3578 	if (force == CHUNK_ALLOC_LIMITED) {
3579 		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3580 		thresh = max_t(u64, 64 * 1024 * 1024,
3581 			       div_factor_fine(thresh, 1));
3582 
3583 		if (num_bytes - num_allocated < thresh)
3584 			return 1;
3585 	}
3586 
3587 	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3588 		return 0;
3589 	return 1;
3590 }
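
/*
 * Rough numbers for the checks above: in CHUNK_ALLOC_LIMITED mode we
 * allocate whenever the free space in this space_info drops below
 * max(64MiB, about 1% of the filesystem size) -- roughly 10GiB on a 1TiB
 * filesystem.  Otherwise we only allocate once used+reserved space (plus
 * 2MiB of slack) reaches about 80% of the space already allocated to
 * this space_info.
 */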
3591 
3592 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3593 {
3594 	u64 num_dev;
3595 
3596 	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3597 		    BTRFS_BLOCK_GROUP_RAID0 |
3598 		    BTRFS_BLOCK_GROUP_RAID5 |
3599 		    BTRFS_BLOCK_GROUP_RAID6))
3600 		num_dev = root->fs_info->fs_devices->rw_devices;
3601 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
3602 		num_dev = 2;
3603 	else
3604 		num_dev = 1;	/* DUP or single */
3605 
3606 	/* metadata for updating devices and the chunk tree */
3607 	return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3608 }
3609 
3610 static void check_system_chunk(struct btrfs_trans_handle *trans,
3611 			       struct btrfs_root *root, u64 type)
3612 {
3613 	struct btrfs_space_info *info;
3614 	u64 left;
3615 	u64 thresh;
3616 
3617 	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3618 	spin_lock(&info->lock);
3619 	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3620 		info->bytes_reserved - info->bytes_readonly;
3621 	spin_unlock(&info->lock);
3622 
3623 	thresh = get_system_chunk_thresh(root, type);
3624 	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3625 		printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3626 		       left, thresh, type);
3627 		dump_space_info(info, 0, 0);
3628 	}
3629 
3630 	if (left < thresh) {
3631 		u64 flags;
3632 
3633 		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3634 		btrfs_alloc_chunk(trans, root, flags);
3635 	}
3636 }
3637 
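/*
 * Try to allocate a new chunk for the space_info matching @flags.  Returns 1
 * if a chunk was allocated, 0 if no allocation was needed, and a negative
 * errno (-ENOSPC when the devices are full) otherwise.
 */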
3638 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3639 			  struct btrfs_root *extent_root, u64 flags, int force)
3640 {
3641 	struct btrfs_space_info *space_info;
3642 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
3643 	int wait_for_alloc = 0;
3644 	int ret = 0;
3645 
3646 	/* Don't re-enter if we're already allocating a chunk */
3647 	if (trans->allocating_chunk)
3648 		return -ENOSPC;
3649 
3650 	space_info = __find_space_info(extent_root->fs_info, flags);
3651 	if (!space_info) {
3652 		ret = update_space_info(extent_root->fs_info, flags,
3653 					0, 0, &space_info);
3654 		BUG_ON(ret); /* -ENOMEM */
3655 	}
3656 	BUG_ON(!space_info); /* Logic error */
3657 
3658 again:
3659 	spin_lock(&space_info->lock);
3660 	if (force < space_info->force_alloc)
3661 		force = space_info->force_alloc;
3662 	if (space_info->full) {
3663 		spin_unlock(&space_info->lock);
3664 		return 0;
3665 	}
3666 
3667 	if (!should_alloc_chunk(extent_root, space_info, force)) {
3668 		spin_unlock(&space_info->lock);
3669 		return 0;
3670 	} else if (space_info->chunk_alloc) {
3671 		wait_for_alloc = 1;
3672 	} else {
3673 		space_info->chunk_alloc = 1;
3674 	}
3675 
3676 	spin_unlock(&space_info->lock);
3677 
3678 	mutex_lock(&fs_info->chunk_mutex);
3679 
3680 	/*
3681 	 * The chunk_mutex is held throughout the entirety of a chunk
3682 	 * allocation, so once we've acquired the chunk_mutex we know that the
3683 	 * other guy is done and we need to recheck and see if we should
3684 	 * allocate.
3685 	 */
3686 	if (wait_for_alloc) {
3687 		mutex_unlock(&fs_info->chunk_mutex);
3688 		wait_for_alloc = 0;
3689 		goto again;
3690 	}
3691 
3692 	trans->allocating_chunk = true;
3693 
3694 	/*
3695 	 * If we have mixed data/metadata chunks we want to make sure we keep
3696 	 * allocating mixed chunks instead of individual chunks.
3697 	 */
3698 	if (btrfs_mixed_space_info(space_info))
3699 		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3700 
3701 	/*
3702 	 * if we're doing a data chunk, go ahead and make sure that
3703 	 * we keep a reasonable number of metadata chunks allocated in the
3704 	 * FS as well.
3705 	 */
3706 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3707 		fs_info->data_chunk_allocations++;
3708 		if (!(fs_info->data_chunk_allocations %
3709 		      fs_info->metadata_ratio))
3710 			force_metadata_allocation(fs_info);
3711 	}
3712 
3713 	/*
3714 	 * Check if we have enough space in SYSTEM chunk because we may need
3715 	 * to update devices.
3716 	 */
3717 	check_system_chunk(trans, extent_root, flags);
3718 
3719 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
3720 	trans->allocating_chunk = false;
3721 
3722 	spin_lock(&space_info->lock);
3723 	if (ret < 0 && ret != -ENOSPC)
3724 		goto out;
3725 	if (ret)
3726 		space_info->full = 1;
3727 	else
3728 		ret = 1;
3729 
3730 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3731 out:
3732 	space_info->chunk_alloc = 0;
3733 	spin_unlock(&space_info->lock);
3734 	mutex_unlock(&fs_info->chunk_mutex);
3735 	return ret;
3736 }
3737 
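/*
 * Return 1 if reserving @bytes more in @space_info is an acceptable
 * overcommit, 0 otherwise.  The allowance is a fraction of total_bytes
 * (1/2 normally, 1/8 when we are allowed to flush everything), capped by the
 * unallocated device space still available for new chunks.
 */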
3738 static int can_overcommit(struct btrfs_root *root,
3739 			  struct btrfs_space_info *space_info, u64 bytes,
3740 			  enum btrfs_reserve_flush_enum flush)
3741 {
3742 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3743 	u64 profile = btrfs_get_alloc_profile(root, 0);
3744 	u64 rsv_size = 0;
3745 	u64 avail;
3746 	u64 used;
3747 	u64 to_add;
3748 
3749 	used = space_info->bytes_used + space_info->bytes_reserved +
3750 		space_info->bytes_pinned + space_info->bytes_readonly;
3751 
3752 	spin_lock(&global_rsv->lock);
3753 	rsv_size = global_rsv->size;
3754 	spin_unlock(&global_rsv->lock);
3755 
3756 	/*
3757 	 * We only want to allow over committing if we have lots of actual space
3758 	 * free, but if we don't have enough space to handle the global reserve
3759 	 * space then we could end up having a real enospc problem when trying
3760 	 * to allocate a chunk or some other such important allocation.
3761 	 */
3762 	rsv_size <<= 1;
3763 	if (used + rsv_size >= space_info->total_bytes)
3764 		return 0;
3765 
3766 	used += space_info->bytes_may_use;
3767 
3768 	spin_lock(&root->fs_info->free_chunk_lock);
3769 	avail = root->fs_info->free_chunk_space;
3770 	spin_unlock(&root->fs_info->free_chunk_lock);
3771 
3772 	/*
3773 	 * If we have dup, raid1 or raid10 then only half of the free
3774 	 * space is actually useable.  For raid56, the space info used
3775 	 * doesn't include the parity drive, so we don't have to
3776 	 * change the math
3777 	 */
3778 	if (profile & (BTRFS_BLOCK_GROUP_DUP |
3779 		       BTRFS_BLOCK_GROUP_RAID1 |
3780 		       BTRFS_BLOCK_GROUP_RAID10))
3781 		avail >>= 1;
3782 
3783 	to_add = space_info->total_bytes;
3784 
3785 	/*
3786 	 * If we aren't flushing all things, let us overcommit up to half
3787 	 * of the space. If we can flush, don't let us overcommit too much,
3788 	 * let it overcommit up to 1/8 of the space.
3789 	 */
3790 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
3791 		to_add >>= 3;
3792 	else
3793 		to_add >>= 1;
3794 
3795 	/*
3796 	 * Limit the overcommit to the amount of free space we could possibly
3797 	 * allocate for chunks.
3798 	 */
3799 	to_add = min(avail, to_add);
3800 
3801 	if (used + bytes < space_info->total_bytes + to_add)
3802 		return 1;
3803 	return 0;
3804 }
3805 
3806 void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3807 				  unsigned long nr_pages)
3808 {
3809 	struct super_block *sb = root->fs_info->sb;
3810 	int started;
3811 
3812 	/* If we cannot start writeback, just sync all the delalloc files. */
3813 	started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
3814 						      WB_REASON_FS_FREE_SPACE);
3815 	if (!started) {
3816 		/*
3817 		 * We needn't worry about the filesystem going from r/w to r/o
3818 		 * even though we don't acquire the ->s_umount mutex, because the
3819 		 * filesystem guarantees that its delalloc inode list is empty
3820 		 * once it becomes read-only (all dirty pages have been written
3821 		 * to disk).
3822 		 */
3823 		btrfs_start_delalloc_inodes(root, 0);
3824 		btrfs_wait_ordered_extents(root, 0);
3825 	}
3826 }
3827 
3828 /*
3829  * shrink metadata reservation for delalloc
3830  */
3831 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3832 			    bool wait_ordered)
3833 {
3834 	struct btrfs_block_rsv *block_rsv;
3835 	struct btrfs_space_info *space_info;
3836 	struct btrfs_trans_handle *trans;
3837 	u64 delalloc_bytes;
3838 	u64 max_reclaim;
3839 	long time_left;
3840 	unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3841 	int loops = 0;
3842 	enum btrfs_reserve_flush_enum flush;
3843 
3844 	trans = (struct btrfs_trans_handle *)current->journal_info;
3845 	block_rsv = &root->fs_info->delalloc_block_rsv;
3846 	space_info = block_rsv->space_info;
3847 
3848 	smp_mb();
3849 	delalloc_bytes = percpu_counter_sum_positive(
3850 						&root->fs_info->delalloc_bytes);
3851 	if (delalloc_bytes == 0) {
3852 		if (trans)
3853 			return;
3854 		btrfs_wait_ordered_extents(root, 0);
3855 		return;
3856 	}
3857 
3858 	while (delalloc_bytes && loops < 3) {
3859 		max_reclaim = min(delalloc_bytes, to_reclaim);
3860 		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3861 		btrfs_writeback_inodes_sb_nr(root, nr_pages);
3862 		/*
3863 		 * We need to wait for the async pages to actually start before
3864 		 * we do anything.
3865 		 */
3866 		wait_event(root->fs_info->async_submit_wait,
3867 			   !atomic_read(&root->fs_info->async_delalloc_pages));
3868 
3869 		if (!trans)
3870 			flush = BTRFS_RESERVE_FLUSH_ALL;
3871 		else
3872 			flush = BTRFS_RESERVE_NO_FLUSH;
3873 		spin_lock(&space_info->lock);
3874 		if (can_overcommit(root, space_info, orig, flush)) {
3875 			spin_unlock(&space_info->lock);
3876 			break;
3877 		}
3878 		spin_unlock(&space_info->lock);
3879 
3880 		loops++;
3881 		if (wait_ordered && !trans) {
3882 			btrfs_wait_ordered_extents(root, 0);
3883 		} else {
3884 			time_left = schedule_timeout_killable(1);
3885 			if (time_left)
3886 				break;
3887 		}
3888 		smp_mb();
3889 		delalloc_bytes = percpu_counter_sum_positive(
3890 						&root->fs_info->delalloc_bytes);
3891 	}
3892 }
3893 
3894 /**
3895  * may_commit_transaction - possibly commit the transaction if it's OK to
3896  * @root - the root we're allocating for
3897  * @bytes - the number of bytes we want to reserve
3898  * @force - force the commit
3899  *
3900  * This will check to make sure that committing the transaction will actually
3901  * get us somewhere and then commit the transaction if it does.  Otherwise it
3902  * will return -ENOSPC.
3903  */
3904 static int may_commit_transaction(struct btrfs_root *root,
3905 				  struct btrfs_space_info *space_info,
3906 				  u64 bytes, int force)
3907 {
3908 	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3909 	struct btrfs_trans_handle *trans;
3910 
3911 	trans = (struct btrfs_trans_handle *)current->journal_info;
3912 	if (trans)
3913 		return -EAGAIN;
3914 
3915 	if (force)
3916 		goto commit;
3917 
3918 	/* See if there is enough pinned space to make this reservation */
3919 	spin_lock(&space_info->lock);
3920 	if (space_info->bytes_pinned >= bytes) {
3921 		spin_unlock(&space_info->lock);
3922 		goto commit;
3923 	}
3924 	spin_unlock(&space_info->lock);
3925 
3926 	/*
3927 	 * See if there is some space in the delayed insertion reservation for
3928 	 * this reservation.
3929 	 */
3930 	if (space_info != delayed_rsv->space_info)
3931 		return -ENOSPC;
3932 
3933 	spin_lock(&space_info->lock);
3934 	spin_lock(&delayed_rsv->lock);
3935 	if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3936 		spin_unlock(&delayed_rsv->lock);
3937 		spin_unlock(&space_info->lock);
3938 		return -ENOSPC;
3939 	}
3940 	spin_unlock(&delayed_rsv->lock);
3941 	spin_unlock(&space_info->lock);
3942 
3943 commit:
3944 	trans = btrfs_join_transaction(root);
3945 	if (IS_ERR(trans))
3946 		return -ENOSPC;
3947 
3948 	return btrfs_commit_transaction(trans, root);
3949 }
3950 
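/*
 * States that reserve_metadata_bytes() walks through, from the cheapest way
 * to reclaim space to the most expensive, when a reservation cannot be
 * satisfied immediately.
 */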
3951 enum flush_state {
3952 	FLUSH_DELAYED_ITEMS_NR	=	1,
3953 	FLUSH_DELAYED_ITEMS	=	2,
3954 	FLUSH_DELALLOC		=	3,
3955 	FLUSH_DELALLOC_WAIT	=	4,
3956 	ALLOC_CHUNK		=	5,
3957 	COMMIT_TRANS		=	6,
3958 };
3959 
3960 static int flush_space(struct btrfs_root *root,
3961 		       struct btrfs_space_info *space_info, u64 num_bytes,
3962 		       u64 orig_bytes, int state)
3963 {
3964 	struct btrfs_trans_handle *trans;
3965 	int nr;
3966 	int ret = 0;
3967 
3968 	switch (state) {
3969 	case FLUSH_DELAYED_ITEMS_NR:
3970 	case FLUSH_DELAYED_ITEMS:
3971 		if (state == FLUSH_DELAYED_ITEMS_NR) {
3972 			u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
3973 
3974 			nr = (int)div64_u64(num_bytes, bytes);
3975 			if (!nr)
3976 				nr = 1;
3977 			nr *= 2;
3978 		} else {
3979 			nr = -1;
3980 		}
3981 		trans = btrfs_join_transaction(root);
3982 		if (IS_ERR(trans)) {
3983 			ret = PTR_ERR(trans);
3984 			break;
3985 		}
3986 		ret = btrfs_run_delayed_items_nr(trans, root, nr);
3987 		btrfs_end_transaction(trans, root);
3988 		break;
3989 	case FLUSH_DELALLOC:
3990 	case FLUSH_DELALLOC_WAIT:
3991 		shrink_delalloc(root, num_bytes, orig_bytes,
3992 				state == FLUSH_DELALLOC_WAIT);
3993 		break;
3994 	case ALLOC_CHUNK:
3995 		trans = btrfs_join_transaction(root);
3996 		if (IS_ERR(trans)) {
3997 			ret = PTR_ERR(trans);
3998 			break;
3999 		}
4000 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4001 				     btrfs_get_alloc_profile(root, 0),
4002 				     CHUNK_ALLOC_NO_FORCE);
4003 		btrfs_end_transaction(trans, root);
4004 		if (ret == -ENOSPC)
4005 			ret = 0;
4006 		break;
4007 	case COMMIT_TRANS:
4008 		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4009 		break;
4010 	default:
4011 		ret = -ENOSPC;
4012 		break;
4013 	}
4014 
4015 	return ret;
4016 }
4017 /**
4018  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4019  * @root - the root we're allocating for
4020  * @block_rsv - the block_rsv we're allocating for
4021  * @orig_bytes - the number of bytes we want
4022  * @flush - whether or not we can flush to make our reservation
4023  *
4024  * This will reserve orig_bytes number of bytes from the space info associated
4025  * with the block_rsv.  If there is not enough space it will make an attempt to
4026  * flush out space to make room.  It will do this by flushing delalloc if
4027  * possible or committing the transaction.  If flush is 0 then no attempts to
4028  * regain reservations will be made and this will fail if there is not enough
4029  * space already.
4030  */
4031 static int reserve_metadata_bytes(struct btrfs_root *root,
4032 				  struct btrfs_block_rsv *block_rsv,
4033 				  u64 orig_bytes,
4034 				  enum btrfs_reserve_flush_enum flush)
4035 {
4036 	struct btrfs_space_info *space_info = block_rsv->space_info;
4037 	u64 used;
4038 	u64 num_bytes = orig_bytes;
4039 	int flush_state = FLUSH_DELAYED_ITEMS_NR;
4040 	int ret = 0;
4041 	bool flushing = false;
4042 
4043 again:
4044 	ret = 0;
4045 	spin_lock(&space_info->lock);
4046 	/*
4047 	 * We only want to wait if somebody other than us is flushing and we
4048 	 * are actually allowed to flush all things.
4049 	 */
4050 	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4051 	       space_info->flush) {
4052 		spin_unlock(&space_info->lock);
4053 		/*
4054 		 * If we have a trans handle we can't wait because the flusher
4055 		 * may have to commit the transaction, which would mean we would
4056 		 * deadlock since we are waiting for the flusher to finish, but
4057 		 * hold the current transaction open.
4058 		 */
4059 		if (current->journal_info)
4060 			return -EAGAIN;
4061 		ret = wait_event_killable(space_info->wait, !space_info->flush);
4062 		/* Must have been killed, return */
4063 		if (ret)
4064 			return -EINTR;
4065 
4066 		spin_lock(&space_info->lock);
4067 	}
4068 
4069 	ret = -ENOSPC;
4070 	used = space_info->bytes_used + space_info->bytes_reserved +
4071 		space_info->bytes_pinned + space_info->bytes_readonly +
4072 		space_info->bytes_may_use;
4073 
4074 	/*
4075 	 * The idea here is that if we've not already over-reserved the block
4076 	 * group then we can go ahead and save our reservation first and then
4077 	 * start flushing if we need to.  Otherwise, if we've already
4078 	 * overcommitted, let's start flushing stuff first and then come back
4079 	 * and try to make our reservation.
4080 	 */
4081 	if (used <= space_info->total_bytes) {
4082 		if (used + orig_bytes <= space_info->total_bytes) {
4083 			space_info->bytes_may_use += orig_bytes;
4084 			trace_btrfs_space_reservation(root->fs_info,
4085 				"space_info", space_info->flags, orig_bytes, 1);
4086 			ret = 0;
4087 		} else {
4088 			/*
4089 			 * Ok, set num_bytes to orig_bytes since we aren't
4090 			 * overcommitted, this way we only try and reclaim what
4091 			 * we need.
4092 			 */
4093 			num_bytes = orig_bytes;
4094 		}
4095 	} else {
4096 		/*
4097 		 * Ok we're over committed, set num_bytes to the overcommitted
4098 		 * amount plus the amount of bytes that we need for this
4099 		 * reservation.
4100 		 */
4101 		num_bytes = used - space_info->total_bytes +
4102 			(orig_bytes * 2);
4103 	}
4104 
4105 	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4106 		space_info->bytes_may_use += orig_bytes;
4107 		trace_btrfs_space_reservation(root->fs_info, "space_info",
4108 					      space_info->flags, orig_bytes,
4109 					      1);
4110 		ret = 0;
4111 	}
4112 
4113 	/*
4114 	 * Couldn't make our reservation, save our place so while we're trying
4115 	 * to reclaim space we can actually use it instead of somebody else
4116 	 * stealing it from us.
4117 	 *
4118 	 * We make the other tasks wait for the flush only when we can flush
4119 	 * all things.
4120 	 */
4121 	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4122 		flushing = true;
4123 		space_info->flush = 1;
4124 	}
4125 
4126 	spin_unlock(&space_info->lock);
4127 
4128 	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4129 		goto out;
4130 
4131 	ret = flush_space(root, space_info, num_bytes, orig_bytes,
4132 			  flush_state);
4133 	flush_state++;
4134 
4135 	/*
4136 	 * If we are BTRFS_RESERVE_FLUSH_LIMIT, we cannot flush delalloc or we
4137 	 * would deadlock, so skip the delalloc flush states.
4138 	 */
4139 	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4140 	    (flush_state == FLUSH_DELALLOC ||
4141 	     flush_state == FLUSH_DELALLOC_WAIT))
4142 		flush_state = ALLOC_CHUNK;
4143 
4144 	if (!ret)
4145 		goto again;
4146 	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4147 		 flush_state < COMMIT_TRANS)
4148 		goto again;
4149 	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4150 		 flush_state <= COMMIT_TRANS)
4151 		goto again;
4152 
4153 out:
4154 	if (ret == -ENOSPC &&
4155 	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4156 		struct btrfs_block_rsv *global_rsv =
4157 			&root->fs_info->global_block_rsv;
4158 
4159 		if (block_rsv != global_rsv &&
4160 		    !block_rsv_use_bytes(global_rsv, orig_bytes))
4161 			ret = 0;
4162 	}
4163 	if (flushing) {
4164 		spin_lock(&space_info->lock);
4165 		space_info->flush = 0;
4166 		wake_up_all(&space_info->wait);
4167 		spin_unlock(&space_info->lock);
4168 	}
4169 	return ret;
4170 }
4171 
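/*
 * Pick the block reserve that allocations on behalf of @trans and @root
 * should be charged to: the transaction's reserve for COW-able roots (and
 * for the csum root while adding csums), otherwise the root's own reserve,
 * falling back to the empty reserve.
 */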
4172 static struct btrfs_block_rsv *get_block_rsv(
4173 					const struct btrfs_trans_handle *trans,
4174 					const struct btrfs_root *root)
4175 {
4176 	struct btrfs_block_rsv *block_rsv = NULL;
4177 
4178 	if (root->ref_cows)
4179 		block_rsv = trans->block_rsv;
4180 
4181 	if (root == root->fs_info->csum_root && trans->adding_csums)
4182 		block_rsv = trans->block_rsv;
4183 
4184 	if (!block_rsv)
4185 		block_rsv = root->block_rsv;
4186 
4187 	if (!block_rsv)
4188 		block_rsv = &root->fs_info->empty_block_rsv;
4189 
4190 	return block_rsv;
4191 }
4192 
4193 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4194 			       u64 num_bytes)
4195 {
4196 	int ret = -ENOSPC;
4197 	spin_lock(&block_rsv->lock);
4198 	if (block_rsv->reserved >= num_bytes) {
4199 		block_rsv->reserved -= num_bytes;
4200 		if (block_rsv->reserved < block_rsv->size)
4201 			block_rsv->full = 0;
4202 		ret = 0;
4203 	}
4204 	spin_unlock(&block_rsv->lock);
4205 	return ret;
4206 }
4207 
4208 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4209 				u64 num_bytes, int update_size)
4210 {
4211 	spin_lock(&block_rsv->lock);
4212 	block_rsv->reserved += num_bytes;
4213 	if (update_size)
4214 		block_rsv->size += num_bytes;
4215 	else if (block_rsv->reserved >= block_rsv->size)
4216 		block_rsv->full = 1;
4217 	spin_unlock(&block_rsv->lock);
4218 }
4219 
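/*
 * Shrink @block_rsv's size by @num_bytes ((u64)-1 means all of it).  Any
 * reserved bytes freed up by the shrink are first used to top up @dest (if
 * given); the remainder is returned to the space_info's bytes_may_use.
 */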
4220 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4221 				    struct btrfs_block_rsv *block_rsv,
4222 				    struct btrfs_block_rsv *dest, u64 num_bytes)
4223 {
4224 	struct btrfs_space_info *space_info = block_rsv->space_info;
4225 
4226 	spin_lock(&block_rsv->lock);
4227 	if (num_bytes == (u64)-1)
4228 		num_bytes = block_rsv->size;
4229 	block_rsv->size -= num_bytes;
4230 	if (block_rsv->reserved >= block_rsv->size) {
4231 		num_bytes = block_rsv->reserved - block_rsv->size;
4232 		block_rsv->reserved = block_rsv->size;
4233 		block_rsv->full = 1;
4234 	} else {
4235 		num_bytes = 0;
4236 	}
4237 	spin_unlock(&block_rsv->lock);
4238 
4239 	if (num_bytes > 0) {
4240 		if (dest) {
4241 			spin_lock(&dest->lock);
4242 			if (!dest->full) {
4243 				u64 bytes_to_add;
4244 
4245 				bytes_to_add = dest->size - dest->reserved;
4246 				bytes_to_add = min(num_bytes, bytes_to_add);
4247 				dest->reserved += bytes_to_add;
4248 				if (dest->reserved >= dest->size)
4249 					dest->full = 1;
4250 				num_bytes -= bytes_to_add;
4251 			}
4252 			spin_unlock(&dest->lock);
4253 		}
4254 		if (num_bytes) {
4255 			spin_lock(&space_info->lock);
4256 			space_info->bytes_may_use -= num_bytes;
4257 			trace_btrfs_space_reservation(fs_info, "space_info",
4258 					space_info->flags, num_bytes, 0);
4259 			space_info->reservation_progress++;
4260 			spin_unlock(&space_info->lock);
4261 		}
4262 	}
4263 }
4264 
4265 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4266 				   struct btrfs_block_rsv *dst, u64 num_bytes)
4267 {
4268 	int ret;
4269 
4270 	ret = block_rsv_use_bytes(src, num_bytes);
4271 	if (ret)
4272 		return ret;
4273 
4274 	block_rsv_add_bytes(dst, num_bytes, 1);
4275 	return 0;
4276 }
4277 
4278 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4279 {
4280 	memset(rsv, 0, sizeof(*rsv));
4281 	spin_lock_init(&rsv->lock);
4282 	rsv->type = type;
4283 }
4284 
4285 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4286 					      unsigned short type)
4287 {
4288 	struct btrfs_block_rsv *block_rsv;
4289 	struct btrfs_fs_info *fs_info = root->fs_info;
4290 
4291 	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4292 	if (!block_rsv)
4293 		return NULL;
4294 
4295 	btrfs_init_block_rsv(block_rsv, type);
4296 	block_rsv->space_info = __find_space_info(fs_info,
4297 						  BTRFS_BLOCK_GROUP_METADATA);
4298 	return block_rsv;
4299 }
4300 
4301 void btrfs_free_block_rsv(struct btrfs_root *root,
4302 			  struct btrfs_block_rsv *rsv)
4303 {
4304 	if (!rsv)
4305 		return;
4306 	btrfs_block_rsv_release(root, rsv, (u64)-1);
4307 	kfree(rsv);
4308 }
4309 
4310 int btrfs_block_rsv_add(struct btrfs_root *root,
4311 			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4312 			enum btrfs_reserve_flush_enum flush)
4313 {
4314 	int ret;
4315 
4316 	if (num_bytes == 0)
4317 		return 0;
4318 
4319 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4320 	if (!ret) {
4321 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
4322 		return 0;
4323 	}
4324 
4325 	return ret;
4326 }
4327 
4328 int btrfs_block_rsv_check(struct btrfs_root *root,
4329 			  struct btrfs_block_rsv *block_rsv, int min_factor)
4330 {
4331 	u64 num_bytes = 0;
4332 	int ret = -ENOSPC;
4333 
4334 	if (!block_rsv)
4335 		return 0;
4336 
4337 	spin_lock(&block_rsv->lock);
4338 	num_bytes = div_factor(block_rsv->size, min_factor);
4339 	if (block_rsv->reserved >= num_bytes)
4340 		ret = 0;
4341 	spin_unlock(&block_rsv->lock);
4342 
4343 	return ret;
4344 }
4345 
4346 int btrfs_block_rsv_refill(struct btrfs_root *root,
4347 			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4348 			   enum btrfs_reserve_flush_enum flush)
4349 {
4350 	u64 num_bytes = 0;
4351 	int ret = -ENOSPC;
4352 
4353 	if (!block_rsv)
4354 		return 0;
4355 
4356 	spin_lock(&block_rsv->lock);
4357 	num_bytes = min_reserved;
4358 	if (block_rsv->reserved >= num_bytes)
4359 		ret = 0;
4360 	else
4361 		num_bytes -= block_rsv->reserved;
4362 	spin_unlock(&block_rsv->lock);
4363 
4364 	if (!ret)
4365 		return 0;
4366 
4367 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4368 	if (!ret) {
4369 		block_rsv_add_bytes(block_rsv, num_bytes, 0);
4370 		return 0;
4371 	}
4372 
4373 	return ret;
4374 }
4375 
4376 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4377 			    struct btrfs_block_rsv *dst_rsv,
4378 			    u64 num_bytes)
4379 {
4380 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4381 }
4382 
4383 void btrfs_block_rsv_release(struct btrfs_root *root,
4384 			     struct btrfs_block_rsv *block_rsv,
4385 			     u64 num_bytes)
4386 {
4387 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4388 	if (global_rsv->full || global_rsv == block_rsv ||
4389 	    block_rsv->space_info != global_rsv->space_info)
4390 		global_rsv = NULL;
4391 	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4392 				num_bytes);
4393 }
4394 
4395 /*
4396  * helper to calculate size of global block reservation.
4397  * the desired value is sum of space used by extent tree,
4398  * checksum tree and root tree
4399  */
4400 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4401 {
4402 	struct btrfs_space_info *sinfo;
4403 	u64 num_bytes;
4404 	u64 meta_used;
4405 	u64 data_used;
4406 	int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4407 
4408 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4409 	spin_lock(&sinfo->lock);
4410 	data_used = sinfo->bytes_used;
4411 	spin_unlock(&sinfo->lock);
4412 
4413 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4414 	spin_lock(&sinfo->lock);
4415 	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4416 		data_used = 0;
4417 	meta_used = sinfo->bytes_used;
4418 	spin_unlock(&sinfo->lock);
4419 
4420 	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4421 		    csum_size * 2;
4422 	num_bytes += div64_u64(data_used + meta_used, 50);
4423 
4424 	if (num_bytes * 3 > meta_used)
4425 		num_bytes = div64_u64(meta_used, 3);
4426 
4427 	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4428 }
4429 
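/*
 * Recalculate the target size of the global block reserve and top its
 * reserved bytes up (or trim them back), adjusting the metadata space_info's
 * bytes_may_use accounting to match.
 */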
4430 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4431 {
4432 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4433 	struct btrfs_space_info *sinfo = block_rsv->space_info;
4434 	u64 num_bytes;
4435 
4436 	num_bytes = calc_global_metadata_size(fs_info);
4437 
4438 	spin_lock(&sinfo->lock);
4439 	spin_lock(&block_rsv->lock);
4440 
4441 	block_rsv->size = num_bytes;
4442 
4443 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4444 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
4445 		    sinfo->bytes_may_use;
4446 
4447 	if (sinfo->total_bytes > num_bytes) {
4448 		num_bytes = sinfo->total_bytes - num_bytes;
4449 		block_rsv->reserved += num_bytes;
4450 		sinfo->bytes_may_use += num_bytes;
4451 		trace_btrfs_space_reservation(fs_info, "space_info",
4452 				      sinfo->flags, num_bytes, 1);
4453 	}
4454 
4455 	if (block_rsv->reserved >= block_rsv->size) {
4456 		num_bytes = block_rsv->reserved - block_rsv->size;
4457 		sinfo->bytes_may_use -= num_bytes;
4458 		trace_btrfs_space_reservation(fs_info, "space_info",
4459 				      sinfo->flags, num_bytes, 0);
4460 		sinfo->reservation_progress++;
4461 		block_rsv->reserved = block_rsv->size;
4462 		block_rsv->full = 1;
4463 	}
4464 
4465 	spin_unlock(&block_rsv->lock);
4466 	spin_unlock(&sinfo->lock);
4467 }
4468 
4469 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4470 {
4471 	struct btrfs_space_info *space_info;
4472 
4473 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4474 	fs_info->chunk_block_rsv.space_info = space_info;
4475 
4476 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4477 	fs_info->global_block_rsv.space_info = space_info;
4478 	fs_info->delalloc_block_rsv.space_info = space_info;
4479 	fs_info->trans_block_rsv.space_info = space_info;
4480 	fs_info->empty_block_rsv.space_info = space_info;
4481 	fs_info->delayed_block_rsv.space_info = space_info;
4482 
4483 	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4484 	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4485 	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4486 	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4487 	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4488 
4489 	update_global_block_rsv(fs_info);
4490 }
4491 
4492 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4493 {
4494 	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4495 				(u64)-1);
4496 	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4497 	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4498 	WARN_ON(fs_info->trans_block_rsv.size > 0);
4499 	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4500 	WARN_ON(fs_info->chunk_block_rsv.size > 0);
4501 	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4502 	WARN_ON(fs_info->delayed_block_rsv.size > 0);
4503 	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4504 }
4505 
4506 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4507 				  struct btrfs_root *root)
4508 {
4509 	if (!trans->block_rsv)
4510 		return;
4511 
4512 	if (!trans->bytes_reserved)
4513 		return;
4514 
4515 	trace_btrfs_space_reservation(root->fs_info, "transaction",
4516 				      trans->transid, trans->bytes_reserved, 0);
4517 	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4518 	trans->bytes_reserved = 0;
4519 }
4520 
4521 /* Can only return 0 or -ENOSPC */
4522 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4523 				  struct inode *inode)
4524 {
4525 	struct btrfs_root *root = BTRFS_I(inode)->root;
4526 	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4527 	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4528 
4529 	/*
4530 	 * We need to hold space in order to delete our orphan item once we've
4531 	 * added it, so this takes the reservation so we can release it later
4532 	 * when we are truly done with the orphan item.
4533 	 */
4534 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4535 	trace_btrfs_space_reservation(root->fs_info, "orphan",
4536 				      btrfs_ino(inode), num_bytes, 1);
4537 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4538 }
4539 
4540 void btrfs_orphan_release_metadata(struct inode *inode)
4541 {
4542 	struct btrfs_root *root = BTRFS_I(inode)->root;
4543 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4544 	trace_btrfs_space_reservation(root->fs_info, "orphan",
4545 				      btrfs_ino(inode), num_bytes, 0);
4546 	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4547 }
4548 
4549 /*
4550  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4551  * root: the root of the parent directory
4552  * rsv: block reservation
4553  * items: the number of items that we need to reserve for
4554  * qgroup_reserved: used to return the reserved size in qgroup
4555  *
4556  * This function is used to reserve the space for snapshot/subvolume
4557  * creation and deletion. Those operations differ from the common
4558  * file/directory operations: they change two fs/file trees and the
4559  * root tree, so the number of items the qgroup reserves differs from
4560  * the free space reservation. That is why we can not use the space
4561  * reservation mechanism in start_transaction().
4562  */
4563 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4564 				     struct btrfs_block_rsv *rsv,
4565 				     int items,
4566 				     u64 *qgroup_reserved)
4567 {
4568 	u64 num_bytes;
4569 	int ret;
4570 
4571 	if (root->fs_info->quota_enabled) {
4572 		/* One for parent inode, two for dir entries */
4573 		num_bytes = 3 * root->leafsize;
4574 		ret = btrfs_qgroup_reserve(root, num_bytes);
4575 		if (ret)
4576 			return ret;
4577 	} else {
4578 		num_bytes = 0;
4579 	}
4580 
4581 	*qgroup_reserved = num_bytes;
4582 
4583 	num_bytes = btrfs_calc_trans_metadata_size(root, items);
4584 	rsv->space_info = __find_space_info(root->fs_info,
4585 					    BTRFS_BLOCK_GROUP_METADATA);
4586 	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4587 				  BTRFS_RESERVE_FLUSH_ALL);
4588 	if (ret) {
4589 		if (*qgroup_reserved)
4590 			btrfs_qgroup_free(root, *qgroup_reserved);
4591 	}
4592 
4593 	return ret;
4594 }
4595 
4596 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4597 				      struct btrfs_block_rsv *rsv,
4598 				      u64 qgroup_reserved)
4599 {
4600 	btrfs_block_rsv_release(root, rsv, (u64)-1);
4601 	if (qgroup_reserved)
4602 		btrfs_qgroup_free(root, qgroup_reserved);
4603 }
4604 
4605 /**
4606  * drop_outstanding_extent - drop an outstanding extent
4607  * @inode: the inode we're dropping the extent for
4608  *
4609  * This is called when we are freeing up an outstanding extent, either called
4610  * after an error or after an extent is written.  This will return the number of
4611  * reserved extents that need to be freed.  This must be called with
4612  * BTRFS_I(inode)->lock held.
4613  */
4614 static unsigned drop_outstanding_extent(struct inode *inode)
4615 {
4616 	unsigned drop_inode_space = 0;
4617 	unsigned dropped_extents = 0;
4618 
4619 	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4620 	BTRFS_I(inode)->outstanding_extents--;
4621 
4622 	if (BTRFS_I(inode)->outstanding_extents == 0 &&
4623 	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4624 			       &BTRFS_I(inode)->runtime_flags))
4625 		drop_inode_space = 1;
4626 
4627 	/*
4628 	 * If we have at least as many outstanding extents as we have reserved
4629 	 * then we need to leave the reserved extents count alone.
4630 	 */
4631 	if (BTRFS_I(inode)->outstanding_extents >=
4632 	    BTRFS_I(inode)->reserved_extents)
4633 		return drop_inode_space;
4634 
4635 	dropped_extents = BTRFS_I(inode)->reserved_extents -
4636 		BTRFS_I(inode)->outstanding_extents;
4637 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
4638 	return dropped_extents + drop_inode_space;
4639 }
4640 
4641 /**
4642  * calc_csum_metadata_size - return the amount of metadata space that must be
4643  *	reserved/freed for the given bytes.
4644  * @inode: the inode we're manipulating
4645  * @num_bytes: the number of bytes in question
4646  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4647  *
4648  * This adjusts the number of csum_bytes in the inode and then returns the
4649  * correct amount of metadata that must either be reserved or freed.  We
4650  * calculate how many checksums we can fit into one leaf and then divide the
4651  * number of bytes that will need to be checksummed by this value to figure out
4652  * how many checksums will be required.  If we are adding bytes then the number
4653  * may go up and we will return the number of additional bytes that must be
4654  * reserved.  If it is going down we will return the number of bytes that must
4655  * be freed.
4656  *
4657  * This must be called with BTRFS_I(inode)->lock held.
4658  */
4659 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4660 				   int reserve)
4661 {
4662 	struct btrfs_root *root = BTRFS_I(inode)->root;
4663 	u64 csum_size;
4664 	int num_csums_per_leaf;
4665 	int num_csums;
4666 	int old_csums;
4667 
4668 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4669 	    BTRFS_I(inode)->csum_bytes == 0)
4670 		return 0;
4671 
4672 	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4673 	if (reserve)
4674 		BTRFS_I(inode)->csum_bytes += num_bytes;
4675 	else
4676 		BTRFS_I(inode)->csum_bytes -= num_bytes;
4677 	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4678 	num_csums_per_leaf = (int)div64_u64(csum_size,
4679 					    sizeof(struct btrfs_csum_item) +
4680 					    sizeof(struct btrfs_disk_key));
4681 	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4682 	num_csums = num_csums + num_csums_per_leaf - 1;
4683 	num_csums = num_csums / num_csums_per_leaf;
4684 
4685 	old_csums = old_csums + num_csums_per_leaf - 1;
4686 	old_csums = old_csums / num_csums_per_leaf;
4687 
4688 	/* No change, no need to reserve more */
4689 	if (old_csums == num_csums)
4690 		return 0;
4691 
4692 	if (reserve)
4693 		return btrfs_calc_trans_metadata_size(root,
4694 						      num_csums - old_csums);
4695 
4696 	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4697 }
4698 
4699 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4700 {
4701 	struct btrfs_root *root = BTRFS_I(inode)->root;
4702 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4703 	u64 to_reserve = 0;
4704 	u64 csum_bytes;
4705 	unsigned nr_extents = 0;
4706 	int extra_reserve = 0;
4707 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4708 	int ret = 0;
4709 	bool delalloc_lock = true;
4710 	u64 to_free = 0;
4711 	unsigned dropped;
4712 
4713 	/* If we are a free space inode we need to not flush since we will be in
4714 	 * the middle of a transaction commit.  We also don't need the delalloc
4715 	 * mutex since we won't race with anybody.  We need this mostly to make
4716 	 * lockdep shut its filthy mouth.
4717 	 */
4718 	if (btrfs_is_free_space_inode(inode)) {
4719 		flush = BTRFS_RESERVE_NO_FLUSH;
4720 		delalloc_lock = false;
4721 	}
4722 
4723 	if (flush != BTRFS_RESERVE_NO_FLUSH &&
4724 	    btrfs_transaction_in_commit(root->fs_info))
4725 		schedule_timeout(1);
4726 
4727 	if (delalloc_lock)
4728 		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4729 
4730 	num_bytes = ALIGN(num_bytes, root->sectorsize);
4731 
4732 	spin_lock(&BTRFS_I(inode)->lock);
4733 	BTRFS_I(inode)->outstanding_extents++;
4734 
4735 	if (BTRFS_I(inode)->outstanding_extents >
4736 	    BTRFS_I(inode)->reserved_extents)
4737 		nr_extents = BTRFS_I(inode)->outstanding_extents -
4738 			BTRFS_I(inode)->reserved_extents;
4739 
4740 	/*
4741 	 * Add an item to reserve for updating the inode when we complete the
4742 	 * delalloc io.
4743 	 */
4744 	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4745 		      &BTRFS_I(inode)->runtime_flags)) {
4746 		nr_extents++;
4747 		extra_reserve = 1;
4748 	}
4749 
4750 	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4751 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4752 	csum_bytes = BTRFS_I(inode)->csum_bytes;
4753 	spin_unlock(&BTRFS_I(inode)->lock);
4754 
4755 	if (root->fs_info->quota_enabled) {
4756 		ret = btrfs_qgroup_reserve(root, num_bytes +
4757 					   nr_extents * root->leafsize);
4758 		if (ret)
4759 			goto out_fail;
4760 	}
4761 
4762 	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4763 	if (unlikely(ret)) {
4764 		if (root->fs_info->quota_enabled)
4765 			btrfs_qgroup_free(root, num_bytes +
4766 						nr_extents * root->leafsize);
4767 		goto out_fail;
4768 	}
4769 
4770 	spin_lock(&BTRFS_I(inode)->lock);
4771 	if (extra_reserve) {
4772 		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4773 			&BTRFS_I(inode)->runtime_flags);
4774 		nr_extents--;
4775 	}
4776 	BTRFS_I(inode)->reserved_extents += nr_extents;
4777 	spin_unlock(&BTRFS_I(inode)->lock);
4778 
4779 	if (delalloc_lock)
4780 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4781 
4782 	if (to_reserve)
4783 		trace_btrfs_space_reservation(root->fs_info,"delalloc",
4784 					      btrfs_ino(inode), to_reserve, 1);
4785 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
4786 
4787 	return 0;
4788 
4789 out_fail:
4790 	spin_lock(&BTRFS_I(inode)->lock);
4791 	dropped = drop_outstanding_extent(inode);
4792 	/*
4793 	 * If the inode's csum_bytes is the same as the original
4794 	 * csum_bytes then we know we haven't raced with any free()ers
4795 	 * so we can just reduce our inode's csum bytes and carry on.
4796 	 * Otherwise we have to do the normal free thing to account for
4797 	 * the case that the free side didn't free up its reserve
4798 	 * because of this outstanding reservation.
4799 	 */
4800 	if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4801 		calc_csum_metadata_size(inode, num_bytes, 0);
4802 	else
4803 		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4804 	spin_unlock(&BTRFS_I(inode)->lock);
4805 	if (dropped)
4806 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
4807 
4808 	if (to_free) {
4809 		btrfs_block_rsv_release(root, block_rsv, to_free);
4810 		trace_btrfs_space_reservation(root->fs_info, "delalloc",
4811 					      btrfs_ino(inode), to_free, 0);
4812 	}
4813 	if (delalloc_lock)
4814 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4815 	return ret;
4816 }
4817 
4818 /**
4819  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4820  * @inode: the inode to release the reservation for
4821  * @num_bytes: the number of bytes we're releasing
4822  *
4823  * This will release the metadata reservation for an inode.  This can be called
4824  * once we complete IO for a given set of bytes to release their metadata
4825  * reservations.
4826  */
4827 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4828 {
4829 	struct btrfs_root *root = BTRFS_I(inode)->root;
4830 	u64 to_free = 0;
4831 	unsigned dropped;
4832 
4833 	num_bytes = ALIGN(num_bytes, root->sectorsize);
4834 	spin_lock(&BTRFS_I(inode)->lock);
4835 	dropped = drop_outstanding_extent(inode);
4836 
4837 	if (num_bytes)
4838 		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4839 	spin_unlock(&BTRFS_I(inode)->lock);
4840 	if (dropped > 0)
4841 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
4842 
4843 	trace_btrfs_space_reservation(root->fs_info, "delalloc",
4844 				      btrfs_ino(inode), to_free, 0);
4845 	if (root->fs_info->quota_enabled) {
4846 		btrfs_qgroup_free(root, num_bytes +
4847 					dropped * root->leafsize);
4848 	}
4849 
4850 	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4851 				to_free);
4852 }
4853 
4854 /**
4855  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4856  * @inode: inode we're writing to
4857  * @num_bytes: the number of bytes we want to allocate
4858  *
4859  * This will do the following things
4860  *
4861  * o reserve space in the data space info for num_bytes
4862  * o reserve space in the metadata space info based on number of outstanding
4863  *   extents and how much csums will be needed
4864  * o add to the inode's ->delalloc_bytes
4865  * o add it to the fs_info's delalloc inodes list.
4866  *
4867  * This will return 0 for success and -ENOSPC if there is no space left.
4868  */
4869 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4870 {
4871 	int ret;
4872 
4873 	ret = btrfs_check_data_free_space(inode, num_bytes);
4874 	if (ret)
4875 		return ret;
4876 
4877 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4878 	if (ret) {
4879 		btrfs_free_reserved_data_space(inode, num_bytes);
4880 		return ret;
4881 	}
4882 
4883 	return 0;
4884 }
4885 
4886 /**
4887  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4888  * @inode: inode we're releasing space for
4889  * @num_bytes: the number of bytes we want to free up
4890  *
4891  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4892  * called in the case that we don't need the metadata AND data reservations
4893  * anymore, for example if there is an error or we insert an inline extent.
4894  *
4895  * This function will release the metadata space that was not used and will
4896  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4897  * list if there are no delalloc bytes left.
4898  */
4899 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4900 {
4901 	btrfs_delalloc_release_metadata(inode, num_bytes);
4902 	btrfs_free_reserved_data_space(inode, num_bytes);
4903 }
4904 
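/*
 * Update the used-bytes accounting of every block group that the range
 * [bytenr, bytenr + num_bytes) falls into, after an allocation (@alloc set)
 * or a free (@alloc clear).  Freed space is pinned until the transaction
 * commits.
 */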
4905 static int update_block_group(struct btrfs_root *root,
4906 			      u64 bytenr, u64 num_bytes, int alloc)
4907 {
4908 	struct btrfs_block_group_cache *cache = NULL;
4909 	struct btrfs_fs_info *info = root->fs_info;
4910 	u64 total = num_bytes;
4911 	u64 old_val;
4912 	u64 byte_in_group;
4913 	int factor;
4914 
4915 	/* block accounting for super block */
4916 	spin_lock(&info->delalloc_lock);
4917 	old_val = btrfs_super_bytes_used(info->super_copy);
4918 	if (alloc)
4919 		old_val += num_bytes;
4920 	else
4921 		old_val -= num_bytes;
4922 	btrfs_set_super_bytes_used(info->super_copy, old_val);
4923 	spin_unlock(&info->delalloc_lock);
4924 
4925 	while (total) {
4926 		cache = btrfs_lookup_block_group(info, bytenr);
4927 		if (!cache)
4928 			return -ENOENT;
4929 		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4930 				    BTRFS_BLOCK_GROUP_RAID1 |
4931 				    BTRFS_BLOCK_GROUP_RAID10))
4932 			factor = 2;
4933 		else
4934 			factor = 1;
4935 		/*
4936 		 * If this block group has free space cache written out, we
4937 		 * need to make sure to load it if we are removing space.  This
4938 		 * is because we need the unpinning stage to actually add the
4939 		 * space back to the block group, otherwise we will leak space.
4940 		 */
4941 		if (!alloc && cache->cached == BTRFS_CACHE_NO)
4942 			cache_block_group(cache, 1);
4943 
4944 		byte_in_group = bytenr - cache->key.objectid;
4945 		WARN_ON(byte_in_group > cache->key.offset);
4946 
4947 		spin_lock(&cache->space_info->lock);
4948 		spin_lock(&cache->lock);
4949 
4950 		if (btrfs_test_opt(root, SPACE_CACHE) &&
4951 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
4952 			cache->disk_cache_state = BTRFS_DC_CLEAR;
4953 
4954 		cache->dirty = 1;
4955 		old_val = btrfs_block_group_used(&cache->item);
4956 		num_bytes = min(total, cache->key.offset - byte_in_group);
4957 		if (alloc) {
4958 			old_val += num_bytes;
4959 			btrfs_set_block_group_used(&cache->item, old_val);
4960 			cache->reserved -= num_bytes;
4961 			cache->space_info->bytes_reserved -= num_bytes;
4962 			cache->space_info->bytes_used += num_bytes;
4963 			cache->space_info->disk_used += num_bytes * factor;
4964 			spin_unlock(&cache->lock);
4965 			spin_unlock(&cache->space_info->lock);
4966 		} else {
4967 			old_val -= num_bytes;
4968 			btrfs_set_block_group_used(&cache->item, old_val);
4969 			cache->pinned += num_bytes;
4970 			cache->space_info->bytes_pinned += num_bytes;
4971 			cache->space_info->bytes_used -= num_bytes;
4972 			cache->space_info->disk_used -= num_bytes * factor;
4973 			spin_unlock(&cache->lock);
4974 			spin_unlock(&cache->space_info->lock);
4975 
4976 			set_extent_dirty(info->pinned_extents,
4977 					 bytenr, bytenr + num_bytes - 1,
4978 					 GFP_NOFS | __GFP_NOFAIL);
4979 		}
4980 		btrfs_put_block_group(cache);
4981 		total -= num_bytes;
4982 		bytenr += num_bytes;
4983 	}
4984 	return 0;
4985 }
4986 
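/*
 * Return the logical address of the first block group: the cached
 * fs_info->first_logical_byte when valid, otherwise looked up from the
 * block group at or after @search_start.
 */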
4987 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4988 {
4989 	struct btrfs_block_group_cache *cache;
4990 	u64 bytenr;
4991 
4992 	spin_lock(&root->fs_info->block_group_cache_lock);
4993 	bytenr = root->fs_info->first_logical_byte;
4994 	spin_unlock(&root->fs_info->block_group_cache_lock);
4995 
4996 	if (bytenr < (u64)-1)
4997 		return bytenr;
4998 
4999 	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5000 	if (!cache)
5001 		return 0;
5002 
5003 	bytenr = cache->key.objectid;
5004 	btrfs_put_block_group(cache);
5005 
5006 	return bytenr;
5007 }
5008 
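/*
 * Account [bytenr, bytenr + num_bytes) as pinned in @cache (moving it out of
 * the reserved counters when @reserved is set) and mark the range dirty in
 * the pinned_extents tree so it is unpinned at transaction commit.
 */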
5009 static int pin_down_extent(struct btrfs_root *root,
5010 			   struct btrfs_block_group_cache *cache,
5011 			   u64 bytenr, u64 num_bytes, int reserved)
5012 {
5013 	spin_lock(&cache->space_info->lock);
5014 	spin_lock(&cache->lock);
5015 	cache->pinned += num_bytes;
5016 	cache->space_info->bytes_pinned += num_bytes;
5017 	if (reserved) {
5018 		cache->reserved -= num_bytes;
5019 		cache->space_info->bytes_reserved -= num_bytes;
5020 	}
5021 	spin_unlock(&cache->lock);
5022 	spin_unlock(&cache->space_info->lock);
5023 
5024 	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5025 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5026 	return 0;
5027 }
5028 
5029 /*
5030  * this function must be called within a transaction
5031  */
5032 int btrfs_pin_extent(struct btrfs_root *root,
5033 		     u64 bytenr, u64 num_bytes, int reserved)
5034 {
5035 	struct btrfs_block_group_cache *cache;
5036 
5037 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5038 	BUG_ON(!cache); /* Logic error */
5039 
5040 	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5041 
5042 	btrfs_put_block_group(cache);
5043 	return 0;
5044 }
5045 
5046 /*
5047  * this function must be called within a transaction
5048  */
5049 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5050 				    u64 bytenr, u64 num_bytes)
5051 {
5052 	struct btrfs_block_group_cache *cache;
5053 
5054 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5055 	BUG_ON(!cache); /* Logic error */
5056 
5057 	/*
5058 	 * pull in the free space cache (if any) so that our pin
5059 	 * removes the free space from the cache.  We have load_only set
5060 	 * to one because the slow code to read in the free extents does check
5061 	 * the pinned extents.
5062 	 */
5063 	cache_block_group(cache, 1);
5064 
5065 	pin_down_extent(root, cache, bytenr, num_bytes, 0);
5066 
5067 	/* remove us from the free space cache (if we're there at all) */
5068 	btrfs_remove_free_space(cache, bytenr, num_bytes);
5069 	btrfs_put_block_group(cache);
5070 	return 0;
5071 }
5072 
5073 /**
5074  * btrfs_update_reserved_bytes - update the block_group and space info counters
5075  * @cache:	The cache we are manipulating
5076  * @num_bytes:	The number of bytes in question
5077  * @reserve:	One of the reservation enums
5078  *
5079  * This is called by the allocator when it reserves space, or by somebody who is
5080  * freeing space that was never actually used on disk.  For example if you
5081  * reserve some space for a new leaf in transaction A and before transaction A
5082  * commits you free that leaf, you call this with reserve set to 0 in order to
5083  * clear the reservation.
5084  *
5085  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5086  * ENOSPC accounting.  For data we handle the reservation through clearing the
5087  * delalloc bits in the io_tree.  We have to do this since we could end up
5088  * allocating less disk space for the amount of data we have reserved in the
5089  * case of compression.
5090  *
5091  * If this is a reservation and the block group has become read only we cannot
5092  * make the reservation and return -EAGAIN, otherwise this function always
5093  * succeeds.
5094  */
5095 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5096 				       u64 num_bytes, int reserve)
5097 {
5098 	struct btrfs_space_info *space_info = cache->space_info;
5099 	int ret = 0;
5100 
5101 	spin_lock(&space_info->lock);
5102 	spin_lock(&cache->lock);
5103 	if (reserve != RESERVE_FREE) {
5104 		if (cache->ro) {
5105 			ret = -EAGAIN;
5106 		} else {
5107 			cache->reserved += num_bytes;
5108 			space_info->bytes_reserved += num_bytes;
5109 			if (reserve == RESERVE_ALLOC) {
5110 				trace_btrfs_space_reservation(cache->fs_info,
5111 						"space_info", space_info->flags,
5112 						num_bytes, 0);
5113 				space_info->bytes_may_use -= num_bytes;
5114 			}
5115 		}
5116 	} else {
5117 		if (cache->ro)
5118 			space_info->bytes_readonly += num_bytes;
5119 		cache->reserved -= num_bytes;
5120 		space_info->bytes_reserved -= num_bytes;
5121 		space_info->reservation_progress++;
5122 	}
5123 	spin_unlock(&cache->lock);
5124 	spin_unlock(&space_info->lock);
5125 	return ret;
5126 }
5127 
5128 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5129 				struct btrfs_root *root)
5130 {
5131 	struct btrfs_fs_info *fs_info = root->fs_info;
5132 	struct btrfs_caching_control *next;
5133 	struct btrfs_caching_control *caching_ctl;
5134 	struct btrfs_block_group_cache *cache;
5135 
5136 	down_write(&fs_info->extent_commit_sem);
5137 
5138 	list_for_each_entry_safe(caching_ctl, next,
5139 				 &fs_info->caching_block_groups, list) {
5140 		cache = caching_ctl->block_group;
5141 		if (block_group_cache_done(cache)) {
5142 			cache->last_byte_to_unpin = (u64)-1;
5143 			list_del_init(&caching_ctl->list);
5144 			put_caching_control(caching_ctl);
5145 		} else {
5146 			cache->last_byte_to_unpin = caching_ctl->progress;
5147 		}
5148 	}
5149 
5150 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5151 		fs_info->pinned_extents = &fs_info->freed_extents[1];
5152 	else
5153 		fs_info->pinned_extents = &fs_info->freed_extents[0];
5154 
5155 	up_write(&fs_info->extent_commit_sem);
5156 
5157 	update_global_block_rsv(fs_info);
5158 }
5159 
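/*
 * Give the byte range [start, end] back to the free space caches of the
 * block groups it spans and drop the pinned accounting, refilling the
 * global block reserve from the released bytes when it has room.
 */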
5160 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5161 {
5162 	struct btrfs_fs_info *fs_info = root->fs_info;
5163 	struct btrfs_block_group_cache *cache = NULL;
5164 	struct btrfs_space_info *space_info;
5165 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5166 	u64 len;
5167 	bool readonly;
5168 
5169 	while (start <= end) {
5170 		readonly = false;
5171 		if (!cache ||
5172 		    start >= cache->key.objectid + cache->key.offset) {
5173 			if (cache)
5174 				btrfs_put_block_group(cache);
5175 			cache = btrfs_lookup_block_group(fs_info, start);
5176 			BUG_ON(!cache); /* Logic error */
5177 		}
5178 
5179 		len = cache->key.objectid + cache->key.offset - start;
5180 		len = min(len, end + 1 - start);
5181 
5182 		if (start < cache->last_byte_to_unpin) {
5183 			len = min(len, cache->last_byte_to_unpin - start);
5184 			btrfs_add_free_space(cache, start, len);
5185 		}
5186 
5187 		start += len;
5188 		space_info = cache->space_info;
5189 
5190 		spin_lock(&space_info->lock);
5191 		spin_lock(&cache->lock);
5192 		cache->pinned -= len;
5193 		space_info->bytes_pinned -= len;
5194 		if (cache->ro) {
5195 			space_info->bytes_readonly += len;
5196 			readonly = true;
5197 		}
5198 		spin_unlock(&cache->lock);
5199 		if (!readonly && global_rsv->space_info == space_info) {
5200 			spin_lock(&global_rsv->lock);
5201 			if (!global_rsv->full) {
5202 				len = min(len, global_rsv->size -
5203 					  global_rsv->reserved);
5204 				global_rsv->reserved += len;
5205 				space_info->bytes_may_use += len;
5206 				if (global_rsv->reserved >= global_rsv->size)
5207 					global_rsv->full = 1;
5208 			}
5209 			spin_unlock(&global_rsv->lock);
5210 		}
5211 		spin_unlock(&space_info->lock);
5212 	}
5213 
5214 	if (cache)
5215 		btrfs_put_block_group(cache);
5216 	return 0;
5217 }
5218 
5219 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5220 			       struct btrfs_root *root)
5221 {
5222 	struct btrfs_fs_info *fs_info = root->fs_info;
5223 	struct extent_io_tree *unpin;
5224 	u64 start;
5225 	u64 end;
5226 	int ret;
5227 
5228 	if (trans->aborted)
5229 		return 0;
5230 
5231 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5232 		unpin = &fs_info->freed_extents[1];
5233 	else
5234 		unpin = &fs_info->freed_extents[0];
5235 
5236 	while (1) {
5237 		ret = find_first_extent_bit(unpin, 0, &start, &end,
5238 					    EXTENT_DIRTY, NULL);
5239 		if (ret)
5240 			break;
5241 
5242 		if (btrfs_test_opt(root, DISCARD))
5243 			ret = btrfs_discard_extent(root, start,
5244 						   end + 1 - start, NULL);
5245 
5246 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
5247 		unpin_extent_range(root, start, end);
5248 		cond_resched();
5249 	}
5250 
5251 	return 0;
5252 }
5253 
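/*
 * Drop @refs_to_drop references to the extent at @bytenr.  This looks up the
 * backref (inline or keyed), decrements the extent item's reference count
 * and, once the count hits zero, removes the extent item itself.
 */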
5254 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5255 				struct btrfs_root *root,
5256 				u64 bytenr, u64 num_bytes, u64 parent,
5257 				u64 root_objectid, u64 owner_objectid,
5258 				u64 owner_offset, int refs_to_drop,
5259 				struct btrfs_delayed_extent_op *extent_op)
5260 {
5261 	struct btrfs_key key;
5262 	struct btrfs_path *path;
5263 	struct btrfs_fs_info *info = root->fs_info;
5264 	struct btrfs_root *extent_root = info->extent_root;
5265 	struct extent_buffer *leaf;
5266 	struct btrfs_extent_item *ei;
5267 	struct btrfs_extent_inline_ref *iref;
5268 	int ret;
5269 	int is_data;
5270 	int extent_slot = 0;
5271 	int found_extent = 0;
5272 	int num_to_del = 1;
5273 	u32 item_size;
5274 	u64 refs;
5275 
5276 	path = btrfs_alloc_path();
5277 	if (!path)
5278 		return -ENOMEM;
5279 
5280 	path->reada = 1;
5281 	path->leave_spinning = 1;
5282 
5283 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5284 	BUG_ON(!is_data && refs_to_drop != 1);
5285 
5286 	ret = lookup_extent_backref(trans, extent_root, path, &iref,
5287 				    bytenr, num_bytes, parent,
5288 				    root_objectid, owner_objectid,
5289 				    owner_offset);
5290 	if (ret == 0) {
5291 		extent_slot = path->slots[0];
5292 		while (extent_slot >= 0) {
5293 			btrfs_item_key_to_cpu(path->nodes[0], &key,
5294 					      extent_slot);
5295 			if (key.objectid != bytenr)
5296 				break;
5297 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5298 			    key.offset == num_bytes) {
5299 				found_extent = 1;
5300 				break;
5301 			}
5302 			if (path->slots[0] - extent_slot > 5)
5303 				break;
5304 			extent_slot--;
5305 		}
5306 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5307 		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5308 		if (found_extent && item_size < sizeof(*ei))
5309 			found_extent = 0;
5310 #endif
5311 		if (!found_extent) {
5312 			BUG_ON(iref);
5313 			ret = remove_extent_backref(trans, extent_root, path,
5314 						    NULL, refs_to_drop,
5315 						    is_data);
5316 			if (ret) {
5317 				btrfs_abort_transaction(trans, extent_root, ret);
5318 				goto out;
5319 			}
5320 			btrfs_release_path(path);
5321 			path->leave_spinning = 1;
5322 
5323 			key.objectid = bytenr;
5324 			key.type = BTRFS_EXTENT_ITEM_KEY;
5325 			key.offset = num_bytes;
5326 
5327 			ret = btrfs_search_slot(trans, extent_root,
5328 						&key, path, -1, 1);
5329 			if (ret) {
5330 				printk(KERN_ERR "umm, got %d back from search"
5331 				       ", was looking for %llu\n", ret,
5332 				       (unsigned long long)bytenr);
5333 				if (ret > 0)
5334 					btrfs_print_leaf(extent_root,
5335 							 path->nodes[0]);
5336 			}
5337 			if (ret < 0) {
5338 				btrfs_abort_transaction(trans, extent_root, ret);
5339 				goto out;
5340 			}
5341 			extent_slot = path->slots[0];
5342 		}
5343 	} else if (ret == -ENOENT) {
5344 		btrfs_print_leaf(extent_root, path->nodes[0]);
5345 		WARN_ON(1);
5346 		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5347 		       "parent %llu root %llu  owner %llu offset %llu\n",
5348 		       (unsigned long long)bytenr,
5349 		       (unsigned long long)parent,
5350 		       (unsigned long long)root_objectid,
5351 		       (unsigned long long)owner_objectid,
5352 		       (unsigned long long)owner_offset);
5353 	} else {
5354 		btrfs_abort_transaction(trans, extent_root, ret);
5355 		goto out;
5356 	}
5357 
5358 	leaf = path->nodes[0];
5359 	item_size = btrfs_item_size_nr(leaf, extent_slot);
5360 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5361 	if (item_size < sizeof(*ei)) {
5362 		BUG_ON(found_extent || extent_slot != path->slots[0]);
5363 		ret = convert_extent_item_v0(trans, extent_root, path,
5364 					     owner_objectid, 0);
5365 		if (ret < 0) {
5366 			btrfs_abort_transaction(trans, extent_root, ret);
5367 			goto out;
5368 		}
5369 
5370 		btrfs_release_path(path);
5371 		path->leave_spinning = 1;
5372 
5373 		key.objectid = bytenr;
5374 		key.type = BTRFS_EXTENT_ITEM_KEY;
5375 		key.offset = num_bytes;
5376 
5377 		ret = btrfs_search_slot(trans, extent_root, &key, path,
5378 					-1, 1);
5379 		if (ret) {
5380 			printk(KERN_ERR "umm, got %d back from search"
5381 			       ", was looking for %llu\n", ret,
5382 			       (unsigned long long)bytenr);
5383 			btrfs_print_leaf(extent_root, path->nodes[0]);
5384 		}
5385 		if (ret < 0) {
5386 			btrfs_abort_transaction(trans, extent_root, ret);
5387 			goto out;
5388 		}
5389 
5390 		extent_slot = path->slots[0];
5391 		leaf = path->nodes[0];
5392 		item_size = btrfs_item_size_nr(leaf, extent_slot);
5393 	}
5394 #endif
5395 	BUG_ON(item_size < sizeof(*ei));
5396 	ei = btrfs_item_ptr(leaf, extent_slot,
5397 			    struct btrfs_extent_item);
5398 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5399 		struct btrfs_tree_block_info *bi;
5400 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5401 		bi = (struct btrfs_tree_block_info *)(ei + 1);
5402 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5403 	}
5404 
5405 	refs = btrfs_extent_refs(leaf, ei);
5406 	BUG_ON(refs < refs_to_drop);
5407 	refs -= refs_to_drop;
5408 
5409 	if (refs > 0) {
5410 		if (extent_op)
5411 			__run_delayed_extent_op(extent_op, leaf, ei);
5412 		/*
5413 		 * In the case of an inline back ref, the reference count will
5414 		 * be updated by remove_extent_backref
5415 		 */
5416 		if (iref) {
5417 			BUG_ON(!found_extent);
5418 		} else {
5419 			btrfs_set_extent_refs(leaf, ei, refs);
5420 			btrfs_mark_buffer_dirty(leaf);
5421 		}
5422 		if (found_extent) {
5423 			ret = remove_extent_backref(trans, extent_root, path,
5424 						    iref, refs_to_drop,
5425 						    is_data);
5426 			if (ret) {
5427 				btrfs_abort_transaction(trans, extent_root, ret);
5428 				goto out;
5429 			}
5430 		}
5431 	} else {
5432 		if (found_extent) {
5433 			BUG_ON(is_data && refs_to_drop !=
5434 			       extent_data_ref_count(root, path, iref));
5435 			if (iref) {
5436 				BUG_ON(path->slots[0] != extent_slot);
5437 			} else {
5438 				BUG_ON(path->slots[0] != extent_slot + 1);
5439 				path->slots[0] = extent_slot;
5440 				num_to_del = 2;
5441 			}
5442 		}
5443 
5444 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5445 				      num_to_del);
5446 		if (ret) {
5447 			btrfs_abort_transaction(trans, extent_root, ret);
5448 			goto out;
5449 		}
5450 		btrfs_release_path(path);
5451 
5452 		if (is_data) {
5453 			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5454 			if (ret) {
5455 				btrfs_abort_transaction(trans, extent_root, ret);
5456 				goto out;
5457 			}
5458 		}
5459 
5460 		ret = update_block_group(root, bytenr, num_bytes, 0);
5461 		if (ret) {
5462 			btrfs_abort_transaction(trans, extent_root, ret);
5463 			goto out;
5464 		}
5465 	}
5466 out:
5467 	btrfs_free_path(path);
5468 	return ret;
5469 }
5470 
5471 /*
5472  * when we free a block, it is possible (and likely) that we free the last
5473  * delayed ref for that extent as well.  This searches the delayed ref tree for
5474  * a given extent, and if there are no other delayed refs to be processed, it
5475  * removes it from the tree.
5476  */
5477 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5478 				      struct btrfs_root *root, u64 bytenr)
5479 {
5480 	struct btrfs_delayed_ref_head *head;
5481 	struct btrfs_delayed_ref_root *delayed_refs;
5482 	struct btrfs_delayed_ref_node *ref;
5483 	struct rb_node *node;
5484 	int ret = 0;
5485 
5486 	delayed_refs = &trans->transaction->delayed_refs;
5487 	spin_lock(&delayed_refs->lock);
5488 	head = btrfs_find_delayed_ref_head(trans, bytenr);
5489 	if (!head)
5490 		goto out;
5491 
5492 	node = rb_prev(&head->node.rb_node);
5493 	if (!node)
5494 		goto out;
5495 
5496 	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5497 
5498 	/* there are still entries for this ref, we can't drop it */
5499 	if (ref->bytenr == bytenr)
5500 		goto out;
5501 
5502 	if (head->extent_op) {
5503 		if (!head->must_insert_reserved)
5504 			goto out;
5505 		btrfs_free_delayed_extent_op(head->extent_op);
5506 		head->extent_op = NULL;
5507 	}
5508 
5509 	/*
5510 	 * waiting for the lock here would deadlock.  If someone else has it
5511 	 * locked, they are already in the process of dropping it anyway
5512 	 */
5513 	if (!mutex_trylock(&head->mutex))
5514 		goto out;
5515 
5516 	/*
5517 	 * at this point we have a head with no other entries.  Go
5518 	 * ahead and process it.
5519 	 */
5520 	head->node.in_tree = 0;
5521 	rb_erase(&head->node.rb_node, &delayed_refs->root);
5522 
5523 	delayed_refs->num_entries--;
5524 
5525 	/*
5526 	 * we don't take a ref on the node because we're removing it from the
5527 	 * tree, so we just steal the ref the tree was holding.
5528 	 */
5529 	delayed_refs->num_heads--;
5530 	if (list_empty(&head->cluster))
5531 		delayed_refs->num_heads_ready--;
5532 
5533 	list_del_init(&head->cluster);
5534 	spin_unlock(&delayed_refs->lock);
5535 
5536 	BUG_ON(head->extent_op);
5537 	if (head->must_insert_reserved)
5538 		ret = 1;
5539 
5540 	mutex_unlock(&head->mutex);
5541 	btrfs_put_delayed_ref(&head->node);
5542 	return ret;
5543 out:
5544 	spin_unlock(&delayed_refs->lock);
5545 	return 0;
5546 }
5547 
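/*
 * Free a tree block: queue a delayed ref drop for non-log trees.  When
 * this is the last reference and the block was both allocated in the
 * running transaction and never written to disk, its space goes straight
 * back to the free space cache; blocks that already hit disk are pinned
 * instead so the space is not reused before the commit.
 */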
5548 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5549 			   struct btrfs_root *root,
5550 			   struct extent_buffer *buf,
5551 			   u64 parent, int last_ref)
5552 {
5553 	struct btrfs_block_group_cache *cache = NULL;
5554 	int ret;
5555 
5556 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5557 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5558 					buf->start, buf->len,
5559 					parent, root->root_key.objectid,
5560 					btrfs_header_level(buf),
5561 					BTRFS_DROP_DELAYED_REF, NULL, 0);
5562 		BUG_ON(ret); /* -ENOMEM */
5563 	}
5564 
5565 	if (!last_ref)
5566 		return;
5567 
5568 	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5569 
5570 	if (btrfs_header_generation(buf) == trans->transid) {
5571 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5572 			ret = check_ref_cleanup(trans, root, buf->start);
5573 			if (!ret)
5574 				goto out;
5575 		}
5576 
5577 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5578 			pin_down_extent(root, cache, buf->start, buf->len, 1);
5579 			goto out;
5580 		}
5581 
5582 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5583 
5584 		btrfs_add_free_space(cache, buf->start, buf->len);
5585 		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5586 	}
5587 out:
5588 	/*
5589 	 * We are deleting the buffer, so clear the corrupt flag since it doesn't matter
5590 	 * anymore.
5591 	 */
5592 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5593 	btrfs_put_block_group(cache);
5594 }
5595 
5596 /* Can return -ENOMEM */
5597 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5598 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5599 		      u64 owner, u64 offset, int for_cow)
5600 {
5601 	int ret;
5602 	struct btrfs_fs_info *fs_info = root->fs_info;
5603 
5604 	/*
5605 	 * tree log blocks never actually go into the extent allocation
5606 	 * tree, just update pinning info and exit early.
5607 	 */
5608 	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5609 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5610 		/* unlocks the pinned mutex */
5611 		btrfs_pin_extent(root, bytenr, num_bytes, 1);
5612 		ret = 0;
5613 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5614 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5615 					num_bytes,
5616 					parent, root_objectid, (int)owner,
5617 					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5618 	} else {
5619 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5620 						num_bytes,
5621 						parent, root_objectid, owner,
5622 						offset, BTRFS_DROP_DELAYED_REF,
5623 						NULL, for_cow);
5624 	}
5625 	return ret;
5626 }
5627 
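/*
 * round the proposed allocation start up to the filesystem's stripe size
 * (the block group and size arguments are currently unused)
 */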
5628 static u64 stripe_align(struct btrfs_root *root,
5629 			struct btrfs_block_group_cache *cache,
5630 			u64 val, u64 num_bytes)
5631 {
5632 	u64 ret = ALIGN(val, root->stripesize);
5633 	return ret;
5634 }
5635 
5636 /*
5637  * when we wait for progress in the block group caching, it's because
5638  * our allocation attempt failed at least once.  So, we must sleep
5639  * and let some progress happen before we try again.
5640  *
5641  * This function will sleep at least once waiting for new free space to
5642  * show up, and then it will check the block group free space numbers
5643  * for our min num_bytes.  Another option is to have it go ahead
5644  * and look in the rbtree for a free extent of a given size, but this
5645  * is a good start.
5646  */
5647 static noinline int
5648 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5649 				u64 num_bytes)
5650 {
5651 	struct btrfs_caching_control *caching_ctl;
5652 
5653 	caching_ctl = get_caching_control(cache);
5654 	if (!caching_ctl)
5655 		return 0;
5656 
5657 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5658 		   (cache->free_space_ctl->free_space >= num_bytes));
5659 
5660 	put_caching_control(caching_ctl);
5661 	return 0;
5662 }
5663 
5664 static noinline int
5665 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5666 {
5667 	struct btrfs_caching_control *caching_ctl;
5668 
5669 	caching_ctl = get_caching_control(cache);
5670 	if (!caching_ctl)
5671 		return 0;
5672 
5673 	wait_event(caching_ctl->wait, block_group_cache_done(cache));
5674 
5675 	put_caching_control(caching_ctl);
5676 	return 0;
5677 }
5678 
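/*
 * map block group allocation flags to the index used for the per-raid
 * lists in space_info->block_groups
 */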
5679 int __get_raid_index(u64 flags)
5680 {
5681 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
5682 		return BTRFS_RAID_RAID10;
5683 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5684 		return BTRFS_RAID_RAID1;
5685 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
5686 		return BTRFS_RAID_DUP;
5687 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5688 		return BTRFS_RAID_RAID0;
5689 	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
5690 		return BTRFS_RAID_RAID5;
5691 	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
5692 		return BTRFS_RAID_RAID6;
5693 
5694 	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
5695 }
5696 
5697 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5698 {
5699 	return __get_raid_index(cache->flags);
5700 }
5701 
5702 enum btrfs_loop_type {
5703 	LOOP_CACHING_NOWAIT = 0,
5704 	LOOP_CACHING_WAIT = 1,
5705 	LOOP_ALLOC_CHUNK = 2,
5706 	LOOP_NO_EMPTY_SIZE = 3,
5707 };
5708 
5709 /*
5710  * walks the btree of allocated extents and finds a hole of a given size.
5711  * The key ins is changed to record the hole:
5712  * ins->objectid == block start
5713  * ins->flags = BTRFS_EXTENT_ITEM_KEY
5714  * ins->offset == number of blocks
5715  * Any available blocks before search_start are skipped.
5716  */
5717 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5718 				     struct btrfs_root *orig_root,
5719 				     u64 num_bytes, u64 empty_size,
5720 				     u64 hint_byte, struct btrfs_key *ins,
5721 				     u64 data)
5722 {
5723 	int ret = 0;
5724 	struct btrfs_root *root = orig_root->fs_info->extent_root;
5725 	struct btrfs_free_cluster *last_ptr = NULL;
5726 	struct btrfs_block_group_cache *block_group = NULL;
5727 	struct btrfs_block_group_cache *used_block_group;
5728 	u64 search_start = 0;
5729 	int empty_cluster = 2 * 1024 * 1024;
5730 	struct btrfs_space_info *space_info;
5731 	int loop = 0;
5732 	int index = __get_raid_index(data);
5733 	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5734 		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5735 	bool found_uncached_bg = false;
5736 	bool failed_cluster_refill = false;
5737 	bool failed_alloc = false;
5738 	bool use_cluster = true;
5739 	bool have_caching_bg = false;
5740 
5741 	WARN_ON(num_bytes < root->sectorsize);
5742 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5743 	ins->objectid = 0;
5744 	ins->offset = 0;
5745 
5746 	trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5747 
5748 	space_info = __find_space_info(root->fs_info, data);
5749 	if (!space_info) {
5750 		printk(KERN_ERR "No space info for %llu\n", data);
5751 		return -ENOSPC;
5752 	}
5753 
5754 	/*
5755 	 * If the space info is for both data and metadata it means we have a
5756 	 * small filesystem and we can't use the clustering stuff.
5757 	 */
5758 	if (btrfs_mixed_space_info(space_info))
5759 		use_cluster = false;
5760 
5761 	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5762 		last_ptr = &root->fs_info->meta_alloc_cluster;
5763 		if (!btrfs_test_opt(root, SSD))
5764 			empty_cluster = 64 * 1024;
5765 	}
5766 
5767 	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5768 	    btrfs_test_opt(root, SSD)) {
5769 		last_ptr = &root->fs_info->data_alloc_cluster;
5770 	}
5771 
5772 	if (last_ptr) {
5773 		spin_lock(&last_ptr->lock);
5774 		if (last_ptr->block_group)
5775 			hint_byte = last_ptr->window_start;
5776 		spin_unlock(&last_ptr->lock);
5777 	}
5778 
5779 	search_start = max(search_start, first_logical_byte(root, 0));
5780 	search_start = max(search_start, hint_byte);
5781 
5782 	if (!last_ptr)
5783 		empty_cluster = 0;
5784 
5785 	if (search_start == hint_byte) {
5786 		block_group = btrfs_lookup_block_group(root->fs_info,
5787 						       search_start);
5788 		used_block_group = block_group;
5789 		/*
5790 		 * we don't want to use the block group if it doesn't match our
5791 		 * allocation bits, or if it's not cached.
5792 		 *
5793 		 * However if we are re-searching with an ideal block group
5794 		 * picked out then we don't care that the block group is cached.
5795 		 */
5796 		if (block_group && block_group_bits(block_group, data) &&
5797 		    block_group->cached != BTRFS_CACHE_NO) {
5798 			down_read(&space_info->groups_sem);
5799 			if (list_empty(&block_group->list) ||
5800 			    block_group->ro) {
5801 				/*
5802 				 * someone is removing this block group,
5803 				 * we can't jump into the have_block_group
5804 				 * target because our list pointers are not
5805 				 * valid
5806 				 */
5807 				btrfs_put_block_group(block_group);
5808 				up_read(&space_info->groups_sem);
5809 			} else {
5810 				index = get_block_group_index(block_group);
5811 				goto have_block_group;
5812 			}
5813 		} else if (block_group) {
5814 			btrfs_put_block_group(block_group);
5815 		}
5816 	}
5817 search:
5818 	have_caching_bg = false;
5819 	down_read(&space_info->groups_sem);
5820 	list_for_each_entry(block_group, &space_info->block_groups[index],
5821 			    list) {
5822 		u64 offset;
5823 		int cached;
5824 
5825 		used_block_group = block_group;
5826 		btrfs_get_block_group(block_group);
5827 		search_start = block_group->key.objectid;
5828 
5829 		/*
5830 		 * this can happen if we end up cycling through all the
5831 		 * raid types, but we want to make sure we only allocate
5832 		 * for the proper type.
5833 		 */
5834 		if (!block_group_bits(block_group, data)) {
5835 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
5836 				BTRFS_BLOCK_GROUP_RAID1 |
5837 				BTRFS_BLOCK_GROUP_RAID5 |
5838 				BTRFS_BLOCK_GROUP_RAID6 |
5839 				BTRFS_BLOCK_GROUP_RAID10;
5840 
5841 			/*
5842 			 * if they asked for extra copies and this block group
5843 			 * doesn't provide them, bail.  This does allow us to
5844 			 * fill raid0 from raid1.
5845 			 */
5846 			if ((data & extra) && !(block_group->flags & extra))
5847 				goto loop;
5848 		}
5849 
5850 have_block_group:
5851 		cached = block_group_cache_done(block_group);
5852 		if (unlikely(!cached)) {
5853 			found_uncached_bg = true;
5854 			ret = cache_block_group(block_group, 0);
5855 			BUG_ON(ret < 0);
5856 			ret = 0;
5857 		}
5858 
5859 		if (unlikely(block_group->ro))
5860 			goto loop;
5861 
5862 		/*
5863 		 * Ok, we want to try and use the cluster allocator, so
5864 		 * let's look there
5865 		 */
5866 		if (last_ptr) {
5867 			unsigned long aligned_cluster;
5868 			/*
5869 			 * the refill lock keeps out other
5870 			 * people trying to start a new cluster
5871 			 */
5872 			spin_lock(&last_ptr->refill_lock);
5873 			used_block_group = last_ptr->block_group;
5874 			if (used_block_group != block_group &&
5875 			    (!used_block_group ||
5876 			     used_block_group->ro ||
5877 			     !block_group_bits(used_block_group, data))) {
5878 				used_block_group = block_group;
5879 				goto refill_cluster;
5880 			}
5881 
5882 			if (used_block_group != block_group)
5883 				btrfs_get_block_group(used_block_group);
5884 
5885 			offset = btrfs_alloc_from_cluster(used_block_group,
5886 			  last_ptr, num_bytes, used_block_group->key.objectid);
5887 			if (offset) {
5888 				/* we have a block, we're done */
5889 				spin_unlock(&last_ptr->refill_lock);
5890 				trace_btrfs_reserve_extent_cluster(root,
5891 					block_group, search_start, num_bytes);
5892 				goto checks;
5893 			}
5894 
5895 			WARN_ON(last_ptr->block_group != used_block_group);
5896 			if (used_block_group != block_group) {
5897 				btrfs_put_block_group(used_block_group);
5898 				used_block_group = block_group;
5899 			}
5900 refill_cluster:
5901 			BUG_ON(used_block_group != block_group);
5902 			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
5903 			 * set up a new cluster, so let's just skip it
5904 			 * and let the allocator find whatever block
5905 			 * it can find.  If we reach this point, we
5906 			 * will have tried the cluster allocator
5907 			 * plenty of times and not have found
5908 			 * anything, so we are likely way too
5909 			 * fragmented for the clustering stuff to find
5910 			 * anything.
5911 			 *
5912 			 * However, if the cluster is taken from the
5913 			 * current block group, release the cluster
5914 			 * first, so that we stand a better chance of
5915 			 * succeeding in the unclustered
5916 			 * allocation.  */
5917 			if (loop >= LOOP_NO_EMPTY_SIZE &&
5918 			    last_ptr->block_group != block_group) {
5919 				spin_unlock(&last_ptr->refill_lock);
5920 				goto unclustered_alloc;
5921 			}
5922 
5923 			/*
5924 			 * this cluster didn't work out, free it and
5925 			 * start over
5926 			 */
5927 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
5928 
5929 			if (loop >= LOOP_NO_EMPTY_SIZE) {
5930 				spin_unlock(&last_ptr->refill_lock);
5931 				goto unclustered_alloc;
5932 			}
5933 
5934 			aligned_cluster = max_t(unsigned long,
5935 						empty_cluster + empty_size,
5936 					      block_group->full_stripe_len);
5937 
5938 			/* allocate a cluster in this block group */
5939 			ret = btrfs_find_space_cluster(trans, root,
5940 					       block_group, last_ptr,
5941 					       search_start, num_bytes,
5942 					       aligned_cluster);
5943 			if (ret == 0) {
5944 				/*
5945 				 * now pull our allocation out of this
5946 				 * cluster
5947 				 */
5948 				offset = btrfs_alloc_from_cluster(block_group,
5949 						  last_ptr, num_bytes,
5950 						  search_start);
5951 				if (offset) {
5952 					/* we found one, proceed */
5953 					spin_unlock(&last_ptr->refill_lock);
5954 					trace_btrfs_reserve_extent_cluster(root,
5955 						block_group, search_start,
5956 						num_bytes);
5957 					goto checks;
5958 				}
5959 			} else if (!cached && loop > LOOP_CACHING_NOWAIT
5960 				   && !failed_cluster_refill) {
5961 				spin_unlock(&last_ptr->refill_lock);
5962 
5963 				failed_cluster_refill = true;
5964 				wait_block_group_cache_progress(block_group,
5965 				       num_bytes + empty_cluster + empty_size);
5966 				goto have_block_group;
5967 			}
5968 
5969 			/*
5970 			 * at this point we either didn't find a cluster
5971 			 * or we weren't able to allocate a block from our
5972 			 * cluster.  Free the cluster we've been trying
5973 			 * to use, and go to the next block group
5974 			 */
5975 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
5976 			spin_unlock(&last_ptr->refill_lock);
5977 			goto loop;
5978 		}
5979 
5980 unclustered_alloc:
5981 		spin_lock(&block_group->free_space_ctl->tree_lock);
5982 		if (cached &&
5983 		    block_group->free_space_ctl->free_space <
5984 		    num_bytes + empty_cluster + empty_size) {
5985 			spin_unlock(&block_group->free_space_ctl->tree_lock);
5986 			goto loop;
5987 		}
5988 		spin_unlock(&block_group->free_space_ctl->tree_lock);
5989 
5990 		offset = btrfs_find_space_for_alloc(block_group, search_start,
5991 						    num_bytes, empty_size);
5992 		/*
5993 		 * If we didn't find a chunk, and we haven't failed on this
5994 		 * block group before, and this block group is in the middle of
5995 		 * caching and we are ok with waiting, then go ahead and wait
5996 		 * for progress to be made, and set failed_alloc to true.
5997 		 *
5998 		 * If failed_alloc is true then we've already waited on this
5999 		 * block group once and should move on to the next block group.
6000 		 */
6001 		if (!offset && !failed_alloc && !cached &&
6002 		    loop > LOOP_CACHING_NOWAIT) {
6003 			wait_block_group_cache_progress(block_group,
6004 						num_bytes + empty_size);
6005 			failed_alloc = true;
6006 			goto have_block_group;
6007 		} else if (!offset) {
6008 			if (!cached)
6009 				have_caching_bg = true;
6010 			goto loop;
6011 		}
6012 checks:
6013 		search_start = stripe_align(root, used_block_group,
6014 					    offset, num_bytes);
6015 
6016 		/* move on to the next group */
6017 		if (search_start + num_bytes >
6018 		    used_block_group->key.objectid + used_block_group->key.offset) {
6019 			btrfs_add_free_space(used_block_group, offset, num_bytes);
6020 			goto loop;
6021 		}
6022 
6023 		if (offset < search_start)
6024 			btrfs_add_free_space(used_block_group, offset,
6025 					     search_start - offset);
6026 		BUG_ON(offset > search_start);
6027 
6028 		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6029 						  alloc_type);
6030 		if (ret == -EAGAIN) {
6031 			btrfs_add_free_space(used_block_group, offset, num_bytes);
6032 			goto loop;
6033 		}
6034 
6035 		/* we are all good, let's return */
6036 		ins->objectid = search_start;
6037 		ins->offset = num_bytes;
6038 
6039 		trace_btrfs_reserve_extent(orig_root, block_group,
6040 					   search_start, num_bytes);
6041 		if (used_block_group != block_group)
6042 			btrfs_put_block_group(used_block_group);
6043 		btrfs_put_block_group(block_group);
6044 		break;
6045 loop:
6046 		failed_cluster_refill = false;
6047 		failed_alloc = false;
6048 		BUG_ON(index != get_block_group_index(block_group));
6049 		if (used_block_group != block_group)
6050 			btrfs_put_block_group(used_block_group);
6051 		btrfs_put_block_group(block_group);
6052 	}
6053 	up_read(&space_info->groups_sem);
6054 
6055 	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6056 		goto search;
6057 
6058 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6059 		goto search;
6060 
6061 	/*
6062 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6063 	 *			caching kthreads as we move along
6064 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6065 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6066 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6067 	 *			again
6068 	 */
6069 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6070 		index = 0;
6071 		loop++;
6072 		if (loop == LOOP_ALLOC_CHUNK) {
6073 			ret = do_chunk_alloc(trans, root, data,
6074 					     CHUNK_ALLOC_FORCE);
6075 			/*
6076 			 * Do not bail out on ENOSPC since we
6077 			 * may still be able to make progress.
6078 			 */
6079 			if (ret < 0 && ret != -ENOSPC) {
6080 				btrfs_abort_transaction(trans,
6081 							root, ret);
6082 				goto out;
6083 			}
6084 		}
6085 
6086 		if (loop == LOOP_NO_EMPTY_SIZE) {
6087 			empty_size = 0;
6088 			empty_cluster = 0;
6089 		}
6090 
6091 		goto search;
6092 	} else if (!ins->objectid) {
6093 		ret = -ENOSPC;
6094 	} else {
6095 		ret = 0;
6096 	}
6097 out:
6098 
6099 	return ret;
6100 }
6101 
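/*
 * print the usage counters of a space_info, and optionally of each of its
 * block groups, to the kernel log; used from the ENOSPC debugging paths
 */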
6102 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6103 			    int dump_block_groups)
6104 {
6105 	struct btrfs_block_group_cache *cache;
6106 	int index = 0;
6107 
6108 	spin_lock(&info->lock);
6109 	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6110 	       (unsigned long long)info->flags,
6111 	       (unsigned long long)(info->total_bytes - info->bytes_used -
6112 				    info->bytes_pinned - info->bytes_reserved -
6113 				    info->bytes_readonly),
6114 	       (info->full) ? "" : "not ");
6115 	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6116 	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
6117 	       (unsigned long long)info->total_bytes,
6118 	       (unsigned long long)info->bytes_used,
6119 	       (unsigned long long)info->bytes_pinned,
6120 	       (unsigned long long)info->bytes_reserved,
6121 	       (unsigned long long)info->bytes_may_use,
6122 	       (unsigned long long)info->bytes_readonly);
6123 	spin_unlock(&info->lock);
6124 
6125 	if (!dump_block_groups)
6126 		return;
6127 
6128 	down_read(&info->groups_sem);
6129 again:
6130 	list_for_each_entry(cache, &info->block_groups[index], list) {
6131 		spin_lock(&cache->lock);
6132 		printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6133 		       (unsigned long long)cache->key.objectid,
6134 		       (unsigned long long)cache->key.offset,
6135 		       (unsigned long long)btrfs_block_group_used(&cache->item),
6136 		       (unsigned long long)cache->pinned,
6137 		       (unsigned long long)cache->reserved,
6138 		       cache->ro ? "[readonly]" : "");
6139 		btrfs_dump_free_space(cache, bytes);
6140 		spin_unlock(&cache->lock);
6141 	}
6142 	if (++index < BTRFS_NR_RAID_TYPES)
6143 		goto again;
6144 	up_read(&info->groups_sem);
6145 }
6146 
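/*
 * Reserve an extent of at least @min_alloc_size bytes.  On ENOSPC the
 * request is halved (rounded down to the sector size, but never below
 * @min_alloc_size) and retried; only once the minimum size itself fails
 * is ENOSPC returned to the caller.
 */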
6147 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6148 			 struct btrfs_root *root,
6149 			 u64 num_bytes, u64 min_alloc_size,
6150 			 u64 empty_size, u64 hint_byte,
6151 			 struct btrfs_key *ins, u64 data)
6152 {
6153 	bool final_tried = false;
6154 	int ret;
6155 
6156 	data = btrfs_get_alloc_profile(root, data);
6157 again:
6158 	WARN_ON(num_bytes < root->sectorsize);
6159 	ret = find_free_extent(trans, root, num_bytes, empty_size,
6160 			       hint_byte, ins, data);
6161 
6162 	if (ret == -ENOSPC) {
6163 		if (!final_tried) {
6164 			num_bytes = num_bytes >> 1;
6165 			num_bytes = round_down(num_bytes, root->sectorsize);
6166 			num_bytes = max(num_bytes, min_alloc_size);
6167 			if (num_bytes == min_alloc_size)
6168 				final_tried = true;
6169 			goto again;
6170 		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6171 			struct btrfs_space_info *sinfo;
6172 
6173 			sinfo = __find_space_info(root->fs_info, data);
6174 			printk(KERN_ERR "btrfs allocation failed flags %llu, "
6175 			       "wanted %llu\n", (unsigned long long)data,
6176 			       (unsigned long long)num_bytes);
6177 			if (sinfo)
6178 				dump_space_info(sinfo, num_bytes, 1);
6179 		}
6180 	}
6181 
6182 	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6183 
6184 	return ret;
6185 }
6186 
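/*
 * Give back an extent that was reserved but never used.  With @pin set
 * the range is pinned until the transaction commits, otherwise it is
 * returned directly to the block group's free space cache.
 */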
6187 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6188 					u64 start, u64 len, int pin)
6189 {
6190 	struct btrfs_block_group_cache *cache;
6191 	int ret = 0;
6192 
6193 	cache = btrfs_lookup_block_group(root->fs_info, start);
6194 	if (!cache) {
6195 		printk(KERN_ERR "Unable to find block group for %llu\n",
6196 		       (unsigned long long)start);
6197 		return -ENOSPC;
6198 	}
6199 
6200 	if (btrfs_test_opt(root, DISCARD))
6201 		ret = btrfs_discard_extent(root, start, len, NULL);
6202 
6203 	if (pin)
6204 		pin_down_extent(root, cache, start, len, 1);
6205 	else {
6206 		btrfs_add_free_space(cache, start, len);
6207 		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6208 	}
6209 	btrfs_put_block_group(cache);
6210 
6211 	trace_btrfs_reserved_extent_free(root, start, len);
6212 
6213 	return ret;
6214 }
6215 
6216 int btrfs_free_reserved_extent(struct btrfs_root *root,
6217 					u64 start, u64 len)
6218 {
6219 	return __btrfs_free_reserved_extent(root, start, len, 0);
6220 }
6221 
6222 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6223 				       u64 start, u64 len)
6224 {
6225 	return __btrfs_free_reserved_extent(root, start, len, 1);
6226 }
6227 
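/*
 * insert the extent item and its inline backref (shared data ref, or an
 * extent data ref keyed by root/owner/offset) for a newly allocated data
 * extent, then update the block group accounting
 */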
6228 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6229 				      struct btrfs_root *root,
6230 				      u64 parent, u64 root_objectid,
6231 				      u64 flags, u64 owner, u64 offset,
6232 				      struct btrfs_key *ins, int ref_mod)
6233 {
6234 	int ret;
6235 	struct btrfs_fs_info *fs_info = root->fs_info;
6236 	struct btrfs_extent_item *extent_item;
6237 	struct btrfs_extent_inline_ref *iref;
6238 	struct btrfs_path *path;
6239 	struct extent_buffer *leaf;
6240 	int type;
6241 	u32 size;
6242 
6243 	if (parent > 0)
6244 		type = BTRFS_SHARED_DATA_REF_KEY;
6245 	else
6246 		type = BTRFS_EXTENT_DATA_REF_KEY;
6247 
6248 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6249 
6250 	path = btrfs_alloc_path();
6251 	if (!path)
6252 		return -ENOMEM;
6253 
6254 	path->leave_spinning = 1;
6255 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6256 				      ins, size);
6257 	if (ret) {
6258 		btrfs_free_path(path);
6259 		return ret;
6260 	}
6261 
6262 	leaf = path->nodes[0];
6263 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
6264 				     struct btrfs_extent_item);
6265 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6266 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6267 	btrfs_set_extent_flags(leaf, extent_item,
6268 			       flags | BTRFS_EXTENT_FLAG_DATA);
6269 
6270 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6271 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
6272 	if (parent > 0) {
6273 		struct btrfs_shared_data_ref *ref;
6274 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
6275 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6276 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6277 	} else {
6278 		struct btrfs_extent_data_ref *ref;
6279 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6280 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6281 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6282 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6283 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6284 	}
6285 
6286 	btrfs_mark_buffer_dirty(path->nodes[0]);
6287 	btrfs_free_path(path);
6288 
6289 	ret = update_block_group(root, ins->objectid, ins->offset, 1);
6290 	if (ret) { /* -ENOENT, logic error */
6291 		printk(KERN_ERR "btrfs update block group failed for %llu "
6292 		       "%llu\n", (unsigned long long)ins->objectid,
6293 		       (unsigned long long)ins->offset);
6294 		BUG();
6295 	}
6296 	return ret;
6297 }
6298 
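/*
 * insert the extent item, tree block info and inline backref for a newly
 * allocated tree block, then update the block group accounting
 */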
6299 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6300 				     struct btrfs_root *root,
6301 				     u64 parent, u64 root_objectid,
6302 				     u64 flags, struct btrfs_disk_key *key,
6303 				     int level, struct btrfs_key *ins)
6304 {
6305 	int ret;
6306 	struct btrfs_fs_info *fs_info = root->fs_info;
6307 	struct btrfs_extent_item *extent_item;
6308 	struct btrfs_tree_block_info *block_info;
6309 	struct btrfs_extent_inline_ref *iref;
6310 	struct btrfs_path *path;
6311 	struct extent_buffer *leaf;
6312 	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
6313 
6314 	path = btrfs_alloc_path();
6315 	if (!path)
6316 		return -ENOMEM;
6317 
6318 	path->leave_spinning = 1;
6319 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6320 				      ins, size);
6321 	if (ret) {
6322 		btrfs_free_path(path);
6323 		return ret;
6324 	}
6325 
6326 	leaf = path->nodes[0];
6327 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
6328 				     struct btrfs_extent_item);
6329 	btrfs_set_extent_refs(leaf, extent_item, 1);
6330 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6331 	btrfs_set_extent_flags(leaf, extent_item,
6332 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6333 	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6334 
6335 	btrfs_set_tree_block_key(leaf, block_info, key);
6336 	btrfs_set_tree_block_level(leaf, block_info, level);
6337 
6338 	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6339 	if (parent > 0) {
6340 		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6341 		btrfs_set_extent_inline_ref_type(leaf, iref,
6342 						 BTRFS_SHARED_BLOCK_REF_KEY);
6343 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6344 	} else {
6345 		btrfs_set_extent_inline_ref_type(leaf, iref,
6346 						 BTRFS_TREE_BLOCK_REF_KEY);
6347 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6348 	}
6349 
6350 	btrfs_mark_buffer_dirty(leaf);
6351 	btrfs_free_path(path);
6352 
6353 	ret = update_block_group(root, ins->objectid, ins->offset, 1);
6354 	if (ret) { /* -ENOENT, logic error */
6355 		printk(KERN_ERR "btrfs update block group failed for %llu "
6356 		       "%llu\n", (unsigned long long)ins->objectid,
6357 		       (unsigned long long)ins->offset);
6358 		BUG();
6359 	}
6360 	return ret;
6361 }
6362 
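/*
 * queue a delayed data ref for the freshly allocated extent described by
 * @ins; the extent item itself is inserted later when the delayed ref is
 * run
 */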
6363 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6364 				     struct btrfs_root *root,
6365 				     u64 root_objectid, u64 owner,
6366 				     u64 offset, struct btrfs_key *ins)
6367 {
6368 	int ret;
6369 
6370 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6371 
6372 	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6373 					 ins->offset, 0,
6374 					 root_objectid, owner, offset,
6375 					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6376 	return ret;
6377 }
6378 
6379 /*
6380  * this is used by the tree logging recovery code.  It records that
6381  * an extent has been allocated and makes sure to clear the free
6382  * space cache bits as well
6383  */
6384 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6385 				   struct btrfs_root *root,
6386 				   u64 root_objectid, u64 owner, u64 offset,
6387 				   struct btrfs_key *ins)
6388 {
6389 	int ret;
6390 	struct btrfs_block_group_cache *block_group;
6391 	struct btrfs_caching_control *caching_ctl;
6392 	u64 start = ins->objectid;
6393 	u64 num_bytes = ins->offset;
6394 
6395 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6396 	cache_block_group(block_group, 0);
6397 	caching_ctl = get_caching_control(block_group);
6398 
6399 	if (!caching_ctl) {
6400 		BUG_ON(!block_group_cache_done(block_group));
6401 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
6402 		BUG_ON(ret); /* -ENOMEM */
6403 	} else {
6404 		mutex_lock(&caching_ctl->mutex);
6405 
6406 		if (start >= caching_ctl->progress) {
6407 			ret = add_excluded_extent(root, start, num_bytes);
6408 			BUG_ON(ret); /* -ENOMEM */
6409 		} else if (start + num_bytes <= caching_ctl->progress) {
6410 			ret = btrfs_remove_free_space(block_group,
6411 						      start, num_bytes);
6412 			BUG_ON(ret); /* -ENOMEM */
6413 		} else {
6414 			num_bytes = caching_ctl->progress - start;
6415 			ret = btrfs_remove_free_space(block_group,
6416 						      start, num_bytes);
6417 			BUG_ON(ret); /* -ENOMEM */
6418 
6419 			start = caching_ctl->progress;
6420 			num_bytes = ins->objectid + ins->offset -
6421 				    caching_ctl->progress;
6422 			ret = add_excluded_extent(root, start, num_bytes);
6423 			BUG_ON(ret); /* -ENOMEM */
6424 		}
6425 
6426 		mutex_unlock(&caching_ctl->mutex);
6427 		put_caching_control(caching_ctl);
6428 	}
6429 
6430 	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6431 					  RESERVE_ALLOC_NO_ACCOUNT);
6432 	BUG_ON(ret); /* logic error */
6433 	btrfs_put_block_group(block_group);
6434 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6435 					 0, owner, offset, ins, 1);
6436 	return ret;
6437 }
6438 
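/*
 * set up the extent buffer for a newly allocated tree block: lock it,
 * mark it up to date and dirty it in the right extent io tree (the
 * transaction's dirty pages, or the log root's dirty/new pages)
 */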
6439 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6440 					    struct btrfs_root *root,
6441 					    u64 bytenr, u32 blocksize,
6442 					    int level)
6443 {
6444 	struct extent_buffer *buf;
6445 
6446 	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6447 	if (!buf)
6448 		return ERR_PTR(-ENOMEM);
6449 	btrfs_set_header_generation(buf, trans->transid);
6450 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6451 	btrfs_tree_lock(buf);
6452 	clean_tree_block(trans, root, buf);
6453 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6454 
6455 	btrfs_set_lock_blocking(buf);
6456 	btrfs_set_buffer_uptodate(buf);
6457 
6458 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6459 		/*
6460 		 * we allow two log transactions at a time, use different
6461 		 * EXTENT bit to differentiate dirty pages.
6462 		 */
6463 		if (root->log_transid % 2 == 0)
6464 			set_extent_dirty(&root->dirty_log_pages, buf->start,
6465 					buf->start + buf->len - 1, GFP_NOFS);
6466 		else
6467 			set_extent_new(&root->dirty_log_pages, buf->start,
6468 					buf->start + buf->len - 1, GFP_NOFS);
6469 	} else {
6470 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6471 			 buf->start + buf->len - 1, GFP_NOFS);
6472 	}
6473 	trans->blocks_used++;
6474 	/* this returns a buffer locked for blocking */
6475 	return buf;
6476 }
6477 
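/*
 * pick the block reserve that a new tree block will be charged to and
 * carve @blocksize bytes out of it, falling back to the global reserve
 * when the per-root reserve cannot cover the allocation
 */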
6478 static struct btrfs_block_rsv *
6479 use_block_rsv(struct btrfs_trans_handle *trans,
6480 	      struct btrfs_root *root, u32 blocksize)
6481 {
6482 	struct btrfs_block_rsv *block_rsv;
6483 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6484 	int ret;
6485 
6486 	block_rsv = get_block_rsv(trans, root);
6487 
6488 	if (block_rsv->size == 0) {
6489 		ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6490 					     BTRFS_RESERVE_NO_FLUSH);
6491 		/*
6492 		 * If we couldn't reserve metadata bytes try and use some from
6493 		 * the global reserve.
6494 		 */
6495 		if (ret && block_rsv != global_rsv) {
6496 			ret = block_rsv_use_bytes(global_rsv, blocksize);
6497 			if (!ret)
6498 				return global_rsv;
6499 			return ERR_PTR(ret);
6500 		} else if (ret) {
6501 			return ERR_PTR(ret);
6502 		}
6503 		return block_rsv;
6504 	}
6505 
6506 	ret = block_rsv_use_bytes(block_rsv, blocksize);
6507 	if (!ret)
6508 		return block_rsv;
6509 	if (ret && !block_rsv->failfast) {
6510 		if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6511 			static DEFINE_RATELIMIT_STATE(_rs,
6512 					DEFAULT_RATELIMIT_INTERVAL * 10,
6513 					/*DEFAULT_RATELIMIT_BURST*/ 1);
6514 			if (__ratelimit(&_rs))
6515 				WARN(1, KERN_DEBUG
6516 					"btrfs: block rsv returned %d\n", ret);
6517 		}
6518 		ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6519 					     BTRFS_RESERVE_NO_FLUSH);
6520 		if (!ret) {
6521 			return block_rsv;
6522 		} else if (ret && block_rsv != global_rsv) {
6523 			ret = block_rsv_use_bytes(global_rsv, blocksize);
6524 			if (!ret)
6525 				return global_rsv;
6526 		}
6527 	}
6528 
6529 	return ERR_PTR(-ENOSPC);
6530 }
6531 
6532 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6533 			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
6534 {
6535 	block_rsv_add_bytes(block_rsv, blocksize, 0);
6536 	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6537 }
6538 
6539 /*
6540  * finds a free extent and does all the dirty work required for allocation.
6541  * returns the key for the extent through ins, and a tree buffer for
6542  * the first block of the extent through buf.
6543  *
6544  * returns the tree buffer or an ERR_PTR on failure.
6545  */
6546 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6547 					struct btrfs_root *root, u32 blocksize,
6548 					u64 parent, u64 root_objectid,
6549 					struct btrfs_disk_key *key, int level,
6550 					u64 hint, u64 empty_size)
6551 {
6552 	struct btrfs_key ins;
6553 	struct btrfs_block_rsv *block_rsv;
6554 	struct extent_buffer *buf;
6555 	u64 flags = 0;
6556 	int ret;
6557 
6558 
6559 	block_rsv = use_block_rsv(trans, root, blocksize);
6560 	if (IS_ERR(block_rsv))
6561 		return ERR_CAST(block_rsv);
6562 
6563 	ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6564 				   empty_size, hint, &ins, 0);
6565 	if (ret) {
6566 		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6567 		return ERR_PTR(ret);
6568 	}
6569 
6570 	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6571 				    blocksize, level);
6572 	BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6573 
6574 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6575 		if (parent == 0)
6576 			parent = ins.objectid;
6577 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6578 	} else
6579 		BUG_ON(parent > 0);
6580 
6581 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6582 		struct btrfs_delayed_extent_op *extent_op;
6583 		extent_op = btrfs_alloc_delayed_extent_op();
6584 		BUG_ON(!extent_op); /* -ENOMEM */
6585 		if (key)
6586 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
6587 		else
6588 			memset(&extent_op->key, 0, sizeof(extent_op->key));
6589 		extent_op->flags_to_set = flags;
6590 		extent_op->update_key = 1;
6591 		extent_op->update_flags = 1;
6592 		extent_op->is_data = 0;
6593 
6594 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6595 					ins.objectid,
6596 					ins.offset, parent, root_objectid,
6597 					level, BTRFS_ADD_DELAYED_EXTENT,
6598 					extent_op, 0);
6599 		BUG_ON(ret); /* -ENOMEM */
6600 	}
6601 	return buf;
6602 }
6603 
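/*
 * state carried through the snapshot-drop tree walk: per-level ref counts
 * and flags, the current stage (DROP_REFERENCE or UPDATE_BACKREF) and the
 * key where backref updating should resume
 */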
6604 struct walk_control {
6605 	u64 refs[BTRFS_MAX_LEVEL];
6606 	u64 flags[BTRFS_MAX_LEVEL];
6607 	struct btrfs_key update_progress;
6608 	int stage;
6609 	int level;
6610 	int shared_level;
6611 	int update_ref;
6612 	int keep_locks;
6613 	int reada_slot;
6614 	int reada_count;
6615 	int for_reloc;
6616 };
6617 
6618 #define DROP_REFERENCE	1
6619 #define UPDATE_BACKREF	2
6620 
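/*
 * issue readahead for the children of the node we are about to descend
 * into, skipping blocks the current stage will not visit; the readahead
 * window grows or shrinks depending on how far the walk advanced since
 * the last call
 */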
6621 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6622 				     struct btrfs_root *root,
6623 				     struct walk_control *wc,
6624 				     struct btrfs_path *path)
6625 {
6626 	u64 bytenr;
6627 	u64 generation;
6628 	u64 refs;
6629 	u64 flags;
6630 	u32 nritems;
6631 	u32 blocksize;
6632 	struct btrfs_key key;
6633 	struct extent_buffer *eb;
6634 	int ret;
6635 	int slot;
6636 	int nread = 0;
6637 
6638 	if (path->slots[wc->level] < wc->reada_slot) {
6639 		wc->reada_count = wc->reada_count * 2 / 3;
6640 		wc->reada_count = max(wc->reada_count, 2);
6641 	} else {
6642 		wc->reada_count = wc->reada_count * 3 / 2;
6643 		wc->reada_count = min_t(int, wc->reada_count,
6644 					BTRFS_NODEPTRS_PER_BLOCK(root));
6645 	}
6646 
6647 	eb = path->nodes[wc->level];
6648 	nritems = btrfs_header_nritems(eb);
6649 	blocksize = btrfs_level_size(root, wc->level - 1);
6650 
6651 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6652 		if (nread >= wc->reada_count)
6653 			break;
6654 
6655 		cond_resched();
6656 		bytenr = btrfs_node_blockptr(eb, slot);
6657 		generation = btrfs_node_ptr_generation(eb, slot);
6658 
6659 		if (slot == path->slots[wc->level])
6660 			goto reada;
6661 
6662 		if (wc->stage == UPDATE_BACKREF &&
6663 		    generation <= root->root_key.offset)
6664 			continue;
6665 
6666 		/* We don't lock the tree block, it's OK to be racy here */
6667 		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6668 					       &refs, &flags);
6669 		/* We don't care about errors in readahead. */
6670 		if (ret < 0)
6671 			continue;
6672 		BUG_ON(refs == 0);
6673 
6674 		if (wc->stage == DROP_REFERENCE) {
6675 			if (refs == 1)
6676 				goto reada;
6677 
6678 			if (wc->level == 1 &&
6679 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6680 				continue;
6681 			if (!wc->update_ref ||
6682 			    generation <= root->root_key.offset)
6683 				continue;
6684 			btrfs_node_key_to_cpu(eb, &key, slot);
6685 			ret = btrfs_comp_cpu_keys(&key,
6686 						  &wc->update_progress);
6687 			if (ret < 0)
6688 				continue;
6689 		} else {
6690 			if (wc->level == 1 &&
6691 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6692 				continue;
6693 		}
6694 reada:
6695 		ret = readahead_tree_block(root, bytenr, blocksize,
6696 					   generation);
6697 		if (ret)
6698 			break;
6699 		nread++;
6700 	}
6701 	wc->reada_slot = slot;
6702 }
6703 
6704 /*
6705  * helper to process tree block while walking down the tree.
6706  *
6707  * when wc->stage == UPDATE_BACKREF, this function updates
6708  * back refs for pointers in the block.
6709  *
6710  * NOTE: return value 1 means we should stop walking down.
6711  */
6712 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6713 				   struct btrfs_root *root,
6714 				   struct btrfs_path *path,
6715 				   struct walk_control *wc, int lookup_info)
6716 {
6717 	int level = wc->level;
6718 	struct extent_buffer *eb = path->nodes[level];
6719 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6720 	int ret;
6721 
6722 	if (wc->stage == UPDATE_BACKREF &&
6723 	    btrfs_header_owner(eb) != root->root_key.objectid)
6724 		return 1;
6725 
6726 	/*
6727 	 * when the reference count of a tree block is 1, it won't increase
6728 	 * again. once the full backref flag is set, we never clear it.
6729 	 */
6730 	if (lookup_info &&
6731 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6732 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6733 		BUG_ON(!path->locks[level]);
6734 		ret = btrfs_lookup_extent_info(trans, root,
6735 					       eb->start, eb->len,
6736 					       &wc->refs[level],
6737 					       &wc->flags[level]);
6738 		BUG_ON(ret == -ENOMEM);
6739 		if (ret)
6740 			return ret;
6741 		BUG_ON(wc->refs[level] == 0);
6742 	}
6743 
6744 	if (wc->stage == DROP_REFERENCE) {
6745 		if (wc->refs[level] > 1)
6746 			return 1;
6747 
6748 		if (path->locks[level] && !wc->keep_locks) {
6749 			btrfs_tree_unlock_rw(eb, path->locks[level]);
6750 			path->locks[level] = 0;
6751 		}
6752 		return 0;
6753 	}
6754 
6755 	/* wc->stage == UPDATE_BACKREF */
6756 	if (!(wc->flags[level] & flag)) {
6757 		BUG_ON(!path->locks[level]);
6758 		ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6759 		BUG_ON(ret); /* -ENOMEM */
6760 		ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6761 		BUG_ON(ret); /* -ENOMEM */
6762 		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6763 						  eb->len, flag, 0);
6764 		BUG_ON(ret); /* -ENOMEM */
6765 		wc->flags[level] |= flag;
6766 	}
6767 
6768 	/*
6769 	 * the block is shared by multiple trees, so it's not good to
6770 	 * keep the tree lock
6771 	 */
6772 	if (path->locks[level] && level > 0) {
6773 		btrfs_tree_unlock_rw(eb, path->locks[level]);
6774 		path->locks[level] = 0;
6775 	}
6776 	return 0;
6777 }
6778 
6779 /*
6780  * helper to process tree block pointer.
6781  *
6782  * when wc->stage == DROP_REFERENCE, this function checks
6783  * reference count of the block pointed to. if the block
6784  * is shared and we need to update back refs for the subtree
6785  * rooted at the block, this function changes wc->stage to
6786  * UPDATE_BACKREF. if the block is shared and there is no
6787  * need to update backrefs, this function drops the reference
6788  * to the block.
6789  *
6790  * NOTE: return value 1 means we should stop walking down.
6791  */
6792 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6793 				 struct btrfs_root *root,
6794 				 struct btrfs_path *path,
6795 				 struct walk_control *wc, int *lookup_info)
6796 {
6797 	u64 bytenr;
6798 	u64 generation;
6799 	u64 parent;
6800 	u32 blocksize;
6801 	struct btrfs_key key;
6802 	struct extent_buffer *next;
6803 	int level = wc->level;
6804 	int reada = 0;
6805 	int ret = 0;
6806 
6807 	generation = btrfs_node_ptr_generation(path->nodes[level],
6808 					       path->slots[level]);
6809 	/*
6810 	 * if the lower level block was created before the snapshot
6811 	 * was created, we know there is no need to update back refs
6812 	 * for the subtree
6813 	 */
6814 	if (wc->stage == UPDATE_BACKREF &&
6815 	    generation <= root->root_key.offset) {
6816 		*lookup_info = 1;
6817 		return 1;
6818 	}
6819 
6820 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6821 	blocksize = btrfs_level_size(root, level - 1);
6822 
6823 	next = btrfs_find_tree_block(root, bytenr, blocksize);
6824 	if (!next) {
6825 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6826 		if (!next)
6827 			return -ENOMEM;
6828 		reada = 1;
6829 	}
6830 	btrfs_tree_lock(next);
6831 	btrfs_set_lock_blocking(next);
6832 
6833 	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6834 				       &wc->refs[level - 1],
6835 				       &wc->flags[level - 1]);
6836 	if (ret < 0) {
6837 		btrfs_tree_unlock(next);
6838 		return ret;
6839 	}
6840 
6841 	BUG_ON(wc->refs[level - 1] == 0);
6842 	*lookup_info = 0;
6843 
6844 	if (wc->stage == DROP_REFERENCE) {
6845 		if (wc->refs[level - 1] > 1) {
6846 			if (level == 1 &&
6847 			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6848 				goto skip;
6849 
6850 			if (!wc->update_ref ||
6851 			    generation <= root->root_key.offset)
6852 				goto skip;
6853 
6854 			btrfs_node_key_to_cpu(path->nodes[level], &key,
6855 					      path->slots[level]);
6856 			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6857 			if (ret < 0)
6858 				goto skip;
6859 
6860 			wc->stage = UPDATE_BACKREF;
6861 			wc->shared_level = level - 1;
6862 		}
6863 	} else {
6864 		if (level == 1 &&
6865 		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6866 			goto skip;
6867 	}
6868 
6869 	if (!btrfs_buffer_uptodate(next, generation, 0)) {
6870 		btrfs_tree_unlock(next);
6871 		free_extent_buffer(next);
6872 		next = NULL;
6873 		*lookup_info = 1;
6874 	}
6875 
6876 	if (!next) {
6877 		if (reada && level == 1)
6878 			reada_walk_down(trans, root, wc, path);
6879 		next = read_tree_block(root, bytenr, blocksize, generation);
6880 		if (!next)
6881 			return -EIO;
6882 		btrfs_tree_lock(next);
6883 		btrfs_set_lock_blocking(next);
6884 	}
6885 
6886 	level--;
6887 	BUG_ON(level != btrfs_header_level(next));
6888 	path->nodes[level] = next;
6889 	path->slots[level] = 0;
6890 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6891 	wc->level = level;
6892 	if (wc->level == 1)
6893 		wc->reada_slot = 0;
6894 	return 0;
6895 skip:
6896 	wc->refs[level - 1] = 0;
6897 	wc->flags[level - 1] = 0;
6898 	if (wc->stage == DROP_REFERENCE) {
6899 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6900 			parent = path->nodes[level]->start;
6901 		} else {
6902 			BUG_ON(root->root_key.objectid !=
6903 			       btrfs_header_owner(path->nodes[level]));
6904 			parent = 0;
6905 		}
6906 
6907 		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6908 				root->root_key.objectid, level - 1, 0, 0);
6909 		BUG_ON(ret); /* -ENOMEM */
6910 	}
6911 	btrfs_tree_unlock(next);
6912 	free_extent_buffer(next);
6913 	*lookup_info = 1;
6914 	return 1;
6915 }
6916 
6917 /*
6918  * helper to process tree block while walking up the tree.
6919  *
6920  * when wc->stage == DROP_REFERENCE, this function drops
6921  * reference count on the block.
6922  *
6923  * when wc->stage == UPDATE_BACKREF, this function changes
6924  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6925  * to UPDATE_BACKREF previously while processing the block.
6926  *
6927  * NOTE: return value 1 means we should stop walking up.
6928  */
6929 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6930 				 struct btrfs_root *root,
6931 				 struct btrfs_path *path,
6932 				 struct walk_control *wc)
6933 {
6934 	int ret;
6935 	int level = wc->level;
6936 	struct extent_buffer *eb = path->nodes[level];
6937 	u64 parent = 0;
6938 
6939 	if (wc->stage == UPDATE_BACKREF) {
6940 		BUG_ON(wc->shared_level < level);
6941 		if (level < wc->shared_level)
6942 			goto out;
6943 
6944 		ret = find_next_key(path, level + 1, &wc->update_progress);
6945 		if (ret > 0)
6946 			wc->update_ref = 0;
6947 
6948 		wc->stage = DROP_REFERENCE;
6949 		wc->shared_level = -1;
6950 		path->slots[level] = 0;
6951 
6952 		/*
6953 		 * check reference count again if the block isn't locked.
6954 		 * we should start walking down the tree again if reference
6955 		 * count is one.
6956 		 */
6957 		if (!path->locks[level]) {
6958 			BUG_ON(level == 0);
6959 			btrfs_tree_lock(eb);
6960 			btrfs_set_lock_blocking(eb);
6961 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6962 
6963 			ret = btrfs_lookup_extent_info(trans, root,
6964 						       eb->start, eb->len,
6965 						       &wc->refs[level],
6966 						       &wc->flags[level]);
6967 			if (ret < 0) {
6968 				btrfs_tree_unlock_rw(eb, path->locks[level]);
6969 				path->locks[level] = 0;
6970 				return ret;
6971 			}
6972 			BUG_ON(wc->refs[level] == 0);
6973 			if (wc->refs[level] == 1) {
6974 				btrfs_tree_unlock_rw(eb, path->locks[level]);
6975 				path->locks[level] = 0;
6976 				return 1;
6977 			}
6978 		}
6979 	}
6980 
6981 	/* wc->stage == DROP_REFERENCE */
6982 	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6983 
6984 	if (wc->refs[level] == 1) {
6985 		if (level == 0) {
6986 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6987 				ret = btrfs_dec_ref(trans, root, eb, 1,
6988 						    wc->for_reloc);
6989 			else
6990 				ret = btrfs_dec_ref(trans, root, eb, 0,
6991 						    wc->for_reloc);
6992 			BUG_ON(ret); /* -ENOMEM */
6993 		}
6994 		/* make block locked assertion in clean_tree_block happy */
6995 		if (!path->locks[level] &&
6996 		    btrfs_header_generation(eb) == trans->transid) {
6997 			btrfs_tree_lock(eb);
6998 			btrfs_set_lock_blocking(eb);
6999 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7000 		}
7001 		clean_tree_block(trans, root, eb);
7002 	}
7003 
7004 	if (eb == root->node) {
7005 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7006 			parent = eb->start;
7007 		else
7008 			BUG_ON(root->root_key.objectid !=
7009 			       btrfs_header_owner(eb));
7010 	} else {
7011 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7012 			parent = path->nodes[level + 1]->start;
7013 		else
7014 			BUG_ON(root->root_key.objectid !=
7015 			       btrfs_header_owner(path->nodes[level + 1]));
7016 	}
7017 
7018 	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7019 out:
7020 	wc->refs[level] = 0;
7021 	wc->flags[level] = 0;
7022 	return 0;
7023 }
7024 
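/*
 * descend from wc->level towards the leaves, processing each block with
 * walk_down_proc()/do_walk_down() until one of them tells us to stop
 */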
7025 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7026 				   struct btrfs_root *root,
7027 				   struct btrfs_path *path,
7028 				   struct walk_control *wc)
7029 {
7030 	int level = wc->level;
7031 	int lookup_info = 1;
7032 	int ret;
7033 
7034 	while (level >= 0) {
7035 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
7036 		if (ret > 0)
7037 			break;
7038 
7039 		if (level == 0)
7040 			break;
7041 
7042 		if (path->slots[level] >=
7043 		    btrfs_header_nritems(path->nodes[level]))
7044 			break;
7045 
7046 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
7047 		if (ret > 0) {
7048 			path->slots[level]++;
7049 			continue;
7050 		} else if (ret < 0)
7051 			return ret;
7052 		level = wc->level;
7053 	}
7054 	return 0;
7055 }
7056 
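/*
 * walk back up the tree from wc->level towards max_level.  if the current
 * node still has unvisited slots, advance to the next slot and return 0 so
 * the caller can walk down again; otherwise let walk_up_proc() finish the
 * block, drop our lock and reference on it and move one level up.  returns
 * 1 once every level below max_level has been processed.
 */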
7057 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7058 				 struct btrfs_root *root,
7059 				 struct btrfs_path *path,
7060 				 struct walk_control *wc, int max_level)
7061 {
7062 	int level = wc->level;
7063 	int ret;
7064 
7065 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7066 	while (level < max_level && path->nodes[level]) {
7067 		wc->level = level;
7068 		if (path->slots[level] + 1 <
7069 		    btrfs_header_nritems(path->nodes[level])) {
7070 			path->slots[level]++;
7071 			return 0;
7072 		} else {
7073 			ret = walk_up_proc(trans, root, path, wc);
7074 			if (ret > 0)
7075 				return 0;
7076 
7077 			if (path->locks[level]) {
7078 				btrfs_tree_unlock_rw(path->nodes[level],
7079 						     path->locks[level]);
7080 				path->locks[level] = 0;
7081 			}
7082 			free_extent_buffer(path->nodes[level]);
7083 			path->nodes[level] = NULL;
7084 			level++;
7085 		}
7086 	}
7087 	return 1;
7088 }
7089 
7090 /*
7091  * drop a subvolume tree.
7092  *
7093  * this function traverses the tree freeing any blocks that only
7094  * referenced by the tree.
7095  *
7096  * when a shared tree block is found. this function decreases its
7097  * reference count by one. if update_ref is true, this function
7098  * also make sure backrefs for the shared block and all lower level
7099  * blocks are properly updated.
7100  */
7101 int btrfs_drop_snapshot(struct btrfs_root *root,
7102 			 struct btrfs_block_rsv *block_rsv, int update_ref,
7103 			 int for_reloc)
7104 {
7105 	struct btrfs_path *path;
7106 	struct btrfs_trans_handle *trans;
7107 	struct btrfs_root *tree_root = root->fs_info->tree_root;
7108 	struct btrfs_root_item *root_item = &root->root_item;
7109 	struct walk_control *wc;
7110 	struct btrfs_key key;
7111 	int err = 0;
7112 	int ret;
7113 	int level;
7114 
7115 	path = btrfs_alloc_path();
7116 	if (!path) {
7117 		err = -ENOMEM;
7118 		goto out;
7119 	}
7120 
7121 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
7122 	if (!wc) {
7123 		btrfs_free_path(path);
7124 		err = -ENOMEM;
7125 		goto out;
7126 	}
7127 
7128 	trans = btrfs_start_transaction(tree_root, 0);
7129 	if (IS_ERR(trans)) {
7130 		err = PTR_ERR(trans);
7131 		goto out_free;
7132 	}
7133 
7134 	if (block_rsv)
7135 		trans->block_rsv = block_rsv;
7136 
7137 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7138 		level = btrfs_header_level(root->node);
7139 		path->nodes[level] = btrfs_lock_root_node(root);
7140 		btrfs_set_lock_blocking(path->nodes[level]);
7141 		path->slots[level] = 0;
7142 		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7143 		memset(&wc->update_progress, 0,
7144 		       sizeof(wc->update_progress));
7145 	} else {
7146 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7147 		memcpy(&wc->update_progress, &key,
7148 		       sizeof(wc->update_progress));
7149 
7150 		level = root_item->drop_level;
7151 		BUG_ON(level == 0);
7152 		path->lowest_level = level;
7153 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7154 		path->lowest_level = 0;
7155 		if (ret < 0) {
7156 			err = ret;
7157 			goto out_end_trans;
7158 		}
7159 		WARN_ON(ret > 0);
7160 
7161 		/*
7162 		 * unlock our path, this is safe because only this
7163 		 * function is allowed to delete this snapshot
7164 		 */
7165 		btrfs_unlock_up_safe(path, 0);
7166 
7167 		level = btrfs_header_level(root->node);
7168 		while (1) {
7169 			btrfs_tree_lock(path->nodes[level]);
7170 			btrfs_set_lock_blocking(path->nodes[level]);
7171 
7172 			ret = btrfs_lookup_extent_info(trans, root,
7173 						path->nodes[level]->start,
7174 						path->nodes[level]->len,
7175 						&wc->refs[level],
7176 						&wc->flags[level]);
7177 			if (ret < 0) {
7178 				err = ret;
7179 				goto out_end_trans;
7180 			}
7181 			BUG_ON(wc->refs[level] == 0);
7182 
7183 			if (level == root_item->drop_level)
7184 				break;
7185 
7186 			btrfs_tree_unlock(path->nodes[level]);
7187 			WARN_ON(wc->refs[level] != 1);
7188 			level--;
7189 		}
7190 	}
7191 
7192 	wc->level = level;
7193 	wc->shared_level = -1;
7194 	wc->stage = DROP_REFERENCE;
7195 	wc->update_ref = update_ref;
7196 	wc->keep_locks = 0;
7197 	wc->for_reloc = for_reloc;
7198 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7199 
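	/*
	 * the main loop: alternate walking down and back up the tree, freeing
	 * blocks as we go.  progress is recorded in root_item->drop_progress
	 * so the drop can be resumed later, and the transaction is restarted
	 * whenever it grows too large.
	 */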
7200 	while (1) {
7201 		ret = walk_down_tree(trans, root, path, wc);
7202 		if (ret < 0) {
7203 			err = ret;
7204 			break;
7205 		}
7206 
7207 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7208 		if (ret < 0) {
7209 			err = ret;
7210 			break;
7211 		}
7212 
7213 		if (ret > 0) {
7214 			BUG_ON(wc->stage != DROP_REFERENCE);
7215 			break;
7216 		}
7217 
7218 		if (wc->stage == DROP_REFERENCE) {
7219 			level = wc->level;
7220 			btrfs_node_key(path->nodes[level],
7221 				       &root_item->drop_progress,
7222 				       path->slots[level]);
7223 			root_item->drop_level = level;
7224 		}
7225 
7226 		BUG_ON(wc->level == 0);
7227 		if (btrfs_should_end_transaction(trans, tree_root)) {
7228 			ret = btrfs_update_root(trans, tree_root,
7229 						&root->root_key,
7230 						root_item);
7231 			if (ret) {
7232 				btrfs_abort_transaction(trans, tree_root, ret);
7233 				err = ret;
7234 				goto out_end_trans;
7235 			}
7236 
7237 			btrfs_end_transaction_throttle(trans, tree_root);
7238 			trans = btrfs_start_transaction(tree_root, 0);
7239 			if (IS_ERR(trans)) {
7240 				err = PTR_ERR(trans);
7241 				goto out_free;
7242 			}
7243 			if (block_rsv)
7244 				trans->block_rsv = block_rsv;
7245 		}
7246 	}
7247 	btrfs_release_path(path);
7248 	if (err)
7249 		goto out_end_trans;
7250 
7251 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
7252 	if (ret) {
7253 		btrfs_abort_transaction(trans, tree_root, ret);
7254 		goto out_end_trans;
7255 	}
7256 
7257 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7258 		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7259 					   NULL, NULL);
7260 		if (ret < 0) {
7261 			btrfs_abort_transaction(trans, tree_root, ret);
7262 			err = ret;
7263 			goto out_end_trans;
7264 		} else if (ret > 0) {
7265 			/* if we fail to delete the orphan item this time
7266 			 * around, it'll get picked up the next time.
7267 			 *
7268 			 * The most common failure here is just -ENOENT.
7269 			 */
7270 			btrfs_del_orphan_item(trans, tree_root,
7271 					      root->root_key.objectid);
7272 		}
7273 	}
7274 
7275 	if (root->in_radix) {
7276 		btrfs_free_fs_root(tree_root->fs_info, root);
7277 	} else {
7278 		free_extent_buffer(root->node);
7279 		free_extent_buffer(root->commit_root);
7280 		kfree(root);
7281 	}
7282 out_end_trans:
7283 	btrfs_end_transaction_throttle(trans, tree_root);
7284 out_free:
7285 	kfree(wc);
7286 	btrfs_free_path(path);
7287 out:
7288 	if (err)
7289 		btrfs_std_error(root->fs_info, err);
7290 	return err;
7291 }
7292 
7293 /*
7294  * drop subtree rooted at tree block 'node'.
7295  *
7296  * NOTE: this function will unlock and release tree block 'node'.
7297  * only used by the relocation code.
7298  */
7299 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7300 			struct btrfs_root *root,
7301 			struct extent_buffer *node,
7302 			struct extent_buffer *parent)
7303 {
7304 	struct btrfs_path *path;
7305 	struct walk_control *wc;
7306 	int level;
7307 	int parent_level;
7308 	int ret = 0;
7309 	int wret;
7310 
7311 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7312 
7313 	path = btrfs_alloc_path();
7314 	if (!path)
7315 		return -ENOMEM;
7316 
7317 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
7318 	if (!wc) {
7319 		btrfs_free_path(path);
7320 		return -ENOMEM;
7321 	}
7322 
7323 	btrfs_assert_tree_locked(parent);
7324 	parent_level = btrfs_header_level(parent);
7325 	extent_buffer_get(parent);
7326 	path->nodes[parent_level] = parent;
7327 	path->slots[parent_level] = btrfs_header_nritems(parent);
7328 
7329 	btrfs_assert_tree_locked(node);
7330 	level = btrfs_header_level(node);
7331 	path->nodes[level] = node;
7332 	path->slots[level] = 0;
7333 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7334 
7335 	wc->refs[parent_level] = 1;
7336 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7337 	wc->level = level;
7338 	wc->shared_level = -1;
7339 	wc->stage = DROP_REFERENCE;
7340 	wc->update_ref = 0;
7341 	wc->keep_locks = 1;
7342 	wc->for_reloc = 1;
7343 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7344 
7345 	while (1) {
7346 		wret = walk_down_tree(trans, root, path, wc);
7347 		if (wret < 0) {
7348 			ret = wret;
7349 			break;
7350 		}
7351 
7352 		wret = walk_up_tree(trans, root, path, wc, parent_level);
7353 		if (wret < 0)
7354 			ret = wret;
7355 		if (wret != 0)
7356 			break;
7357 	}
7358 
7359 	kfree(wc);
7360 	btrfs_free_path(path);
7361 	return ret;
7362 }
7363 
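/*
 * work out which profile a replacement chunk should use for @flags.  with a
 * single writable device mirroring is turned into DUP and raid0 into single;
 * with more devices DUP is turned into raid1 and existing raid profiles are
 * left alone.  an active restripe target for this chunk type wins over all
 * of the above.
 */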
7364 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7365 {
7366 	u64 num_devices;
7367 	u64 stripped;
7368 
7369 	/*
7370 	 * if restripe for this chunk_type is on, pick the target profile
7371 	 * and return; otherwise do the usual balance
7372 	 */
7373 	stripped = get_restripe_target(root->fs_info, flags);
7374 	if (stripped)
7375 		return extended_to_chunk(stripped);
7376 
7377 	/*
7378 	 * we add in the count of missing devices because we want
7379 	 * to make sure that any RAID levels on a degraded FS
7380 	 * continue to be honored.
7381 	 */
7382 	num_devices = root->fs_info->fs_devices->rw_devices +
7383 		root->fs_info->fs_devices->missing_devices;
7384 
7385 	stripped = BTRFS_BLOCK_GROUP_RAID0 |
7386 		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7387 		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7388 
7389 	if (num_devices == 1) {
7390 		stripped |= BTRFS_BLOCK_GROUP_DUP;
7391 		stripped = flags & ~stripped;
7392 
7393 		/* turn raid0 into single device chunks */
7394 		if (flags & BTRFS_BLOCK_GROUP_RAID0)
7395 			return stripped;
7396 
7397 		/* turn mirroring into duplication */
7398 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7399 			     BTRFS_BLOCK_GROUP_RAID10))
7400 			return stripped | BTRFS_BLOCK_GROUP_DUP;
7401 	} else {
7402 		/* they already had raid on here, just return */
7403 		if (flags & stripped)
7404 			return flags;
7405 
7406 		stripped |= BTRFS_BLOCK_GROUP_DUP;
7407 		stripped = flags & ~stripped;
7408 
7409 		/* switch duplicated blocks with raid1 */
7410 		if (flags & BTRFS_BLOCK_GROUP_DUP)
7411 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
7412 
7413 		/* this is drive concat, leave it alone */
7414 	}
7415 
7416 	return flags;
7417 }
7418 
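/*
 * try to mark a block group read-only: this succeeds only if the space_info
 * can absorb the group's unused bytes (plus a small reserve for metadata and
 * system chunks unless @force is set) without exceeding total_bytes.
 * returns 0 on success or if the group is already read-only, -ENOSPC
 * otherwise.
 */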
7419 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7420 {
7421 	struct btrfs_space_info *sinfo = cache->space_info;
7422 	u64 num_bytes;
7423 	u64 min_allocable_bytes;
7424 	int ret = -ENOSPC;
7425 
7426 
7427 	/*
7428 	 * We need some metadata space and system metadata space for
7429 	 * allocating chunks in some corner cases, unless we are forced
7430 	 * to set the block group read-only.
7431 	 */
7432 	if ((sinfo->flags &
7433 	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7434 	    !force)
7435 		min_allocable_bytes = 1 * 1024 * 1024;
7436 	else
7437 		min_allocable_bytes = 0;
7438 
7439 	spin_lock(&sinfo->lock);
7440 	spin_lock(&cache->lock);
7441 
7442 	if (cache->ro) {
7443 		ret = 0;
7444 		goto out;
7445 	}
7446 
7447 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7448 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
7449 
7450 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7451 	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7452 	    min_allocable_bytes <= sinfo->total_bytes) {
7453 		sinfo->bytes_readonly += num_bytes;
7454 		cache->ro = 1;
7455 		ret = 0;
7456 	}
7457 out:
7458 	spin_unlock(&cache->lock);
7459 	spin_unlock(&sinfo->lock);
7460 	return ret;
7461 }
7462 
7463 int btrfs_set_block_group_ro(struct btrfs_root *root,
7464 			     struct btrfs_block_group_cache *cache)
7465 
7466 {
7467 	struct btrfs_trans_handle *trans;
7468 	u64 alloc_flags;
7469 	int ret;
7470 
7471 	BUG_ON(cache->ro);
7472 
7473 	trans = btrfs_join_transaction(root);
7474 	if (IS_ERR(trans))
7475 		return PTR_ERR(trans);
7476 
7477 	alloc_flags = update_block_group_flags(root, cache->flags);
7478 	if (alloc_flags != cache->flags) {
7479 		ret = do_chunk_alloc(trans, root, alloc_flags,
7480 				     CHUNK_ALLOC_FORCE);
7481 		if (ret < 0)
7482 			goto out;
7483 	}
7484 
7485 	ret = set_block_group_ro(cache, 0);
7486 	if (!ret)
7487 		goto out;
7488 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7489 	ret = do_chunk_alloc(trans, root, alloc_flags,
7490 			     CHUNK_ALLOC_FORCE);
7491 	if (ret < 0)
7492 		goto out;
7493 	ret = set_block_group_ro(cache, 0);
7494 out:
7495 	btrfs_end_transaction(trans, root);
7496 	return ret;
7497 }
7498 
7499 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7500 			    struct btrfs_root *root, u64 type)
7501 {
7502 	u64 alloc_flags = get_alloc_profile(root, type);
7503 	return do_chunk_alloc(trans, root, alloc_flags,
7504 			      CHUNK_ALLOC_FORCE);
7505 }
7506 
7507 /*
7508  * helper to account for the unused space of all the readonly block groups
7509  * in the list. takes mirrors into account.
7510  */
7511 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7512 {
7513 	struct btrfs_block_group_cache *block_group;
7514 	u64 free_bytes = 0;
7515 	int factor;
7516 
7517 	list_for_each_entry(block_group, groups_list, list) {
7518 		spin_lock(&block_group->lock);
7519 
7520 		if (!block_group->ro) {
7521 			spin_unlock(&block_group->lock);
7522 			continue;
7523 		}
7524 
7525 		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7526 					  BTRFS_BLOCK_GROUP_RAID10 |
7527 					  BTRFS_BLOCK_GROUP_DUP))
7528 			factor = 2;
7529 		else
7530 			factor = 1;
7531 
7532 		free_bytes += (block_group->key.offset -
7533 			       btrfs_block_group_used(&block_group->item)) *
7534 			       factor;
7535 
7536 		spin_unlock(&block_group->lock);
7537 	}
7538 
7539 	return free_bytes;
7540 }
7541 
7542 /*
7543  * helper to account for the unused space of all the readonly block groups
7544  * in the space_info. takes mirrors into account.
7545  */
7546 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7547 {
7548 	int i;
7549 	u64 free_bytes = 0;
7550 
7551 	spin_lock(&sinfo->lock);
7552 
7553 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7554 		if (!list_empty(&sinfo->block_groups[i]))
7555 			free_bytes += __btrfs_get_ro_block_group_free_space(
7556 						&sinfo->block_groups[i]);
7557 
7558 	spin_unlock(&sinfo->lock);
7559 
7560 	return free_bytes;
7561 }
7562 
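/*
 * undo set_block_group_ro(): give the group's unused bytes back to the
 * space_info's writable accounting and clear the ro flag.
 */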
7563 void btrfs_set_block_group_rw(struct btrfs_root *root,
7564 			      struct btrfs_block_group_cache *cache)
7565 {
7566 	struct btrfs_space_info *sinfo = cache->space_info;
7567 	u64 num_bytes;
7568 
7569 	BUG_ON(!cache->ro);
7570 
7571 	spin_lock(&sinfo->lock);
7572 	spin_lock(&cache->lock);
7573 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7574 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
7575 	sinfo->bytes_readonly -= num_bytes;
7576 	cache->ro = 0;
7577 	spin_unlock(&cache->lock);
7578 	spin_unlock(&sinfo->lock);
7579 }
7580 
7581 /*
7582  * checks to see if it's even possible to relocate this block group.
7583  *
7584  * @return - -1 if it's not a good idea to relocate this block group, 0 if
7585  * it's ok to go ahead and try.
7586  */
7587 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7588 {
7589 	struct btrfs_block_group_cache *block_group;
7590 	struct btrfs_space_info *space_info;
7591 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7592 	struct btrfs_device *device;
7593 	u64 min_free;
7594 	u64 dev_min = 1;
7595 	u64 dev_nr = 0;
7596 	u64 target;
7597 	int index;
7598 	int full = 0;
7599 	int ret = 0;
7600 
7601 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7602 
7603 	/* odd, couldn't find the block group, leave it alone */
7604 	if (!block_group)
7605 		return -1;
7606 
7607 	min_free = btrfs_block_group_used(&block_group->item);
7608 
7609 	/* no bytes used, we're good */
7610 	if (!min_free)
7611 		goto out;
7612 
7613 	space_info = block_group->space_info;
7614 	spin_lock(&space_info->lock);
7615 
7616 	full = space_info->full;
7617 
7618 	/*
7619 	 * if this is the last block group we have in this space, we can't
7620 	 * relocate it unless we're able to allocate a new chunk below.
7621 	 *
7622 	 * Otherwise, we need to make sure we have room in the space to handle
7623 	 * all of the extents from this block group.  If we can, we're good
7624 	 */
7625 	if ((space_info->total_bytes != block_group->key.offset) &&
7626 	    (space_info->bytes_used + space_info->bytes_reserved +
7627 	     space_info->bytes_pinned + space_info->bytes_readonly +
7628 	     min_free < space_info->total_bytes)) {
7629 		spin_unlock(&space_info->lock);
7630 		goto out;
7631 	}
7632 	spin_unlock(&space_info->lock);
7633 
7634 	/*
7635 	 * ok we don't have enough space, but maybe we have free space on our
7636 	 * devices to allocate new chunks for relocation, so loop through our
7637 	 * alloc devices and guess if we have enough space.  if this block
7638 	 * group is going to be restriped, run checks against the target
7639 	 * profile instead of the current one.
7640 	 */
7641 	ret = -1;
7642 
7643 	/*
7644 	 * index:
7645 	 *      0: raid10
7646 	 *      1: raid1
7647 	 *      2: dup
7648 	 *      3: raid0
7649 	 *      4: single
7650 	 */
7651 	target = get_restripe_target(root->fs_info, block_group->flags);
7652 	if (target) {
7653 		index = __get_raid_index(extended_to_chunk(target));
7654 	} else {
7655 		/*
7656 		 * this is just a balance, so if we were marked as full
7657 		 * we know there is no space for a new chunk
7658 		 */
7659 		if (full)
7660 			goto out;
7661 
7662 		index = get_block_group_index(block_group);
7663 	}
7664 
7665 	if (index == BTRFS_RAID_RAID10) {
7666 		dev_min = 4;
7667 		/* Divide by 2 */
7668 		min_free >>= 1;
7669 	} else if (index == BTRFS_RAID_RAID1) {
7670 		dev_min = 2;
7671 	} else if (index == BTRFS_RAID_DUP) {
7672 		/* Multiply by 2 */
7673 		min_free <<= 1;
7674 	} else if (index == BTRFS_RAID_RAID0) {
7675 		dev_min = fs_devices->rw_devices;
7676 		do_div(min_free, dev_min);
7677 	}
7678 
7679 	mutex_lock(&root->fs_info->chunk_mutex);
7680 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7681 		u64 dev_offset;
7682 
7683 		/*
7684 		 * check to make sure we can actually find a chunk with enough
7685 		 * space to fit our block group in.
7686 		 */
7687 		if (device->total_bytes > device->bytes_used + min_free &&
7688 		    !device->is_tgtdev_for_dev_replace) {
7689 			ret = find_free_dev_extent(device, min_free,
7690 						   &dev_offset, NULL);
7691 			if (!ret)
7692 				dev_nr++;
7693 
7694 			if (dev_nr >= dev_min)
7695 				break;
7696 
7697 			ret = -1;
7698 		}
7699 	}
7700 	mutex_unlock(&root->fs_info->chunk_mutex);
7701 out:
7702 	btrfs_put_block_group(block_group);
7703 	return ret;
7704 }
7705 
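/*
 * find the first BLOCK_GROUP_ITEM at or after @key in the extent tree.
 * on success the path points at the item and 0 is returned; > 0 means no
 * such item exists, < 0 is an error.
 */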
7706 static int find_first_block_group(struct btrfs_root *root,
7707 		struct btrfs_path *path, struct btrfs_key *key)
7708 {
7709 	int ret = 0;
7710 	struct btrfs_key found_key;
7711 	struct extent_buffer *leaf;
7712 	int slot;
7713 
7714 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7715 	if (ret < 0)
7716 		goto out;
7717 
7718 	while (1) {
7719 		slot = path->slots[0];
7720 		leaf = path->nodes[0];
7721 		if (slot >= btrfs_header_nritems(leaf)) {
7722 			ret = btrfs_next_leaf(root, path);
7723 			if (ret == 0)
7724 				continue;
7725 			if (ret < 0)
7726 				goto out;
7727 			break;
7728 		}
7729 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
7730 
7731 		if (found_key.objectid >= key->objectid &&
7732 		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7733 			ret = 0;
7734 			goto out;
7735 		}
7736 		path->slots[0]++;
7737 	}
7738 out:
7739 	return ret;
7740 }
7741 
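/*
 * drop the reference (iref) that each block group may still hold on its
 * free space cache inode so those inodes can be released.
 */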
7742 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7743 {
7744 	struct btrfs_block_group_cache *block_group;
7745 	u64 last = 0;
7746 
7747 	while (1) {
7748 		struct inode *inode;
7749 
7750 		block_group = btrfs_lookup_first_block_group(info, last);
7751 		while (block_group) {
7752 			spin_lock(&block_group->lock);
7753 			if (block_group->iref)
7754 				break;
7755 			spin_unlock(&block_group->lock);
7756 			block_group = next_block_group(info->tree_root,
7757 						       block_group);
7758 		}
7759 		if (!block_group) {
7760 			if (last == 0)
7761 				break;
7762 			last = 0;
7763 			continue;
7764 		}
7765 
7766 		inode = block_group->inode;
7767 		block_group->iref = 0;
7768 		block_group->inode = NULL;
7769 		spin_unlock(&block_group->lock);
7770 		iput(inode);
7771 		last = block_group->key.objectid + block_group->key.offset;
7772 		btrfs_put_block_group(block_group);
7773 	}
7774 }
7775 
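/*
 * release every in-memory block group cache and space_info struct.  only
 * used during the final stages of unmount, once nothing else can touch them.
 */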
7776 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7777 {
7778 	struct btrfs_block_group_cache *block_group;
7779 	struct btrfs_space_info *space_info;
7780 	struct btrfs_caching_control *caching_ctl;
7781 	struct rb_node *n;
7782 
7783 	down_write(&info->extent_commit_sem);
7784 	while (!list_empty(&info->caching_block_groups)) {
7785 		caching_ctl = list_entry(info->caching_block_groups.next,
7786 					 struct btrfs_caching_control, list);
7787 		list_del(&caching_ctl->list);
7788 		put_caching_control(caching_ctl);
7789 	}
7790 	up_write(&info->extent_commit_sem);
7791 
7792 	spin_lock(&info->block_group_cache_lock);
7793 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7794 		block_group = rb_entry(n, struct btrfs_block_group_cache,
7795 				       cache_node);
7796 		rb_erase(&block_group->cache_node,
7797 			 &info->block_group_cache_tree);
7798 		spin_unlock(&info->block_group_cache_lock);
7799 
7800 		down_write(&block_group->space_info->groups_sem);
7801 		list_del(&block_group->list);
7802 		up_write(&block_group->space_info->groups_sem);
7803 
7804 		if (block_group->cached == BTRFS_CACHE_STARTED)
7805 			wait_block_group_cache_done(block_group);
7806 
7807 		/*
7808 		 * We haven't cached this block group, which means we could
7809 		 * possibly have excluded extents on this block group.
7810 		 */
7811 		if (block_group->cached == BTRFS_CACHE_NO)
7812 			free_excluded_extents(info->extent_root, block_group);
7813 
7814 		btrfs_remove_free_space_cache(block_group);
7815 		btrfs_put_block_group(block_group);
7816 
7817 		spin_lock(&info->block_group_cache_lock);
7818 	}
7819 	spin_unlock(&info->block_group_cache_lock);
7820 
7821 	/* now that all the block groups are freed, go through and
7822 	 * free all the space_info structs.  This is only called during
7823 	 * the final stages of unmount, and so we know nobody is
7824 	 * using them.  We call synchronize_rcu() once before we start,
7825 	 * just to be on the safe side.
7826 	 */
7827 	synchronize_rcu();
7828 
7829 	release_global_block_rsv(info);
7830 
7831 	while (!list_empty(&info->space_info)) {
7832 		space_info = list_entry(info->space_info.next,
7833 					struct btrfs_space_info,
7834 					list);
7835 		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
7836 			if (space_info->bytes_pinned > 0 ||
7837 			    space_info->bytes_reserved > 0 ||
7838 			    space_info->bytes_may_use > 0) {
7839 				WARN_ON(1);
7840 				dump_space_info(space_info, 0, 0);
7841 			}
7842 		}
7843 		list_del(&space_info->list);
7844 		kfree(space_info);
7845 	}
7846 	return 0;
7847 }
7848 
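/*
 * add the block group to its space_info's list for the raid level the
 * group was allocated with.
 */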
7849 static void __link_block_group(struct btrfs_space_info *space_info,
7850 			       struct btrfs_block_group_cache *cache)
7851 {
7852 	int index = get_block_group_index(cache);
7853 
7854 	down_write(&space_info->groups_sem);
7855 	list_add_tail(&cache->list, &space_info->block_groups[index]);
7856 	up_write(&space_info->groups_sem);
7857 }
7858 
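/*
 * load every block group item from the extent tree at mount time, build the
 * in-memory caches and space_info accounting, and set read-only any groups
 * that live on read-only chunks or that are un-mirrored while mirrored
 * groups exist.
 */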
7859 int btrfs_read_block_groups(struct btrfs_root *root)
7860 {
7861 	struct btrfs_path *path;
7862 	int ret;
7863 	struct btrfs_block_group_cache *cache;
7864 	struct btrfs_fs_info *info = root->fs_info;
7865 	struct btrfs_space_info *space_info;
7866 	struct btrfs_key key;
7867 	struct btrfs_key found_key;
7868 	struct extent_buffer *leaf;
7869 	int need_clear = 0;
7870 	u64 cache_gen;
7871 
7872 	root = info->extent_root;
7873 	key.objectid = 0;
7874 	key.offset = 0;
7875 	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7876 	path = btrfs_alloc_path();
7877 	if (!path)
7878 		return -ENOMEM;
7879 	path->reada = 1;
7880 
7881 	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7882 	if (btrfs_test_opt(root, SPACE_CACHE) &&
7883 	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7884 		need_clear = 1;
7885 	if (btrfs_test_opt(root, CLEAR_CACHE))
7886 		need_clear = 1;
7887 
7888 	while (1) {
7889 		ret = find_first_block_group(root, path, &key);
7890 		if (ret > 0)
7891 			break;
7892 		if (ret != 0)
7893 			goto error;
7894 		leaf = path->nodes[0];
7895 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7896 		cache = kzalloc(sizeof(*cache), GFP_NOFS);
7897 		if (!cache) {
7898 			ret = -ENOMEM;
7899 			goto error;
7900 		}
7901 		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7902 						GFP_NOFS);
7903 		if (!cache->free_space_ctl) {
7904 			kfree(cache);
7905 			ret = -ENOMEM;
7906 			goto error;
7907 		}
7908 
7909 		atomic_set(&cache->count, 1);
7910 		spin_lock_init(&cache->lock);
7911 		cache->fs_info = info;
7912 		INIT_LIST_HEAD(&cache->list);
7913 		INIT_LIST_HEAD(&cache->cluster_list);
7914 
7915 		if (need_clear) {
7916 			/*
7917 			 * When we mount with old space cache, we need to
7918 			 * set BTRFS_DC_CLEAR and set dirty flag.
7919 			 *
7920 			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
7921 			 *    truncate the old free space cache inode and
7922 			 *    setup a new one.
7923 			 * b) Setting 'dirty flag' makes sure that we flush
7924 			 *    the new space cache info onto disk.
7925 			 */
7926 			cache->disk_cache_state = BTRFS_DC_CLEAR;
7927 			if (btrfs_test_opt(root, SPACE_CACHE))
7928 				cache->dirty = 1;
7929 		}
7930 
7931 		read_extent_buffer(leaf, &cache->item,
7932 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
7933 				   sizeof(cache->item));
7934 		memcpy(&cache->key, &found_key, sizeof(found_key));
7935 
7936 		key.objectid = found_key.objectid + found_key.offset;
7937 		btrfs_release_path(path);
7938 		cache->flags = btrfs_block_group_flags(&cache->item);
7939 		cache->sectorsize = root->sectorsize;
7940 		cache->full_stripe_len = btrfs_full_stripe_len(root,
7941 					       &root->fs_info->mapping_tree,
7942 					       found_key.objectid);
7943 		btrfs_init_free_space_ctl(cache);
7944 
7945 		/*
7946 		 * We need to exclude the super stripes now so that the space
7947 		 * info has super bytes accounted for, otherwise we'll think
7948 		 * we have more space than we actually do.
7949 		 */
7950 		exclude_super_stripes(root, cache);
7951 
7952 		/*
7953 		 * check for two cases, either we are full, and therefore
7954 		 * don't need to bother with the caching work since we won't
7955 		 * find any space, or we are empty, and we can just add all
7956 		 * the space in and be done with it.  This saves us a lot of
7957 		 * time, particularly in the full case.
7958 		 */
7959 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7960 			cache->last_byte_to_unpin = (u64)-1;
7961 			cache->cached = BTRFS_CACHE_FINISHED;
7962 			free_excluded_extents(root, cache);
7963 		} else if (btrfs_block_group_used(&cache->item) == 0) {
7964 			cache->last_byte_to_unpin = (u64)-1;
7965 			cache->cached = BTRFS_CACHE_FINISHED;
7966 			add_new_free_space(cache, root->fs_info,
7967 					   found_key.objectid,
7968 					   found_key.objectid +
7969 					   found_key.offset);
7970 			free_excluded_extents(root, cache);
7971 		}
7972 
7973 		ret = update_space_info(info, cache->flags, found_key.offset,
7974 					btrfs_block_group_used(&cache->item),
7975 					&space_info);
7976 		BUG_ON(ret); /* -ENOMEM */
7977 		cache->space_info = space_info;
7978 		spin_lock(&cache->space_info->lock);
7979 		cache->space_info->bytes_readonly += cache->bytes_super;
7980 		spin_unlock(&cache->space_info->lock);
7981 
7982 		__link_block_group(space_info, cache);
7983 
7984 		ret = btrfs_add_block_group_cache(root->fs_info, cache);
7985 		BUG_ON(ret); /* Logic error */
7986 
7987 		set_avail_alloc_bits(root->fs_info, cache->flags);
7988 		if (btrfs_chunk_readonly(root, cache->key.objectid))
7989 			set_block_group_ro(cache, 1);
7990 	}
7991 
7992 	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7993 		if (!(get_alloc_profile(root, space_info->flags) &
7994 		      (BTRFS_BLOCK_GROUP_RAID10 |
7995 		       BTRFS_BLOCK_GROUP_RAID1 |
7996 		       BTRFS_BLOCK_GROUP_RAID5 |
7997 		       BTRFS_BLOCK_GROUP_RAID6 |
7998 		       BTRFS_BLOCK_GROUP_DUP)))
7999 			continue;
8000 		/*
8001 		 * avoid allocating from un-mirrored block group if there are
8002 		 * mirrored block groups.
8003 		 */
8004 		list_for_each_entry(cache, &space_info->block_groups[3], list)
8005 			set_block_group_ro(cache, 1);
8006 		list_for_each_entry(cache, &space_info->block_groups[4], list)
8007 			set_block_group_ro(cache, 1);
8008 	}
8009 
8010 	init_global_block_rsv(info);
8011 	ret = 0;
8012 error:
8013 	btrfs_free_path(path);
8014 	return ret;
8015 }
8016 
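/*
 * insert the block group items for all block groups created during this
 * transaction (trans->new_bgs) into the extent tree.
 */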
8017 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8018 				       struct btrfs_root *root)
8019 {
8020 	struct btrfs_block_group_cache *block_group, *tmp;
8021 	struct btrfs_root *extent_root = root->fs_info->extent_root;
8022 	struct btrfs_block_group_item item;
8023 	struct btrfs_key key;
8024 	int ret = 0;
8025 
8026 	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8027 				 new_bg_list) {
8028 		list_del_init(&block_group->new_bg_list);
8029 
8030 		if (ret)
8031 			continue;
8032 
8033 		spin_lock(&block_group->lock);
8034 		memcpy(&item, &block_group->item, sizeof(item));
8035 		memcpy(&key, &block_group->key, sizeof(key));
8036 		spin_unlock(&block_group->lock);
8037 
8038 		ret = btrfs_insert_item(trans, extent_root, &key, &item,
8039 					sizeof(item));
8040 		if (ret)
8041 			btrfs_abort_transaction(trans, extent_root, ret);
8042 	}
8043 }
8044 
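/*
 * create the in-memory cache for a freshly allocated chunk, seed its free
 * space and space_info accounting, and queue it on trans->new_bgs so that
 * btrfs_create_pending_block_groups() inserts the item later in the
 * transaction.
 */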
8045 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8046 			   struct btrfs_root *root, u64 bytes_used,
8047 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
8048 			   u64 size)
8049 {
8050 	int ret;
8051 	struct btrfs_root *extent_root;
8052 	struct btrfs_block_group_cache *cache;
8053 
8054 	extent_root = root->fs_info->extent_root;
8055 
8056 	root->fs_info->last_trans_log_full_commit = trans->transid;
8057 
8058 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
8059 	if (!cache)
8060 		return -ENOMEM;
8061 	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8062 					GFP_NOFS);
8063 	if (!cache->free_space_ctl) {
8064 		kfree(cache);
8065 		return -ENOMEM;
8066 	}
8067 
8068 	cache->key.objectid = chunk_offset;
8069 	cache->key.offset = size;
8070 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8071 	cache->sectorsize = root->sectorsize;
8072 	cache->fs_info = root->fs_info;
8073 	cache->full_stripe_len = btrfs_full_stripe_len(root,
8074 					       &root->fs_info->mapping_tree,
8075 					       chunk_offset);
8076 
8077 	atomic_set(&cache->count, 1);
8078 	spin_lock_init(&cache->lock);
8079 	INIT_LIST_HEAD(&cache->list);
8080 	INIT_LIST_HEAD(&cache->cluster_list);
8081 	INIT_LIST_HEAD(&cache->new_bg_list);
8082 
8083 	btrfs_init_free_space_ctl(cache);
8084 
8085 	btrfs_set_block_group_used(&cache->item, bytes_used);
8086 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8087 	cache->flags = type;
8088 	btrfs_set_block_group_flags(&cache->item, type);
8089 
8090 	cache->last_byte_to_unpin = (u64)-1;
8091 	cache->cached = BTRFS_CACHE_FINISHED;
8092 	exclude_super_stripes(root, cache);
8093 
8094 	add_new_free_space(cache, root->fs_info, chunk_offset,
8095 			   chunk_offset + size);
8096 
8097 	free_excluded_extents(root, cache);
8098 
8099 	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8100 				&cache->space_info);
8101 	BUG_ON(ret); /* -ENOMEM */
8102 	update_global_block_rsv(root->fs_info);
8103 
8104 	spin_lock(&cache->space_info->lock);
8105 	cache->space_info->bytes_readonly += cache->bytes_super;
8106 	spin_unlock(&cache->space_info->lock);
8107 
8108 	__link_block_group(cache->space_info, cache);
8109 
8110 	ret = btrfs_add_block_group_cache(root->fs_info, cache);
8111 	BUG_ON(ret); /* Logic error */
8112 
8113 	list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8114 
8115 	set_avail_alloc_bits(extent_root->fs_info, type);
8116 
8117 	return 0;
8118 }
8119 
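/*
 * remove the extended profile bits in @flags from the avail_*_alloc_bits
 * masks; the counterpart of set_avail_alloc_bits(), used when the last
 * block group of a profile goes away.
 */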
8120 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8121 {
8122 	u64 extra_flags = chunk_to_extended(flags) &
8123 				BTRFS_EXTENDED_PROFILE_MASK;
8124 
8125 	write_seqlock(&fs_info->profiles_lock);
8126 	if (flags & BTRFS_BLOCK_GROUP_DATA)
8127 		fs_info->avail_data_alloc_bits &= ~extra_flags;
8128 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
8129 		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8130 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8131 		fs_info->avail_system_alloc_bits &= ~extra_flags;
8132 	write_sequnlock(&fs_info->profiles_lock);
8133 }
8134 
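/*
 * remove a block group that has been emptied out: drop its free space cache
 * inode and item, unlink it from the cache rbtree and its space_info,
 * adjust the space accounting and finally delete the block group item.
 * the group must already be read-only (BUG_ON otherwise).
 */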
8135 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8136 			     struct btrfs_root *root, u64 group_start)
8137 {
8138 	struct btrfs_path *path;
8139 	struct btrfs_block_group_cache *block_group;
8140 	struct btrfs_free_cluster *cluster;
8141 	struct btrfs_root *tree_root = root->fs_info->tree_root;
8142 	struct btrfs_key key;
8143 	struct inode *inode;
8144 	int ret;
8145 	int index;
8146 	int factor;
8147 
8148 	root = root->fs_info->extent_root;
8149 
8150 	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8151 	BUG_ON(!block_group);
8152 	BUG_ON(!block_group->ro);
8153 
8154 	/*
8155 	 * Free the reserved super bytes from this block group before
8156 	 * removing it.
8157 	 */
8158 	free_excluded_extents(root, block_group);
8159 
8160 	memcpy(&key, &block_group->key, sizeof(key));
8161 	index = get_block_group_index(block_group);
8162 	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8163 				  BTRFS_BLOCK_GROUP_RAID1 |
8164 				  BTRFS_BLOCK_GROUP_RAID10))
8165 		factor = 2;
8166 	else
8167 		factor = 1;
8168 
8169 	/* make sure this block group isn't part of an allocation cluster */
8170 	cluster = &root->fs_info->data_alloc_cluster;
8171 	spin_lock(&cluster->refill_lock);
8172 	btrfs_return_cluster_to_free_space(block_group, cluster);
8173 	spin_unlock(&cluster->refill_lock);
8174 
8175 	/*
8176 	 * make sure this block group isn't part of a metadata
8177 	 * allocation cluster
8178 	 */
8179 	cluster = &root->fs_info->meta_alloc_cluster;
8180 	spin_lock(&cluster->refill_lock);
8181 	btrfs_return_cluster_to_free_space(block_group, cluster);
8182 	spin_unlock(&cluster->refill_lock);
8183 
8184 	path = btrfs_alloc_path();
8185 	if (!path) {
8186 		ret = -ENOMEM;
8187 		goto out;
8188 	}
8189 
8190 	inode = lookup_free_space_inode(tree_root, block_group, path);
8191 	if (!IS_ERR(inode)) {
8192 		ret = btrfs_orphan_add(trans, inode);
8193 		if (ret) {
8194 			btrfs_add_delayed_iput(inode);
8195 			goto out;
8196 		}
8197 		clear_nlink(inode);
8198 		/* One for the block group's ref */
8199 		spin_lock(&block_group->lock);
8200 		if (block_group->iref) {
8201 			block_group->iref = 0;
8202 			block_group->inode = NULL;
8203 			spin_unlock(&block_group->lock);
8204 			iput(inode);
8205 		} else {
8206 			spin_unlock(&block_group->lock);
8207 		}
8208 		/* One for our lookup ref */
8209 		btrfs_add_delayed_iput(inode);
8210 	}
8211 
8212 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8213 	key.offset = block_group->key.objectid;
8214 	key.type = 0;
8215 
8216 	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8217 	if (ret < 0)
8218 		goto out;
8219 	if (ret > 0)
8220 		btrfs_release_path(path);
8221 	if (ret == 0) {
8222 		ret = btrfs_del_item(trans, tree_root, path);
8223 		if (ret)
8224 			goto out;
8225 		btrfs_release_path(path);
8226 	}
8227 
8228 	spin_lock(&root->fs_info->block_group_cache_lock);
8229 	rb_erase(&block_group->cache_node,
8230 		 &root->fs_info->block_group_cache_tree);
8231 
8232 	if (root->fs_info->first_logical_byte == block_group->key.objectid)
8233 		root->fs_info->first_logical_byte = (u64)-1;
8234 	spin_unlock(&root->fs_info->block_group_cache_lock);
8235 
8236 	down_write(&block_group->space_info->groups_sem);
8237 	/*
8238 	 * we must use list_del_init so people can check to see if they
8239 	 * are still on the list after taking the semaphore
8240 	 */
8241 	list_del_init(&block_group->list);
8242 	if (list_empty(&block_group->space_info->block_groups[index]))
8243 		clear_avail_alloc_bits(root->fs_info, block_group->flags);
8244 	up_write(&block_group->space_info->groups_sem);
8245 
8246 	if (block_group->cached == BTRFS_CACHE_STARTED)
8247 		wait_block_group_cache_done(block_group);
8248 
8249 	btrfs_remove_free_space_cache(block_group);
8250 
8251 	spin_lock(&block_group->space_info->lock);
8252 	block_group->space_info->total_bytes -= block_group->key.offset;
8253 	block_group->space_info->bytes_readonly -= block_group->key.offset;
8254 	block_group->space_info->disk_total -= block_group->key.offset * factor;
8255 	spin_unlock(&block_group->space_info->lock);
8256 
8257 	memcpy(&key, &block_group->key, sizeof(key));
8258 
8259 	btrfs_clear_space_info_full(root->fs_info);
8260 
8261 	btrfs_put_block_group(block_group);
8262 	btrfs_put_block_group(block_group);
8263 
8264 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8265 	if (ret > 0)
8266 		ret = -EIO;
8267 	if (ret < 0)
8268 		goto out;
8269 
8270 	ret = btrfs_del_item(trans, root, path);
8271 out:
8272 	btrfs_free_path(path);
8273 	return ret;
8274 }
8275 
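/*
 * set up the basic space_info entries: SYSTEM plus either a mixed
 * METADATA|DATA entry or separate METADATA and DATA entries, depending on
 * the MIXED_GROUPS incompat flag.
 */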
8276 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8277 {
8278 	struct btrfs_space_info *space_info;
8279 	struct btrfs_super_block *disk_super;
8280 	u64 features;
8281 	u64 flags;
8282 	int mixed = 0;
8283 	int ret;
8284 
8285 	disk_super = fs_info->super_copy;
8286 	if (!btrfs_super_root(disk_super))
8287 		return 1;
8288 
8289 	features = btrfs_super_incompat_flags(disk_super);
8290 	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8291 		mixed = 1;
8292 
8293 	flags = BTRFS_BLOCK_GROUP_SYSTEM;
8294 	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8295 	if (ret)
8296 		goto out;
8297 
8298 	if (mixed) {
8299 		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8300 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8301 	} else {
8302 		flags = BTRFS_BLOCK_GROUP_METADATA;
8303 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8304 		if (ret)
8305 			goto out;
8306 
8307 		flags = BTRFS_BLOCK_GROUP_DATA;
8308 		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8309 	}
8310 out:
8311 	return ret;
8312 }
8313 
8314 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8315 {
8316 	return unpin_extent_range(root, start, end);
8317 }
8318 
8319 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8320 			       u64 num_bytes, u64 *actual_bytes)
8321 {
8322 	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8323 }
8324 
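/*
 * trim (discard) the free space in every block group that overlaps the
 * requested range, caching each group first if needed.  the total number
 * of trimmed bytes is returned in range->len.
 */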
8325 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8326 {
8327 	struct btrfs_fs_info *fs_info = root->fs_info;
8328 	struct btrfs_block_group_cache *cache = NULL;
8329 	u64 group_trimmed;
8330 	u64 start;
8331 	u64 end;
8332 	u64 trimmed = 0;
8333 	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8334 	int ret = 0;
8335 
8336 	/*
8337 	 * try to trim all FS space; our block group may start at a non-zero offset.
8338 	 */
8339 	if (range->len == total_bytes)
8340 		cache = btrfs_lookup_first_block_group(fs_info, range->start);
8341 	else
8342 		cache = btrfs_lookup_block_group(fs_info, range->start);
8343 
8344 	while (cache) {
8345 		if (cache->key.objectid >= (range->start + range->len)) {
8346 			btrfs_put_block_group(cache);
8347 			break;
8348 		}
8349 
8350 		start = max(range->start, cache->key.objectid);
8351 		end = min(range->start + range->len,
8352 				cache->key.objectid + cache->key.offset);
8353 
8354 		if (end - start >= range->minlen) {
8355 			if (!block_group_cache_done(cache)) {
8356 				ret = cache_block_group(cache, 0);
8357 				if (!ret)
8358 					wait_block_group_cache_done(cache);
8359 			}
8360 			ret = btrfs_trim_block_group(cache,
8361 						     &group_trimmed,
8362 						     start,
8363 						     end,
8364 						     range->minlen);
8365 
8366 			trimmed += group_trimmed;
8367 			if (ret) {
8368 				btrfs_put_block_group(cache);
8369 				break;
8370 			}
8371 		}
8372 
8373 		cache = next_block_group(fs_info->tree_root, cache);
8374 	}
8375 
8376 	range->len = trimmed;
8377 	return ret;
8378 }
8379