1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007,2008 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/rbtree.h>
9 #include <linux/mm.h>
10 #include "ctree.h"
11 #include "disk-io.h"
12 #include "transaction.h"
13 #include "print-tree.h"
14 #include "locking.h"
15 
16 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
17 		      *root, struct btrfs_path *path, int level);
18 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
19 		      const struct btrfs_key *ins_key, struct btrfs_path *path,
20 		      int data_size, int extend);
21 static int push_node_left(struct btrfs_trans_handle *trans,
22 			  struct btrfs_fs_info *fs_info,
23 			  struct extent_buffer *dst,
24 			  struct extent_buffer *src, int empty);
25 static int balance_node_right(struct btrfs_trans_handle *trans,
26 			      struct btrfs_fs_info *fs_info,
27 			      struct extent_buffer *dst_buf,
28 			      struct extent_buffer *src_buf);
29 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
30 		    int level, int slot);
31 
32 struct btrfs_path *btrfs_alloc_path(void)
33 {
34 	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
35 }
36 
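/*
 * A minimal usage sketch (illustrative only): paths are typically
 * allocated, used for one or more searches, then freed; btrfs_free_path()
 * also drops any locks and extent buffer references still held:
 *
 *	struct btrfs_path *path;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		... examine path->nodes[0] at path->slots[0] ...
 *	}
 *	btrfs_free_path(path);
 */
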
37 /*
38  * set all locked nodes in the path to blocking locks.  This should
39  * be done before scheduling.
40  */
41 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
42 {
43 	int i;
44 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
45 		if (!p->nodes[i] || !p->locks[i])
46 			continue;
47 		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
48 		if (p->locks[i] == BTRFS_READ_LOCK)
49 			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
50 		else if (p->locks[i] == BTRFS_WRITE_LOCK)
51 			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
52 	}
53 }
54 
55 /* this also releases the path */
56 void btrfs_free_path(struct btrfs_path *p)
57 {
58 	if (!p)
59 		return;
60 	btrfs_release_path(p);
61 	kmem_cache_free(btrfs_path_cachep, p);
62 }
63 
64 /*
65  * path release drops references on the extent buffers in the path
66  * and it drops any locks held by this path
67  *
68  * It is safe to call this on paths that hold no locks or extent buffers.
69  */
70 noinline void btrfs_release_path(struct btrfs_path *p)
71 {
72 	int i;
73 
74 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
75 		p->slots[i] = 0;
76 		if (!p->nodes[i])
77 			continue;
78 		if (p->locks[i]) {
79 			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
80 			p->locks[i] = 0;
81 		}
82 		free_extent_buffer(p->nodes[i]);
83 		p->nodes[i] = NULL;
84 	}
85 }
86 
87 /*
88  * safely gets a reference on the root node of a tree.  A lock
89  * is not taken, so a concurrent writer may put a different node
90  * at the root of the tree.  See btrfs_lock_root_node for the
91  * looping required.
92  *
93  * The extent buffer returned by this has a reference taken, so
94  * it won't disappear.  It may stop being the root of the tree
95  * at any time because there are no locks held.
96  */
97 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
98 {
99 	struct extent_buffer *eb;
100 
101 	while (1) {
102 		rcu_read_lock();
103 		eb = rcu_dereference(root->node);
104 
105 		/*
106 		 * RCU really hurts here; we could free up the root node because
107 		 * it was COWed but we may not get the new root node yet, so do
108 		 * the inc_not_zero dance and, if it doesn't work, then
109 		 * synchronize_rcu and try again.
110 		 */
111 		if (atomic_inc_not_zero(&eb->refs)) {
112 			rcu_read_unlock();
113 			break;
114 		}
115 		rcu_read_unlock();
116 		synchronize_rcu();
117 	}
118 	return eb;
119 }
120 
121 /* loop around taking references on and locking the root node of the
122  * tree until you end up with a lock on the root.  A locked buffer
123  * is returned, with a reference held.
124  */
125 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
126 {
127 	struct extent_buffer *eb;
128 
129 	while (1) {
130 		eb = btrfs_root_node(root);
131 		btrfs_tree_lock(eb);
132 		if (eb == root->node)
133 			break;
134 		btrfs_tree_unlock(eb);
135 		free_extent_buffer(eb);
136 	}
137 	return eb;
138 }
139 
140 /* loop around taking references on and read locking the root node of
141  * the tree until you end up with a read lock on the root.  A read
142  * locked buffer is returned, with a reference held.
143  */
144 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
145 {
146 	struct extent_buffer *eb;
147 
148 	while (1) {
149 		eb = btrfs_root_node(root);
150 		btrfs_tree_read_lock(eb);
151 		if (eb == root->node)
152 			break;
153 		btrfs_tree_read_unlock(eb);
154 		free_extent_buffer(eb);
155 	}
156 	return eb;
157 }
158 
159 /* cowonly roots (everything not a reference counted cow subvolume) just
160  * get put onto a simple dirty list.  transaction.c walks this list to
161  * make sure they get properly updated on disk.
162  */
163 static void add_root_to_dirty_list(struct btrfs_root *root)
164 {
165 	struct btrfs_fs_info *fs_info = root->fs_info;
166 
167 	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
168 	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
169 		return;
170 
171 	spin_lock(&fs_info->trans_lock);
172 	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
173 		/* Want the extent tree to be the last on the list */
174 		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
175 			list_move_tail(&root->dirty_list,
176 				       &fs_info->dirty_cowonly_roots);
177 		else
178 			list_move(&root->dirty_list,
179 				  &fs_info->dirty_cowonly_roots);
180 	}
181 	spin_unlock(&fs_info->trans_lock);
182 }
183 
184 /*
185  * used by snapshot creation to make a copy of a root for a tree with
186  * a given objectid.  The buffer with the new root node is returned in
187  * cow_ret, and the function returns zero on success or a negative error code.
188  */
189 int btrfs_copy_root(struct btrfs_trans_handle *trans,
190 		      struct btrfs_root *root,
191 		      struct extent_buffer *buf,
192 		      struct extent_buffer **cow_ret, u64 new_root_objectid)
193 {
194 	struct btrfs_fs_info *fs_info = root->fs_info;
195 	struct extent_buffer *cow;
196 	int ret = 0;
197 	int level;
198 	struct btrfs_disk_key disk_key;
199 
200 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
201 		trans->transid != fs_info->running_transaction->transid);
202 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
203 		trans->transid != root->last_trans);
204 
205 	level = btrfs_header_level(buf);
206 	if (level == 0)
207 		btrfs_item_key(buf, &disk_key, 0);
208 	else
209 		btrfs_node_key(buf, &disk_key, 0);
210 
211 	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
212 			&disk_key, level, buf->start, 0);
213 	if (IS_ERR(cow))
214 		return PTR_ERR(cow);
215 
216 	copy_extent_buffer_full(cow, buf);
217 	btrfs_set_header_bytenr(cow, cow->start);
218 	btrfs_set_header_generation(cow, trans->transid);
219 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
220 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
221 				     BTRFS_HEADER_FLAG_RELOC);
222 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
223 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
224 	else
225 		btrfs_set_header_owner(cow, new_root_objectid);
226 
227 	write_extent_buffer_fsid(cow, fs_info->fsid);
228 
229 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
230 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
231 		ret = btrfs_inc_ref(trans, root, cow, 1);
232 	else
233 		ret = btrfs_inc_ref(trans, root, cow, 0);
234 
235 	if (ret)
236 		return ret;
237 
238 	btrfs_mark_buffer_dirty(cow);
239 	*cow_ret = cow;
240 	return 0;
241 }
242 
243 enum mod_log_op {
244 	MOD_LOG_KEY_REPLACE,
245 	MOD_LOG_KEY_ADD,
246 	MOD_LOG_KEY_REMOVE,
247 	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
248 	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
249 	MOD_LOG_MOVE_KEYS,
250 	MOD_LOG_ROOT_REPLACE,
251 };
252 
253 struct tree_mod_root {
254 	u64 logical;
255 	u8 level;
256 };
257 
258 struct tree_mod_elem {
259 	struct rb_node node;
260 	u64 logical;
261 	u64 seq;
262 	enum mod_log_op op;
263 
264 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
265 	int slot;
266 
267 	/* this is used for MOD_LOG_KEY_* and MOD_LOG_ROOT_REPLACE */
268 	u64 generation;
269 
270 	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
271 	struct btrfs_disk_key key;
272 	u64 blockptr;
273 
274 	/* this is used for op == MOD_LOG_MOVE_KEYS */
275 	struct {
276 		int dst_slot;
277 		int nr_items;
278 	} move;
279 
280 	/* this is used for op == MOD_LOG_ROOT_REPLACE */
281 	struct tree_mod_root old_root;
282 };
283 
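/*
 * Illustrative example of how a move is encoded (values made up): moving
 * nr_items == 3 key pointers from slot 2 down to slot 0 within one node
 * is logged as a single element with
 *
 *	op = MOD_LOG_MOVE_KEYS, slot = 2 (source),
 *	move.dst_slot = 0, move.nr_items = 3,
 *
 * preceded by MOD_LOG_KEY_REMOVE_WHILE_MOVING elements for slots 0 and 1,
 * whose contents get overwritten by the move (see
 * tree_mod_log_insert_move below).
 */
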
284 /*
285  * Pull a new tree mod seq number for our operation.
286  */
287 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
288 {
289 	return atomic64_inc_return(&fs_info->tree_mod_seq);
290 }
291 
292 /*
293  * This adds a new blocker to the tree mod log's blocker list if the @elem
294  * passed does not already have a sequence number set. So when a caller expects
295  * to record tree modifications, it should make sure elem->seq is zero
296  * before calling btrfs_get_tree_mod_seq.
297  * Returns a fresh, unused tree mod log sequence number, even if no new
298  * blocker was added.
299  */
300 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
301 			   struct seq_list *elem)
302 {
303 	write_lock(&fs_info->tree_mod_log_lock);
304 	spin_lock(&fs_info->tree_mod_seq_lock);
305 	if (!elem->seq) {
306 		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
307 		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
308 	}
309 	spin_unlock(&fs_info->tree_mod_seq_lock);
310 	write_unlock(&fs_info->tree_mod_log_lock);
311 
312 	return elem->seq;
313 }
314 
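/*
 * A hedged usage sketch (illustrative): a caller that wants to look at
 * previous tree states registers a blocker with a zeroed sequence number
 * and drops it when done:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... use elem.seq as the time_seq for tree mod log searches ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * Roughly speaking, while the blocker is registered, log entries newer
 * than elem.seq are kept around (see btrfs_put_tree_mod_seq below).
 */
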
315 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
316 			    struct seq_list *elem)
317 {
318 	struct rb_root *tm_root;
319 	struct rb_node *node;
320 	struct rb_node *next;
321 	struct seq_list *cur_elem;
322 	struct tree_mod_elem *tm;
323 	u64 min_seq = (u64)-1;
324 	u64 seq_putting = elem->seq;
325 
326 	if (!seq_putting)
327 		return;
328 
329 	spin_lock(&fs_info->tree_mod_seq_lock);
330 	list_del(&elem->list);
331 	elem->seq = 0;
332 
333 	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
334 		if (cur_elem->seq < min_seq) {
335 			if (seq_putting > cur_elem->seq) {
336 				/*
337 				 * blocker with lower sequence number exists, we
338 				 * cannot remove anything from the log
339 				 */
340 				spin_unlock(&fs_info->tree_mod_seq_lock);
341 				return;
342 			}
343 			min_seq = cur_elem->seq;
344 		}
345 	}
346 	spin_unlock(&fs_info->tree_mod_seq_lock);
347 
348 	/*
349 	 * anything that's lower than the lowest existing (read: blocked)
350 	 * sequence number can be removed from the tree.
351 	 */
352 	write_lock(&fs_info->tree_mod_log_lock);
353 	tm_root = &fs_info->tree_mod_log;
354 	for (node = rb_first(tm_root); node; node = next) {
355 		next = rb_next(node);
356 		tm = rb_entry(node, struct tree_mod_elem, node);
357 		if (tm->seq > min_seq)
358 			continue;
359 		rb_erase(node, tm_root);
360 		kfree(tm);
361 	}
362 	write_unlock(&fs_info->tree_mod_log_lock);
363 }
364 
365 /*
366  * key order of the log:
367  *       node/leaf start address -> sequence
368  *
369  * The 'start address' is the logical address of the *new* root node
370  * for root replace operations, or the logical address of the affected
371  * block for all other operations.
372  *
373  * Note: must be called with write lock for fs_info::tree_mod_log_lock.
374  */
375 static noinline int
376 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
377 {
378 	struct rb_root *tm_root;
379 	struct rb_node **new;
380 	struct rb_node *parent = NULL;
381 	struct tree_mod_elem *cur;
382 
383 	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
384 
385 	tm_root = &fs_info->tree_mod_log;
386 	new = &tm_root->rb_node;
387 	while (*new) {
388 		cur = rb_entry(*new, struct tree_mod_elem, node);
389 		parent = *new;
390 		if (cur->logical < tm->logical)
391 			new = &((*new)->rb_left);
392 		else if (cur->logical > tm->logical)
393 			new = &((*new)->rb_right);
394 		else if (cur->seq < tm->seq)
395 			new = &((*new)->rb_left);
396 		else if (cur->seq > tm->seq)
397 			new = &((*new)->rb_right);
398 		else
399 			return -EEXIST;
400 	}
401 
402 	rb_link_node(&tm->node, parent, new);
403 	rb_insert_color(&tm->node, tm_root);
404 	return 0;
405 }
406 
407 /*
408  * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
409  * returns zero with the tree_mod_log_lock acquired. The caller must hold the
410  * lock until all tree mod log insertions are recorded in the rb tree and then
411  * write unlock fs_info::tree_mod_log_lock.
412  */
413 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
414 				    struct extent_buffer *eb) {
415 	smp_mb();
416 	if (list_empty(&fs_info->tree_mod_seq_list))
417 		return 1;
418 	if (eb && btrfs_header_level(eb) == 0)
419 		return 1;
420 
421 	write_lock(&fs_info->tree_mod_log_lock);
422 	if (list_empty(&fs_info->tree_mod_seq_list)) {
423 		write_unlock(&fs_info->tree_mod_log_lock);
424 		return 1;
425 	}
426 
427 	return 0;
428 }
429 
430 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
431 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
432 				    struct extent_buffer *eb)
433 {
434 	smp_mb();
435 	if (list_empty(&fs_info->tree_mod_seq_list))
436 		return 0;
437 	if (eb && btrfs_header_level(eb) == 0)
438 		return 0;
439 
440 	return 1;
441 }
442 
443 static struct tree_mod_elem *
444 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
445 		    enum mod_log_op op, gfp_t flags)
446 {
447 	struct tree_mod_elem *tm;
448 
449 	tm = kzalloc(sizeof(*tm), flags);
450 	if (!tm)
451 		return NULL;
452 
453 	tm->logical = eb->start;
454 	if (op != MOD_LOG_KEY_ADD) {
455 		btrfs_node_key(eb, &tm->key, slot);
456 		tm->blockptr = btrfs_node_blockptr(eb, slot);
457 	}
458 	tm->op = op;
459 	tm->slot = slot;
460 	tm->generation = btrfs_node_ptr_generation(eb, slot);
461 	RB_CLEAR_NODE(&tm->node);
462 
463 	return tm;
464 }
465 
466 static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
467 		enum mod_log_op op, gfp_t flags)
468 {
469 	struct tree_mod_elem *tm;
470 	int ret;
471 
472 	if (!tree_mod_need_log(eb->fs_info, eb))
473 		return 0;
474 
475 	tm = alloc_tree_mod_elem(eb, slot, op, flags);
476 	if (!tm)
477 		return -ENOMEM;
478 
479 	if (tree_mod_dont_log(eb->fs_info, eb)) {
480 		kfree(tm);
481 		return 0;
482 	}
483 
484 	ret = __tree_mod_log_insert(eb->fs_info, tm);
485 	write_unlock(&eb->fs_info->tree_mod_log_lock);
486 	if (ret)
487 		kfree(tm);
488 
489 	return ret;
490 }
491 
492 static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
493 		int dst_slot, int src_slot, int nr_items)
494 {
495 	struct tree_mod_elem *tm = NULL;
496 	struct tree_mod_elem **tm_list = NULL;
497 	int ret = 0;
498 	int i;
499 	int locked = 0;
500 
501 	if (!tree_mod_need_log(eb->fs_info, eb))
502 		return 0;
503 
504 	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
505 	if (!tm_list)
506 		return -ENOMEM;
507 
508 	tm = kzalloc(sizeof(*tm), GFP_NOFS);
509 	if (!tm) {
510 		ret = -ENOMEM;
511 		goto free_tms;
512 	}
513 
514 	tm->logical = eb->start;
515 	tm->slot = src_slot;
516 	tm->move.dst_slot = dst_slot;
517 	tm->move.nr_items = nr_items;
518 	tm->op = MOD_LOG_MOVE_KEYS;
519 
520 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
521 		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
522 		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
523 		if (!tm_list[i]) {
524 			ret = -ENOMEM;
525 			goto free_tms;
526 		}
527 	}
528 
529 	if (tree_mod_dont_log(eb->fs_info, eb))
530 		goto free_tms;
531 	locked = 1;
532 
533 	/*
534 	 * When we overwrite something during the move, we log these removals.
535 	 * This can only happen when we move towards the beginning of the
536 	 * buffer, i.e. dst_slot < src_slot.
537 	 */
538 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
539 		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
540 		if (ret)
541 			goto free_tms;
542 	}
543 
544 	ret = __tree_mod_log_insert(eb->fs_info, tm);
545 	if (ret)
546 		goto free_tms;
547 	write_unlock(&eb->fs_info->tree_mod_log_lock);
548 	kfree(tm_list);
549 
550 	return 0;
551 free_tms:
552 	for (i = 0; i < nr_items; i++) {
553 		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
554 			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
555 		kfree(tm_list[i]);
556 	}
557 	if (locked)
558 		write_unlock(&eb->fs_info->tree_mod_log_lock);
559 	kfree(tm_list);
560 	kfree(tm);
561 
562 	return ret;
563 }
564 
565 static inline int
566 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
567 		       struct tree_mod_elem **tm_list,
568 		       int nritems)
569 {
570 	int i, j;
571 	int ret;
572 
573 	for (i = nritems - 1; i >= 0; i--) {
574 		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
575 		if (ret) {
576 			for (j = nritems - 1; j > i; j--)
577 				rb_erase(&tm_list[j]->node,
578 					 &fs_info->tree_mod_log);
579 			return ret;
580 		}
581 	}
582 
583 	return 0;
584 }
585 
586 static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
587 			 struct extent_buffer *new_root, int log_removal)
588 {
589 	struct btrfs_fs_info *fs_info = old_root->fs_info;
590 	struct tree_mod_elem *tm = NULL;
591 	struct tree_mod_elem **tm_list = NULL;
592 	int nritems = 0;
593 	int ret = 0;
594 	int i;
595 
596 	if (!tree_mod_need_log(fs_info, NULL))
597 		return 0;
598 
599 	if (log_removal && btrfs_header_level(old_root) > 0) {
600 		nritems = btrfs_header_nritems(old_root);
601 		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
602 				  GFP_NOFS);
603 		if (!tm_list) {
604 			ret = -ENOMEM;
605 			goto free_tms;
606 		}
607 		for (i = 0; i < nritems; i++) {
608 			tm_list[i] = alloc_tree_mod_elem(old_root, i,
609 			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
610 			if (!tm_list[i]) {
611 				ret = -ENOMEM;
612 				goto free_tms;
613 			}
614 		}
615 	}
616 
617 	tm = kzalloc(sizeof(*tm), GFP_NOFS);
618 	if (!tm) {
619 		ret = -ENOMEM;
620 		goto free_tms;
621 	}
622 
623 	tm->logical = new_root->start;
624 	tm->old_root.logical = old_root->start;
625 	tm->old_root.level = btrfs_header_level(old_root);
626 	tm->generation = btrfs_header_generation(old_root);
627 	tm->op = MOD_LOG_ROOT_REPLACE;
628 
629 	if (tree_mod_dont_log(fs_info, NULL))
630 		goto free_tms;
631 
632 	if (tm_list)
633 		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
634 	if (!ret)
635 		ret = __tree_mod_log_insert(fs_info, tm);
636 
637 	write_unlock(&fs_info->tree_mod_log_lock);
638 	if (ret)
639 		goto free_tms;
640 	kfree(tm_list);
641 
642 	return ret;
643 
644 free_tms:
645 	if (tm_list) {
646 		for (i = 0; i < nritems; i++)
647 			kfree(tm_list[i]);
648 		kfree(tm_list);
649 	}
650 	kfree(tm);
651 
652 	return ret;
653 }
654 
655 static struct tree_mod_elem *
656 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
657 		      int smallest)
658 {
659 	struct rb_root *tm_root;
660 	struct rb_node *node;
661 	struct tree_mod_elem *cur = NULL;
662 	struct tree_mod_elem *found = NULL;
663 
664 	read_lock(&fs_info->tree_mod_log_lock);
665 	tm_root = &fs_info->tree_mod_log;
666 	node = tm_root->rb_node;
667 	while (node) {
668 		cur = rb_entry(node, struct tree_mod_elem, node);
669 		if (cur->logical < start) {
670 			node = node->rb_left;
671 		} else if (cur->logical > start) {
672 			node = node->rb_right;
673 		} else if (cur->seq < min_seq) {
674 			node = node->rb_left;
675 		} else if (!smallest) {
676 			/* we want the node with the highest seq */
677 			if (found)
678 				BUG_ON(found->seq > cur->seq);
679 			found = cur;
680 			node = node->rb_left;
681 		} else if (cur->seq > min_seq) {
682 			/* we want the node with the smallest seq */
683 			if (found)
684 				BUG_ON(found->seq < cur->seq);
685 			found = cur;
686 			node = node->rb_right;
687 		} else {
688 			found = cur;
689 			break;
690 		}
691 	}
692 	read_unlock(&fs_info->tree_mod_log_lock);
693 
694 	return found;
695 }
696 
697 /*
698  * this returns the element from the log with the smallest time sequence
699  * value that's in the log (the oldest log item). any element with a time
700  * sequence lower than min_seq will be ignored.
701  */
702 static struct tree_mod_elem *
703 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
704 			   u64 min_seq)
705 {
706 	return __tree_mod_log_search(fs_info, start, min_seq, 1);
707 }
708 
709 /*
710  * this returns the element from the log with the largest time sequence
711  * value that's in the log (the most recent log item). any element with
712  * a time sequence lower than min_seq will be ignored.
713  */
714 static struct tree_mod_elem *
715 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
716 {
717 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
718 }
719 
720 static noinline int
721 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
722 		     struct extent_buffer *src, unsigned long dst_offset,
723 		     unsigned long src_offset, int nr_items)
724 {
725 	int ret = 0;
726 	struct tree_mod_elem **tm_list = NULL;
727 	struct tree_mod_elem **tm_list_add, **tm_list_rem;
728 	int i;
729 	int locked = 0;
730 
731 	if (!tree_mod_need_log(fs_info, NULL))
732 		return 0;
733 
734 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
735 		return 0;
736 
737 	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
738 			  GFP_NOFS);
739 	if (!tm_list)
740 		return -ENOMEM;
741 
742 	tm_list_add = tm_list;
743 	tm_list_rem = tm_list + nr_items;
744 	for (i = 0; i < nr_items; i++) {
745 		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
746 		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
747 		if (!tm_list_rem[i]) {
748 			ret = -ENOMEM;
749 			goto free_tms;
750 		}
751 
752 		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
753 		    MOD_LOG_KEY_ADD, GFP_NOFS);
754 		if (!tm_list_add[i]) {
755 			ret = -ENOMEM;
756 			goto free_tms;
757 		}
758 	}
759 
760 	if (tree_mod_dont_log(fs_info, NULL))
761 		goto free_tms;
762 	locked = 1;
763 
764 	for (i = 0; i < nr_items; i++) {
765 		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
766 		if (ret)
767 			goto free_tms;
768 		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
769 		if (ret)
770 			goto free_tms;
771 	}
772 
773 	write_unlock(&fs_info->tree_mod_log_lock);
774 	kfree(tm_list);
775 
776 	return 0;
777 
778 free_tms:
779 	for (i = 0; i < nr_items * 2; i++) {
780 		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
781 			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
782 		kfree(tm_list[i]);
783 	}
784 	if (locked)
785 		write_unlock(&fs_info->tree_mod_log_lock);
786 	kfree(tm_list);
787 
788 	return ret;
789 }
790 
791 static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
792 {
793 	struct tree_mod_elem **tm_list = NULL;
794 	int nritems = 0;
795 	int i;
796 	int ret = 0;
797 
798 	if (btrfs_header_level(eb) == 0)
799 		return 0;
800 
801 	if (!tree_mod_need_log(eb->fs_info, NULL))
802 		return 0;
803 
804 	nritems = btrfs_header_nritems(eb);
805 	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
806 	if (!tm_list)
807 		return -ENOMEM;
808 
809 	for (i = 0; i < nritems; i++) {
810 		tm_list[i] = alloc_tree_mod_elem(eb, i,
811 		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
812 		if (!tm_list[i]) {
813 			ret = -ENOMEM;
814 			goto free_tms;
815 		}
816 	}
817 
818 	if (tree_mod_dont_log(eb->fs_info, eb))
819 		goto free_tms;
820 
821 	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
822 	write_unlock(&eb->fs_info->tree_mod_log_lock);
823 	if (ret)
824 		goto free_tms;
825 	kfree(tm_list);
826 
827 	return 0;
828 
829 free_tms:
830 	for (i = 0; i < nritems; i++)
831 		kfree(tm_list[i]);
832 	kfree(tm_list);
833 
834 	return ret;
835 }
836 
837 /*
838  * check if the tree block can be shared by multiple trees
839  */
840 int btrfs_block_can_be_shared(struct btrfs_root *root,
841 			      struct extent_buffer *buf)
842 {
843 	/*
844 	 * Tree blocks not in reference counted trees and tree roots
845 	 * are never shared. If a block was allocated after the last
846 	 * snapshot and the block was not allocated by tree relocation,
847 	 * we know the block is not shared.
848 	 */
849 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
850 	    buf != root->node && buf != root->commit_root &&
851 	    (btrfs_header_generation(buf) <=
852 	     btrfs_root_last_snapshot(&root->root_item) ||
853 	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
854 		return 1;
855 
856 	return 0;
857 }
858 
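/*
 * For instance (illustrative numbers): in a reference counted subvolume
 * with btrfs_root_last_snapshot() == 12, a non-root block written in
 * generation 10 may still be referenced by a snapshot and is reported as
 * shareable, while one written in generation 15 (and not flagged RELOC)
 * is not.
 */
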
859 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
860 				       struct btrfs_root *root,
861 				       struct extent_buffer *buf,
862 				       struct extent_buffer *cow,
863 				       int *last_ref)
864 {
865 	struct btrfs_fs_info *fs_info = root->fs_info;
866 	u64 refs;
867 	u64 owner;
868 	u64 flags;
869 	u64 new_flags = 0;
870 	int ret;
871 
872 	/*
873 	 * Backrefs update rules:
874 	 *
875 	 * Always use full backrefs for extent pointers in tree blocks
876 	 * allocated by tree relocation.
877 	 *
878 	 * If a shared tree block is no longer referenced by its owner
879 	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
880 	 * use full backrefs for extent pointers in the tree block.
881 	 *
882 	 * If a tree block is being relocated
883 	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
884 	 * use full backrefs for extent pointers in the tree block.
885 	 * The reason for this is that some operations (such as dropping
886 	 * a tree) are only allowed for blocks that use full backrefs.
887 	 */
888 
889 	if (btrfs_block_can_be_shared(root, buf)) {
890 		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
891 					       btrfs_header_level(buf), 1,
892 					       &refs, &flags);
893 		if (ret)
894 			return ret;
895 		if (refs == 0) {
896 			ret = -EROFS;
897 			btrfs_handle_fs_error(fs_info, ret, NULL);
898 			return ret;
899 		}
900 	} else {
901 		refs = 1;
902 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
903 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
904 			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
905 		else
906 			flags = 0;
907 	}
908 
909 	owner = btrfs_header_owner(buf);
910 	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
911 	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
912 
913 	if (refs > 1) {
914 		if ((owner == root->root_key.objectid ||
915 		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
916 		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
917 			ret = btrfs_inc_ref(trans, root, buf, 1);
918 			if (ret)
919 				return ret;
920 
921 			if (root->root_key.objectid ==
922 			    BTRFS_TREE_RELOC_OBJECTID) {
923 				ret = btrfs_dec_ref(trans, root, buf, 0);
924 				if (ret)
925 					return ret;
926 				ret = btrfs_inc_ref(trans, root, cow, 1);
927 				if (ret)
928 					return ret;
929 			}
930 			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
931 		} else {
932 
933 			if (root->root_key.objectid ==
934 			    BTRFS_TREE_RELOC_OBJECTID)
935 				ret = btrfs_inc_ref(trans, root, cow, 1);
936 			else
937 				ret = btrfs_inc_ref(trans, root, cow, 0);
938 			if (ret)
939 				return ret;
940 		}
941 		if (new_flags != 0) {
942 			int level = btrfs_header_level(buf);
943 
944 			ret = btrfs_set_disk_extent_flags(trans, fs_info,
945 							  buf->start,
946 							  buf->len,
947 							  new_flags, level, 0);
948 			if (ret)
949 				return ret;
950 		}
951 	} else {
952 		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
953 			if (root->root_key.objectid ==
954 			    BTRFS_TREE_RELOC_OBJECTID)
955 				ret = btrfs_inc_ref(trans, root, cow, 1);
956 			else
957 				ret = btrfs_inc_ref(trans, root, cow, 0);
958 			if (ret)
959 				return ret;
960 			ret = btrfs_dec_ref(trans, root, buf, 1);
961 			if (ret)
962 				return ret;
963 		}
964 		clean_tree_block(fs_info, buf);
965 		*last_ref = 1;
966 	}
967 	return 0;
968 }
969 
970 /*
971  * does the dirty work in cow of a single block.  The parent block (if
972  * supplied) is updated to point to the new cow copy.  The new buffer is marked
973  * dirty and returned locked.  If you modify the block it needs to be marked
974  * dirty again.
975  *
976  * search_start -- an allocation hint for the new block
977  *
978  * empty_size -- a hint that you plan on doing more cow.  This is the size in
979  * bytes the allocator should try to find free next to the block it returns.
980  * This is just a hint and may be ignored by the allocator.
981  */
982 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
983 			     struct btrfs_root *root,
984 			     struct extent_buffer *buf,
985 			     struct extent_buffer *parent, int parent_slot,
986 			     struct extent_buffer **cow_ret,
987 			     u64 search_start, u64 empty_size)
988 {
989 	struct btrfs_fs_info *fs_info = root->fs_info;
990 	struct btrfs_disk_key disk_key;
991 	struct extent_buffer *cow;
992 	int level, ret;
993 	int last_ref = 0;
994 	int unlock_orig = 0;
995 	u64 parent_start = 0;
996 
997 	if (*cow_ret == buf)
998 		unlock_orig = 1;
999 
1000 	btrfs_assert_tree_locked(buf);
1001 
1002 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1003 		trans->transid != fs_info->running_transaction->transid);
1004 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1005 		trans->transid != root->last_trans);
1006 
1007 	level = btrfs_header_level(buf);
1008 
1009 	if (level == 0)
1010 		btrfs_item_key(buf, &disk_key, 0);
1011 	else
1012 		btrfs_node_key(buf, &disk_key, 0);
1013 
1014 	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1015 		parent_start = parent->start;
1016 
1017 	cow = btrfs_alloc_tree_block(trans, root, parent_start,
1018 			root->root_key.objectid, &disk_key, level,
1019 			search_start, empty_size);
1020 	if (IS_ERR(cow))
1021 		return PTR_ERR(cow);
1022 
1023 	/* cow is set to blocking by btrfs_init_new_buffer */
1024 
1025 	copy_extent_buffer_full(cow, buf);
1026 	btrfs_set_header_bytenr(cow, cow->start);
1027 	btrfs_set_header_generation(cow, trans->transid);
1028 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1029 	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1030 				     BTRFS_HEADER_FLAG_RELOC);
1031 	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1032 		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1033 	else
1034 		btrfs_set_header_owner(cow, root->root_key.objectid);
1035 
1036 	write_extent_buffer_fsid(cow, fs_info->fsid);
1037 
1038 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1039 	if (ret) {
1040 		btrfs_abort_transaction(trans, ret);
1041 		return ret;
1042 	}
1043 
1044 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1045 		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1046 		if (ret) {
1047 			btrfs_abort_transaction(trans, ret);
1048 			return ret;
1049 		}
1050 	}
1051 
1052 	if (buf == root->node) {
1053 		WARN_ON(parent && parent != buf);
1054 		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1055 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1056 			parent_start = buf->start;
1057 
1058 		extent_buffer_get(cow);
1059 		ret = tree_mod_log_insert_root(root->node, cow, 1);
1060 		BUG_ON(ret < 0);
1061 		rcu_assign_pointer(root->node, cow);
1062 
1063 		btrfs_free_tree_block(trans, root, buf, parent_start,
1064 				      last_ref);
1065 		free_extent_buffer(buf);
1066 		add_root_to_dirty_list(root);
1067 	} else {
1068 		WARN_ON(trans->transid != btrfs_header_generation(parent));
1069 		tree_mod_log_insert_key(parent, parent_slot,
1070 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1071 		btrfs_set_node_blockptr(parent, parent_slot,
1072 					cow->start);
1073 		btrfs_set_node_ptr_generation(parent, parent_slot,
1074 					      trans->transid);
1075 		btrfs_mark_buffer_dirty(parent);
1076 		if (last_ref) {
1077 			ret = tree_mod_log_free_eb(buf);
1078 			if (ret) {
1079 				btrfs_abort_transaction(trans, ret);
1080 				return ret;
1081 			}
1082 		}
1083 		btrfs_free_tree_block(trans, root, buf, parent_start,
1084 				      last_ref);
1085 	}
1086 	if (unlock_orig)
1087 		btrfs_tree_unlock(buf);
1088 	free_extent_buffer_stale(buf);
1089 	btrfs_mark_buffer_dirty(cow);
1090 	*cow_ret = cow;
1091 	return 0;
1092 }
1093 
1094 /*
1095  * returns the logical address of the oldest predecessor of the given root.
1096  * entries older than time_seq are ignored.
1097  */
1098 static struct tree_mod_elem *__tree_mod_log_oldest_root(
1099 		struct extent_buffer *eb_root, u64 time_seq)
1100 {
1101 	struct tree_mod_elem *tm;
1102 	struct tree_mod_elem *found = NULL;
1103 	u64 root_logical = eb_root->start;
1104 	int looped = 0;
1105 
1106 	if (!time_seq)
1107 		return NULL;
1108 
1109 	/*
1110 	 * the very last operation that's logged for a root is the
1111 	 * replacement operation (if it is replaced at all). this is keyed
1112 	 * by the logical address of the *new* root, making it the very
1113 	 * first operation that's logged for the new root.
1114 	 */
1115 	while (1) {
1116 		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
1117 						time_seq);
1118 		if (!looped && !tm)
1119 			return NULL;
1120 		/*
1121 		 * if there are no tree operations for the oldest root, we simply
1122 		 * return it. this should only happen if that (old) root is at
1123 		 * level 0.
1124 		 */
1125 		if (!tm)
1126 			break;
1127 
1128 		/*
1129 		 * if there's an operation that's not a root replacement, we
1130 		 * found the oldest version of our root. normally, we'll find a
1131 		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1132 		 */
1133 		if (tm->op != MOD_LOG_ROOT_REPLACE)
1134 			break;
1135 
1136 		found = tm;
1137 		root_logical = tm->old_root.logical;
1138 		looped = 1;
1139 	}
1140 
1141 	/* if there's no old root to return, return what we found instead */
1142 	if (!found)
1143 		found = tm;
1144 
1145 	return found;
1146 }
1147 
1148 /*
1149  * tm is a pointer to the first operation to rewind within eb. then, all
1150  * previous operations will be rewound (until we reach something older than
1151  * time_seq).
1152  */
1153 static void
1154 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1155 		      u64 time_seq, struct tree_mod_elem *first_tm)
1156 {
1157 	u32 n;
1158 	struct rb_node *next;
1159 	struct tree_mod_elem *tm = first_tm;
1160 	unsigned long o_dst;
1161 	unsigned long o_src;
1162 	unsigned long p_size = sizeof(struct btrfs_key_ptr);
1163 
1164 	n = btrfs_header_nritems(eb);
1165 	read_lock(&fs_info->tree_mod_log_lock);
1166 	while (tm && tm->seq >= time_seq) {
1167 		/*
1168 		 * all the operations are recorded with the operation that was
1169 		 * the modification. as we're going backwards, we do the
1170 		 * opposite of each operation here.
1171 		 */
1172 		switch (tm->op) {
1173 		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1174 			BUG_ON(tm->slot < n);
1175 			/* Fallthrough */
1176 		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1177 		case MOD_LOG_KEY_REMOVE:
1178 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1179 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1180 			btrfs_set_node_ptr_generation(eb, tm->slot,
1181 						      tm->generation);
1182 			n++;
1183 			break;
1184 		case MOD_LOG_KEY_REPLACE:
1185 			BUG_ON(tm->slot >= n);
1186 			btrfs_set_node_key(eb, &tm->key, tm->slot);
1187 			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1188 			btrfs_set_node_ptr_generation(eb, tm->slot,
1189 						      tm->generation);
1190 			break;
1191 		case MOD_LOG_KEY_ADD:
1192 			/* if a move operation is needed it's in the log */
1193 			n--;
1194 			break;
1195 		case MOD_LOG_MOVE_KEYS:
1196 			o_dst = btrfs_node_key_ptr_offset(tm->slot);
1197 			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1198 			memmove_extent_buffer(eb, o_dst, o_src,
1199 					      tm->move.nr_items * p_size);
1200 			break;
1201 		case MOD_LOG_ROOT_REPLACE:
1202 			/*
1203 			 * this operation is special. for roots, this must be
1204 			 * handled explicitly before rewinding.
1205 			 * for non-roots, this operation may exist if the node
1206 			 * was a root: root A -> child B; then A becomes empty
1207 			 * and B is promoted to the new root. in the mod log,
1208 			 * we'll have a root-replace operation for B, a tree block
1209 			 * that is no longer a root. we simply ignore that operation.
1210 			 */
1211 			break;
1212 		}
1213 		next = rb_next(&tm->node);
1214 		if (!next)
1215 			break;
1216 		tm = rb_entry(next, struct tree_mod_elem, node);
1217 		if (tm->logical != first_tm->logical)
1218 			break;
1219 	}
1220 	read_unlock(&fs_info->tree_mod_log_lock);
1221 	btrfs_set_header_nritems(eb, n);
1222 }
1223 
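/*
 * A worked example with made-up sequence numbers: assume eb currently has
 * nritems == 4 and the log holds, newest first,
 *
 *	seq 30: MOD_LOG_KEY_ADD,   slot 3
 *	seq 20: MOD_LOG_MOVE_KEYS, slot 1, move.dst_slot 0, move.nr_items 3
 *
 * Rewinding to time_seq == 10 first undoes the add by dropping nritems
 * back to 3, then undoes the move by copying the three key_ptrs at slot 0
 * back to slot 1.  Entries with seq < time_seq are left alone.
 */
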
1224 /*
1225  * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1226  * is returned. If rewind operations happen, a fresh buffer is returned. The
1227  * returned buffer is always read-locked. If the returned buffer is not the
1228  * input buffer, the lock on the input buffer is released and the input buffer
1229  * is freed (its refcount is decremented).
1230  */
1231 static struct extent_buffer *
1232 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1233 		    struct extent_buffer *eb, u64 time_seq)
1234 {
1235 	struct extent_buffer *eb_rewin;
1236 	struct tree_mod_elem *tm;
1237 
1238 	if (!time_seq)
1239 		return eb;
1240 
1241 	if (btrfs_header_level(eb) == 0)
1242 		return eb;
1243 
1244 	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1245 	if (!tm)
1246 		return eb;
1247 
1248 	btrfs_set_path_blocking(path);
1249 	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1250 
1251 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1252 		BUG_ON(tm->slot != 0);
1253 		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1254 		if (!eb_rewin) {
1255 			btrfs_tree_read_unlock_blocking(eb);
1256 			free_extent_buffer(eb);
1257 			return NULL;
1258 		}
1259 		btrfs_set_header_bytenr(eb_rewin, eb->start);
1260 		btrfs_set_header_backref_rev(eb_rewin,
1261 					     btrfs_header_backref_rev(eb));
1262 		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1263 		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1264 	} else {
1265 		eb_rewin = btrfs_clone_extent_buffer(eb);
1266 		if (!eb_rewin) {
1267 			btrfs_tree_read_unlock_blocking(eb);
1268 			free_extent_buffer(eb);
1269 			return NULL;
1270 		}
1271 	}
1272 
1273 	btrfs_tree_read_unlock_blocking(eb);
1274 	free_extent_buffer(eb);
1275 
1276 	extent_buffer_get(eb_rewin);
1277 	btrfs_tree_read_lock(eb_rewin);
1278 	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1279 	WARN_ON(btrfs_header_nritems(eb_rewin) >
1280 		BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1281 
1282 	return eb_rewin;
1283 }
1284 
1285 /*
1286  * get_old_root() rewinds the state of @root's root node to the given @time_seq
1287  * value. If there are no changes, the current root->node is returned. If
1288  * anything changed in between, there's a fresh buffer allocated on which the
1289  * rewind operations are done. In any case, the returned buffer is read locked.
1290  * Returns NULL on error (with no locks held).
1291  */
1292 static inline struct extent_buffer *
1293 get_old_root(struct btrfs_root *root, u64 time_seq)
1294 {
1295 	struct btrfs_fs_info *fs_info = root->fs_info;
1296 	struct tree_mod_elem *tm;
1297 	struct extent_buffer *eb = NULL;
1298 	struct extent_buffer *eb_root;
1299 	struct extent_buffer *old;
1300 	struct tree_mod_root *old_root = NULL;
1301 	u64 old_generation = 0;
1302 	u64 logical;
1303 	int level;
1304 
1305 	eb_root = btrfs_read_lock_root_node(root);
1306 	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1307 	if (!tm)
1308 		return eb_root;
1309 
1310 	if (tm->op == MOD_LOG_ROOT_REPLACE) {
1311 		old_root = &tm->old_root;
1312 		old_generation = tm->generation;
1313 		logical = old_root->logical;
1314 		level = old_root->level;
1315 	} else {
1316 		logical = eb_root->start;
1317 		level = btrfs_header_level(eb_root);
1318 	}
1319 
1320 	tm = tree_mod_log_search(fs_info, logical, time_seq);
1321 	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1322 		btrfs_tree_read_unlock(eb_root);
1323 		free_extent_buffer(eb_root);
1324 		old = read_tree_block(fs_info, logical, 0, level, NULL);
1325 		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1326 			if (!IS_ERR(old))
1327 				free_extent_buffer(old);
1328 			btrfs_warn(fs_info,
1329 				   "failed to read tree block %llu from get_old_root",
1330 				   logical);
1331 		} else {
1332 			eb = btrfs_clone_extent_buffer(old);
1333 			free_extent_buffer(old);
1334 		}
1335 	} else if (old_root) {
1336 		btrfs_tree_read_unlock(eb_root);
1337 		free_extent_buffer(eb_root);
1338 		eb = alloc_dummy_extent_buffer(fs_info, logical);
1339 	} else {
1340 		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1341 		eb = btrfs_clone_extent_buffer(eb_root);
1342 		btrfs_tree_read_unlock_blocking(eb_root);
1343 		free_extent_buffer(eb_root);
1344 	}
1345 
1346 	if (!eb)
1347 		return NULL;
1348 	extent_buffer_get(eb);
1349 	btrfs_tree_read_lock(eb);
1350 	if (old_root) {
1351 		btrfs_set_header_bytenr(eb, eb->start);
1352 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1353 		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1354 		btrfs_set_header_level(eb, old_root->level);
1355 		btrfs_set_header_generation(eb, old_generation);
1356 	}
1357 	if (tm)
1358 		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
1359 	else
1360 		WARN_ON(btrfs_header_level(eb) != 0);
1361 	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1362 
1363 	return eb;
1364 }
1365 
1366 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1367 {
1368 	struct tree_mod_elem *tm;
1369 	int level;
1370 	struct extent_buffer *eb_root = btrfs_root_node(root);
1371 
1372 	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1373 	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1374 		level = tm->old_root.level;
1375 	} else {
1376 		level = btrfs_header_level(eb_root);
1377 	}
1378 	free_extent_buffer(eb_root);
1379 
1380 	return level;
1381 }
1382 
1383 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1384 				   struct btrfs_root *root,
1385 				   struct extent_buffer *buf)
1386 {
1387 	if (btrfs_is_testing(root->fs_info))
1388 		return 0;
1389 
1390 	/* Ensure we can see the FORCE_COW bit */
1391 	smp_mb__before_atomic();
1392 
1393 	/*
1394 	 * We do not need to cow a block if
1395 	 * 1) this block is not created or changed in this transaction;
1396 	 * 2) this block does not belong to TREE_RELOC tree;
1397 	 * 3) the root is not forced COW.
1398 	 *
1399 	 * What is forced COW:
1400 	 *    when we create a snapshot while committing the transaction,
1401 	 *    after we've finished copying the src root, we must COW the
1402 	 *    shared block to ensure metadata consistency.
1403 	 */
1404 	if (btrfs_header_generation(buf) == trans->transid &&
1405 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1406 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1407 	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1408 	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1409 		return 0;
1410 	return 1;
1411 }
1412 
1413 /*
1414  * cows a single block, see __btrfs_cow_block for the real work.
1415  * This version of it has extra checks so that a block isn't COWed more than
1416  * once per transaction, as long as it hasn't been written yet.
1417  */
1418 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1419 		    struct btrfs_root *root, struct extent_buffer *buf,
1420 		    struct extent_buffer *parent, int parent_slot,
1421 		    struct extent_buffer **cow_ret)
1422 {
1423 	struct btrfs_fs_info *fs_info = root->fs_info;
1424 	u64 search_start;
1425 	int ret;
1426 
1427 	if (trans->transaction != fs_info->running_transaction)
1428 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1429 		       trans->transid,
1430 		       fs_info->running_transaction->transid);
1431 
1432 	if (trans->transid != fs_info->generation)
1433 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
1434 		       trans->transid, fs_info->generation);
1435 
1436 	if (!should_cow_block(trans, root, buf)) {
1437 		trans->dirty = true;
1438 		*cow_ret = buf;
1439 		return 0;
1440 	}
1441 
1442 	search_start = buf->start & ~((u64)SZ_1G - 1);
1443 
1444 	if (parent)
1445 		btrfs_set_lock_blocking(parent);
1446 	btrfs_set_lock_blocking(buf);
1447 
1448 	ret = __btrfs_cow_block(trans, root, buf, parent,
1449 				 parent_slot, cow_ret, search_start, 0);
1450 
1451 	trace_btrfs_cow_block(root, buf, *cow_ret);
1452 
1453 	return ret;
1454 }
1455 
1456 /*
1457  * helper function for defrag to decide if two blocks pointed to by a
1458  * node are actually close by
1459  */
1460 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1461 {
1462 	if (blocknr < other && other - (blocknr + blocksize) < 32768)
1463 		return 1;
1464 	if (blocknr > other && blocknr - (other + blocksize) < 32768)
1465 		return 1;
1466 	return 0;
1467 }
1468 
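/*
 * For example (illustrative numbers): with a 16KiB nodesize, blocknr 0 and
 * other 40960 give other - (blocknr + blocksize) == 24576, which is below
 * the 32768 byte threshold, so the two blocks count as close.
 */
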
1469 /*
1470  * compare two keys in a memcmp fashion
1471  */
1472 static int comp_keys(const struct btrfs_disk_key *disk,
1473 		     const struct btrfs_key *k2)
1474 {
1475 	struct btrfs_key k1;
1476 
1477 	btrfs_disk_key_to_cpu(&k1, disk);
1478 
1479 	return btrfs_comp_cpu_keys(&k1, k2);
1480 }
1481 
1482 /*
1483  * same as comp_keys only with two btrfs_key's
1484  */
1485 int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
1486 {
1487 	if (k1->objectid > k2->objectid)
1488 		return 1;
1489 	if (k1->objectid < k2->objectid)
1490 		return -1;
1491 	if (k1->type > k2->type)
1492 		return 1;
1493 	if (k1->type < k2->type)
1494 		return -1;
1495 	if (k1->offset > k2->offset)
1496 		return 1;
1497 	if (k1->offset < k2->offset)
1498 		return -1;
1499 	return 0;
1500 }
1501 
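/*
 * Example ordering (illustrative): keys compare by objectid first, then
 * type, then offset, so
 *
 *	{ .objectid = 5, .type = 1, .offset = 100 }
 *
 * sorts before
 *
 *	{ .objectid = 5, .type = 2, .offset = 0 }
 *
 * because the types differ, regardless of the offsets.
 */
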
1502 /*
1503  * this is used by the defrag code to go through all the
1504  * leaves pointed to by a node and reallocate them so that
1505  * disk order is close to key order
1506  */
1507 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1508 		       struct btrfs_root *root, struct extent_buffer *parent,
1509 		       int start_slot, u64 *last_ret,
1510 		       struct btrfs_key *progress)
1511 {
1512 	struct btrfs_fs_info *fs_info = root->fs_info;
1513 	struct extent_buffer *cur;
1514 	u64 blocknr;
1515 	u64 gen;
1516 	u64 search_start = *last_ret;
1517 	u64 last_block = 0;
1518 	u64 other;
1519 	u32 parent_nritems;
1520 	int end_slot;
1521 	int i;
1522 	int err = 0;
1523 	int parent_level;
1524 	int uptodate;
1525 	u32 blocksize;
1526 	int progress_passed = 0;
1527 	struct btrfs_disk_key disk_key;
1528 
1529 	parent_level = btrfs_header_level(parent);
1530 
1531 	WARN_ON(trans->transaction != fs_info->running_transaction);
1532 	WARN_ON(trans->transid != fs_info->generation);
1533 
1534 	parent_nritems = btrfs_header_nritems(parent);
1535 	blocksize = fs_info->nodesize;
1536 	end_slot = parent_nritems - 1;
1537 
1538 	if (parent_nritems <= 1)
1539 		return 0;
1540 
1541 	btrfs_set_lock_blocking(parent);
1542 
1543 	for (i = start_slot; i <= end_slot; i++) {
1544 		struct btrfs_key first_key;
1545 		int close = 1;
1546 
1547 		btrfs_node_key(parent, &disk_key, i);
1548 		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1549 			continue;
1550 
1551 		progress_passed = 1;
1552 		blocknr = btrfs_node_blockptr(parent, i);
1553 		gen = btrfs_node_ptr_generation(parent, i);
1554 		btrfs_node_key_to_cpu(parent, &first_key, i);
1555 		if (last_block == 0)
1556 			last_block = blocknr;
1557 
1558 		if (i > 0) {
1559 			other = btrfs_node_blockptr(parent, i - 1);
1560 			close = close_blocks(blocknr, other, blocksize);
1561 		}
1562 		if (!close && i < end_slot) {
1563 			other = btrfs_node_blockptr(parent, i + 1);
1564 			close = close_blocks(blocknr, other, blocksize);
1565 		}
1566 		if (close) {
1567 			last_block = blocknr;
1568 			continue;
1569 		}
1570 
1571 		cur = find_extent_buffer(fs_info, blocknr);
1572 		if (cur)
1573 			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1574 		else
1575 			uptodate = 0;
1576 		if (!cur || !uptodate) {
1577 			if (!cur) {
1578 				cur = read_tree_block(fs_info, blocknr, gen,
1579 						      parent_level - 1,
1580 						      &first_key);
1581 				if (IS_ERR(cur)) {
1582 					return PTR_ERR(cur);
1583 				} else if (!extent_buffer_uptodate(cur)) {
1584 					free_extent_buffer(cur);
1585 					return -EIO;
1586 				}
1587 			} else if (!uptodate) {
1588 				err = btrfs_read_buffer(cur, gen,
1589 						parent_level - 1,&first_key);
1590 						parent_level - 1, &first_key);
1591 					free_extent_buffer(cur);
1592 					return err;
1593 				}
1594 			}
1595 		}
1596 		if (search_start == 0)
1597 			search_start = last_block;
1598 
1599 		btrfs_tree_lock(cur);
1600 		btrfs_set_lock_blocking(cur);
1601 		err = __btrfs_cow_block(trans, root, cur, parent, i,
1602 					&cur, search_start,
1603 					min(16 * blocksize,
1604 					    (end_slot - i) * blocksize));
1605 		if (err) {
1606 			btrfs_tree_unlock(cur);
1607 			free_extent_buffer(cur);
1608 			break;
1609 		}
1610 		search_start = cur->start;
1611 		last_block = cur->start;
1612 		*last_ret = search_start;
1613 		btrfs_tree_unlock(cur);
1614 		free_extent_buffer(cur);
1615 	}
1616 	return err;
1617 }
1618 
1619 /*
1620  * search for key in the extent_buffer.  The items start at offset p,
1621  * and they are item_size apart.  There are 'max' items in the array.
1622  *
1623  * the slot in the array is returned via slot, and it points to
1624  * the place where you would insert key if it is not found in
1625  * the array.
1626  *
1627  * slot may point to max if the key is bigger than all of the keys.
1628  */
1629 static noinline int generic_bin_search(struct extent_buffer *eb,
1630 				       unsigned long p, int item_size,
1631 				       const struct btrfs_key *key,
1632 				       int max, int *slot)
1633 {
1634 	int low = 0;
1635 	int high = max;
1636 	int mid;
1637 	int ret;
1638 	struct btrfs_disk_key *tmp = NULL;
1639 	struct btrfs_disk_key unaligned;
1640 	unsigned long offset;
1641 	char *kaddr = NULL;
1642 	unsigned long map_start = 0;
1643 	unsigned long map_len = 0;
1644 	int err;
1645 
1646 	if (low > high) {
1647 		btrfs_err(eb->fs_info,
1648 		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1649 			  __func__, low, high, eb->start,
1650 			  btrfs_header_owner(eb), btrfs_header_level(eb));
1651 		return -EINVAL;
1652 	}
1653 
1654 	while (low < high) {
1655 		mid = (low + high) / 2;
1656 		offset = p + mid * item_size;
1657 
1658 		if (!kaddr || offset < map_start ||
1659 		    (offset + sizeof(struct btrfs_disk_key)) >
1660 		    map_start + map_len) {
1661 
1662 			err = map_private_extent_buffer(eb, offset,
1663 						sizeof(struct btrfs_disk_key),
1664 						&kaddr, &map_start, &map_len);
1665 
1666 			if (!err) {
1667 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
1668 							map_start);
1669 			} else if (err == 1) {
1670 				read_extent_buffer(eb, &unaligned,
1671 						   offset, sizeof(unaligned));
1672 				tmp = &unaligned;
1673 			} else {
1674 				return err;
1675 			}
1676 
1677 		} else {
1678 			tmp = (struct btrfs_disk_key *)(kaddr + offset -
1679 							map_start);
1680 		}
1681 		ret = comp_keys(tmp, key);
1682 
1683 		if (ret < 0)
1684 			low = mid + 1;
1685 		else if (ret > 0)
1686 			high = mid;
1687 		else {
1688 			*slot = mid;
1689 			return 0;
1690 		}
1691 	}
1692 	*slot = low;
1693 	return 1;
1694 }
1695 
1696 /*
1697  * simple bin_search frontend that does the right thing for
1698  * leaves vs nodes
1699  */
1700 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1701 		     int level, int *slot)
1702 {
1703 	if (level == 0)
1704 		return generic_bin_search(eb,
1705 					  offsetof(struct btrfs_leaf, items),
1706 					  sizeof(struct btrfs_item),
1707 					  key, btrfs_header_nritems(eb),
1708 					  slot);
1709 	else
1710 		return generic_bin_search(eb,
1711 					  offsetof(struct btrfs_node, ptrs),
1712 					  sizeof(struct btrfs_key_ptr),
1713 					  key, btrfs_header_nritems(eb),
1714 					  slot);
1715 }
1716 
1717 static void root_add_used(struct btrfs_root *root, u32 size)
1718 {
1719 	spin_lock(&root->accounting_lock);
1720 	btrfs_set_root_used(&root->root_item,
1721 			    btrfs_root_used(&root->root_item) + size);
1722 	spin_unlock(&root->accounting_lock);
1723 }
1724 
1725 static void root_sub_used(struct btrfs_root *root, u32 size)
1726 {
1727 	spin_lock(&root->accounting_lock);
1728 	btrfs_set_root_used(&root->root_item,
1729 			    btrfs_root_used(&root->root_item) - size);
1730 	spin_unlock(&root->accounting_lock);
1731 }
1732 
1733 /* given a node and slot number, this reads the block it points to.  The
1734  * extent buffer is returned with a reference taken (but unlocked).
1735  */
1736 static noinline struct extent_buffer *
1737 read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
1738 	       int slot)
1739 {
1740 	int level = btrfs_header_level(parent);
1741 	struct extent_buffer *eb;
1742 	struct btrfs_key first_key;
1743 
1744 	if (slot < 0 || slot >= btrfs_header_nritems(parent))
1745 		return ERR_PTR(-ENOENT);
1746 
1747 	BUG_ON(level == 0);
1748 
1749 	btrfs_node_key_to_cpu(parent, &first_key, slot);
1750 	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
1751 			     btrfs_node_ptr_generation(parent, slot),
1752 			     level - 1, &first_key);
1753 	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1754 		free_extent_buffer(eb);
1755 		eb = ERR_PTR(-EIO);
1756 	}
1757 
1758 	return eb;
1759 }
1760 
1761 /*
1762  * node level balancing, used to make sure nodes are in proper order for
1763  * item deletion.  We balance from the top down, so we have to make sure
1764  * that a deletion won't leave an node completely empty later on.
1765  * that a deletion won't leave a node completely empty later on.
1766 static noinline int balance_level(struct btrfs_trans_handle *trans,
1767 			 struct btrfs_root *root,
1768 			 struct btrfs_path *path, int level)
1769 {
1770 	struct btrfs_fs_info *fs_info = root->fs_info;
1771 	struct extent_buffer *right = NULL;
1772 	struct extent_buffer *mid;
1773 	struct extent_buffer *left = NULL;
1774 	struct extent_buffer *parent = NULL;
1775 	int ret = 0;
1776 	int wret;
1777 	int pslot;
1778 	int orig_slot = path->slots[level];
1779 	u64 orig_ptr;
1780 
1781 	ASSERT(level > 0);
1782 
1783 	mid = path->nodes[level];
1784 
1785 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1786 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1787 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1788 
1789 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1790 
1791 	if (level < BTRFS_MAX_LEVEL - 1) {
1792 		parent = path->nodes[level + 1];
1793 		pslot = path->slots[level + 1];
1794 	}
1795 
1796 	/*
1797 	 * deal with the case where there is only one pointer in the root
1798 	 * by promoting the node below to a root
1799 	 */
1800 	if (!parent) {
1801 		struct extent_buffer *child;
1802 
1803 		if (btrfs_header_nritems(mid) != 1)
1804 			return 0;
1805 
1806 		/* promote the child to a root */
1807 		child = read_node_slot(fs_info, mid, 0);
1808 		if (IS_ERR(child)) {
1809 			ret = PTR_ERR(child);
1810 			btrfs_handle_fs_error(fs_info, ret, NULL);
1811 			goto enospc;
1812 		}
1813 
1814 		btrfs_tree_lock(child);
1815 		btrfs_set_lock_blocking(child);
1816 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1817 		if (ret) {
1818 			btrfs_tree_unlock(child);
1819 			free_extent_buffer(child);
1820 			goto enospc;
1821 		}
1822 
1823 		ret = tree_mod_log_insert_root(root->node, child, 1);
1824 		BUG_ON(ret < 0);
1825 		rcu_assign_pointer(root->node, child);
1826 
1827 		add_root_to_dirty_list(root);
1828 		btrfs_tree_unlock(child);
1829 
1830 		path->locks[level] = 0;
1831 		path->nodes[level] = NULL;
1832 		clean_tree_block(fs_info, mid);
1833 		btrfs_tree_unlock(mid);
1834 		/* once for the path */
1835 		free_extent_buffer(mid);
1836 
1837 		root_sub_used(root, mid->len);
1838 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1839 		/* once for the root ptr */
1840 		free_extent_buffer_stale(mid);
1841 		return 0;
1842 	}
1843 	if (btrfs_header_nritems(mid) >
1844 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1845 		return 0;
1846 
1847 	left = read_node_slot(fs_info, parent, pslot - 1);
1848 	if (IS_ERR(left))
1849 		left = NULL;
1850 
1851 	if (left) {
1852 		btrfs_tree_lock(left);
1853 		btrfs_set_lock_blocking(left);
1854 		wret = btrfs_cow_block(trans, root, left,
1855 				       parent, pslot - 1, &left);
1856 		if (wret) {
1857 			ret = wret;
1858 			goto enospc;
1859 		}
1860 	}
1861 
1862 	right = read_node_slot(fs_info, parent, pslot + 1);
1863 	if (IS_ERR(right))
1864 		right = NULL;
1865 
1866 	if (right) {
1867 		btrfs_tree_lock(right);
1868 		btrfs_set_lock_blocking(right);
1869 		wret = btrfs_cow_block(trans, root, right,
1870 				       parent, pslot + 1, &right);
1871 		if (wret) {
1872 			ret = wret;
1873 			goto enospc;
1874 		}
1875 	}
1876 
1877 	/* first, try to make some room in the middle buffer */
1878 	if (left) {
1879 		orig_slot += btrfs_header_nritems(left);
1880 		wret = push_node_left(trans, fs_info, left, mid, 1);
1881 		if (wret < 0)
1882 			ret = wret;
1883 	}
1884 
1885 	/*
1886  * then try to empty the rightmost buffer into the middle
1887 	 */
1888 	if (right) {
1889 		wret = push_node_left(trans, fs_info, mid, right, 1);
1890 		if (wret < 0 && wret != -ENOSPC)
1891 			ret = wret;
1892 		if (btrfs_header_nritems(right) == 0) {
1893 			clean_tree_block(fs_info, right);
1894 			btrfs_tree_unlock(right);
1895 			del_ptr(root, path, level + 1, pslot + 1);
1896 			root_sub_used(root, right->len);
1897 			btrfs_free_tree_block(trans, root, right, 0, 1);
1898 			free_extent_buffer_stale(right);
1899 			right = NULL;
1900 		} else {
1901 			struct btrfs_disk_key right_key;
1902 			btrfs_node_key(right, &right_key, 0);
1903 			ret = tree_mod_log_insert_key(parent, pslot + 1,
1904 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
1905 			BUG_ON(ret < 0);
1906 			btrfs_set_node_key(parent, &right_key, pslot + 1);
1907 			btrfs_mark_buffer_dirty(parent);
1908 		}
1909 	}
1910 	if (btrfs_header_nritems(mid) == 1) {
1911 		/*
1912 		 * we're not allowed to leave a node with one item in the
1913 		 * tree during a delete.  A deletion from lower in the tree
1914 		 * could try to delete the only pointer in this node.
1915 		 * So, pull some keys from the left.
1916 		 * There has to be a left pointer at this point because
1917 		 * otherwise we would have pulled some pointers from the
1918 		 * right
1919 		 */
1920 		if (!left) {
1921 			ret = -EROFS;
1922 			btrfs_handle_fs_error(fs_info, ret, NULL);
1923 			goto enospc;
1924 		}
1925 		wret = balance_node_right(trans, fs_info, mid, left);
1926 		if (wret < 0) {
1927 			ret = wret;
1928 			goto enospc;
1929 		}
1930 		if (wret == 1) {
1931 			wret = push_node_left(trans, fs_info, left, mid, 1);
1932 			if (wret < 0)
1933 				ret = wret;
1934 		}
1935 		BUG_ON(wret == 1);
1936 	}
1937 	if (btrfs_header_nritems(mid) == 0) {
1938 		clean_tree_block(fs_info, mid);
1939 		btrfs_tree_unlock(mid);
1940 		del_ptr(root, path, level + 1, pslot);
1941 		root_sub_used(root, mid->len);
1942 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1943 		free_extent_buffer_stale(mid);
1944 		mid = NULL;
1945 	} else {
1946 		/* update the parent key to reflect our changes */
1947 		struct btrfs_disk_key mid_key;
1948 		btrfs_node_key(mid, &mid_key, 0);
1949 		ret = tree_mod_log_insert_key(parent, pslot,
1950 				MOD_LOG_KEY_REPLACE, GFP_NOFS);
1951 		BUG_ON(ret < 0);
1952 		btrfs_set_node_key(parent, &mid_key, pslot);
1953 		btrfs_mark_buffer_dirty(parent);
1954 	}
1955 
1956 	/* update the path */
1957 	if (left) {
1958 		if (btrfs_header_nritems(left) > orig_slot) {
1959 			extent_buffer_get(left);
1960 			/* left was locked after cow */
1961 			path->nodes[level] = left;
1962 			path->slots[level + 1] -= 1;
1963 			path->slots[level] = orig_slot;
1964 			if (mid) {
1965 				btrfs_tree_unlock(mid);
1966 				free_extent_buffer(mid);
1967 			}
1968 		} else {
1969 			orig_slot -= btrfs_header_nritems(left);
1970 			path->slots[level] = orig_slot;
1971 		}
1972 	}
1973 	/* double check we haven't messed things up */
1974 	if (orig_ptr !=
1975 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1976 		BUG();
1977 enospc:
1978 	if (right) {
1979 		btrfs_tree_unlock(right);
1980 		free_extent_buffer(right);
1981 	}
1982 	if (left) {
1983 		if (path->nodes[level] != left)
1984 			btrfs_tree_unlock(left);
1985 		free_extent_buffer(left);
1986 	}
1987 	return ret;
1988 }
1989 
1990 /* Node balancing for insertion.  Here we only split or push nodes around
1991  * when they are completely full.  This is also done top down, so we
1992  * have to be pessimistic.
1993  */
1994 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1995 					  struct btrfs_root *root,
1996 					  struct btrfs_path *path, int level)
1997 {
1998 	struct btrfs_fs_info *fs_info = root->fs_info;
1999 	struct extent_buffer *right = NULL;
2000 	struct extent_buffer *mid;
2001 	struct extent_buffer *left = NULL;
2002 	struct extent_buffer *parent = NULL;
2003 	int ret = 0;
2004 	int wret;
2005 	int pslot;
2006 	int orig_slot = path->slots[level];
2007 
2008 	if (level == 0)
2009 		return 1;
2010 
2011 	mid = path->nodes[level];
2012 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2013 
2014 	if (level < BTRFS_MAX_LEVEL - 1) {
2015 		parent = path->nodes[level + 1];
2016 		pslot = path->slots[level + 1];
2017 	}
2018 
2019 	if (!parent)
2020 		return 1;
2021 
2022 	left = read_node_slot(fs_info, parent, pslot - 1);
2023 	if (IS_ERR(left))
2024 		left = NULL;
2025 
2026 	/* first, try to make some room in the middle buffer */
2027 	if (left) {
2028 		u32 left_nr;
2029 
2030 		btrfs_tree_lock(left);
2031 		btrfs_set_lock_blocking(left);
2032 
2033 		left_nr = btrfs_header_nritems(left);
2034 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2035 			wret = 1;
2036 		} else {
2037 			ret = btrfs_cow_block(trans, root, left, parent,
2038 					      pslot - 1, &left);
2039 			if (ret)
2040 				wret = 1;
2041 			else {
2042 				wret = push_node_left(trans, fs_info,
2043 						      left, mid, 0);
2044 			}
2045 		}
2046 		if (wret < 0)
2047 			ret = wret;
2048 		if (wret == 0) {
2049 			struct btrfs_disk_key disk_key;
2050 			orig_slot += left_nr;
2051 			btrfs_node_key(mid, &disk_key, 0);
2052 			ret = tree_mod_log_insert_key(parent, pslot,
2053 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2054 			BUG_ON(ret < 0);
2055 			btrfs_set_node_key(parent, &disk_key, pslot);
2056 			btrfs_mark_buffer_dirty(parent);
2057 			if (btrfs_header_nritems(left) > orig_slot) {
2058 				path->nodes[level] = left;
2059 				path->slots[level + 1] -= 1;
2060 				path->slots[level] = orig_slot;
2061 				btrfs_tree_unlock(mid);
2062 				free_extent_buffer(mid);
2063 			} else {
2064 				orig_slot -=
2065 					btrfs_header_nritems(left);
2066 				path->slots[level] = orig_slot;
2067 				btrfs_tree_unlock(left);
2068 				free_extent_buffer(left);
2069 			}
2070 			return 0;
2071 		}
2072 		btrfs_tree_unlock(left);
2073 		free_extent_buffer(left);
2074 	}
2075 	right = read_node_slot(fs_info, parent, pslot + 1);
2076 	if (IS_ERR(right))
2077 		right = NULL;
2078 
2079 	/*
2080 	 * then try to empty the right most buffer into the middle
2081 	 */
2082 	if (right) {
2083 		u32 right_nr;
2084 
2085 		btrfs_tree_lock(right);
2086 		btrfs_set_lock_blocking(right);
2087 
2088 		right_nr = btrfs_header_nritems(right);
2089 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2090 			wret = 1;
2091 		} else {
2092 			ret = btrfs_cow_block(trans, root, right,
2093 					      parent, pslot + 1,
2094 					      &right);
2095 			if (ret)
2096 				wret = 1;
2097 			else {
2098 				wret = balance_node_right(trans, fs_info,
2099 							  right, mid);
2100 			}
2101 		}
2102 		if (wret < 0)
2103 			ret = wret;
2104 		if (wret == 0) {
2105 			struct btrfs_disk_key disk_key;
2106 
2107 			btrfs_node_key(right, &disk_key, 0);
2108 			ret = tree_mod_log_insert_key(parent, pslot + 1,
2109 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
2110 			BUG_ON(ret < 0);
2111 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2112 			btrfs_mark_buffer_dirty(parent);
2113 
2114 			if (btrfs_header_nritems(mid) <= orig_slot) {
2115 				path->nodes[level] = right;
2116 				path->slots[level + 1] += 1;
2117 				path->slots[level] = orig_slot -
2118 					btrfs_header_nritems(mid);
2119 				btrfs_tree_unlock(mid);
2120 				free_extent_buffer(mid);
2121 			} else {
2122 				btrfs_tree_unlock(right);
2123 				free_extent_buffer(right);
2124 			}
2125 			return 0;
2126 		}
2127 		btrfs_tree_unlock(right);
2128 		free_extent_buffer(right);
2129 	}
2130 	return 1;
2131 }
2132 
2133 /*
2134  * readahead one full node of leaves, finding things that are close
2135  * to the block in 'slot', and triggering ra on them.
2136  */
2137 static void reada_for_search(struct btrfs_fs_info *fs_info,
2138 			     struct btrfs_path *path,
2139 			     int level, int slot, u64 objectid)
2140 {
2141 	struct extent_buffer *node;
2142 	struct btrfs_disk_key disk_key;
2143 	u32 nritems;
2144 	u64 search;
2145 	u64 target;
2146 	u64 nread = 0;
2147 	struct extent_buffer *eb;
2148 	u32 nr;
2149 	u32 blocksize;
2150 	u32 nscan = 0;
2151 
2152 	if (level != 1)
2153 		return;
2154 
2155 	if (!path->nodes[level])
2156 		return;
2157 
2158 	node = path->nodes[level];
2159 
2160 	search = btrfs_node_blockptr(node, slot);
2161 	blocksize = fs_info->nodesize;
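	/* if the target block is already cached there is nothing to read ahead */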
2162 	eb = find_extent_buffer(fs_info, search);
2163 	if (eb) {
2164 		free_extent_buffer(eb);
2165 		return;
2166 	}
2167 
2168 	target = search;
2169 
2170 	nritems = btrfs_header_nritems(node);
2171 	nr = slot;
2172 
2173 	while (1) {
2174 		if (path->reada == READA_BACK) {
2175 			if (nr == 0)
2176 				break;
2177 			nr--;
2178 		} else if (path->reada == READA_FORWARD) {
2179 			nr++;
2180 			if (nr >= nritems)
2181 				break;
2182 		}
2183 		if (path->reada == READA_BACK && objectid) {
2184 			btrfs_node_key(node, &disk_key, nr);
2185 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2186 				break;
2187 		}
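		/*
		 * only read ahead blocks within 64K of the target so the
		 * IO stays reasonably clustered
		 */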
2188 		search = btrfs_node_blockptr(node, nr);
2189 		if ((search <= target && target - search <= 65536) ||
2190 		    (search > target && search - target <= 65536)) {
2191 			readahead_tree_block(fs_info, search);
2192 			nread += blocksize;
2193 		}
2194 		nscan++;
2195 		if ((nread > 65536 || nscan > 32))
2196 			break;
2197 	}
2198 }
2199 
2200 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2201 				       struct btrfs_path *path, int level)
2202 {
2203 	int slot;
2204 	int nritems;
2205 	struct extent_buffer *parent;
2206 	struct extent_buffer *eb;
2207 	u64 gen;
2208 	u64 block1 = 0;
2209 	u64 block2 = 0;
2210 
2211 	parent = path->nodes[level + 1];
2212 	if (!parent)
2213 		return;
2214 
2215 	nritems = btrfs_header_nritems(parent);
2216 	slot = path->slots[level + 1];
2217 
2218 	if (slot > 0) {
2219 		block1 = btrfs_node_blockptr(parent, slot - 1);
2220 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2221 		eb = find_extent_buffer(fs_info, block1);
2222 		/*
2223 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2224 		 * don't want to return EAGAIN here.  That would loop
2225 		 * forever
2226 		 */
2227 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2228 			block1 = 0;
2229 		free_extent_buffer(eb);
2230 	}
2231 	if (slot + 1 < nritems) {
2232 		block2 = btrfs_node_blockptr(parent, slot + 1);
2233 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2234 		eb = find_extent_buffer(fs_info, block2);
2235 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2236 			block2 = 0;
2237 		free_extent_buffer(eb);
2238 	}
2239 
2240 	if (block1)
2241 		readahead_tree_block(fs_info, block1);
2242 	if (block2)
2243 		readahead_tree_block(fs_info, block2);
2244 }
2245 
2246 
2247 /*
2248  * when we walk down the tree, it is usually safe to unlock the higher layers
2249  * in the tree.  The exceptions are when our path goes through slot 0, because
2250  * operations on the tree might require changing key pointers higher up in the
2251  * tree.
2252  *
2253  * callers might also have set path->keep_locks, which tells this code to keep
2254  * the lock if the path points to the last slot in the block.  This is part of
2255  * walking through the tree, and selecting the next slot in the higher block.
2256  *
2257  * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
2258  * if lowest_unlock is 1, level 0 won't be unlocked
2259  */
2260 static noinline void unlock_up(struct btrfs_path *path, int level,
2261 			       int lowest_unlock, int min_write_lock_level,
2262 			       int *write_lock_level)
2263 {
2264 	int i;
2265 	int skip_level = level;
2266 	int no_skips = 0;
2267 	struct extent_buffer *t;
2268 
2269 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2270 		if (!path->nodes[i])
2271 			break;
2272 		if (!path->locks[i])
2273 			break;
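		/* slot 0 may need a key update in the parent, keep it locked */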
2274 		if (!no_skips && path->slots[i] == 0) {
2275 			skip_level = i + 1;
2276 			continue;
2277 		}
2278 		if (!no_skips && path->keep_locks) {
2279 			u32 nritems;
2280 			t = path->nodes[i];
2281 			nritems = btrfs_header_nritems(t);
2282 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2283 				skip_level = i + 1;
2284 				continue;
2285 			}
2286 		}
2287 		if (skip_level < i && i >= lowest_unlock)
2288 			no_skips = 1;
2289 
2290 		t = path->nodes[i];
2291 		if (i >= lowest_unlock && i > skip_level) {
2292 			btrfs_tree_unlock_rw(t, path->locks[i]);
2293 			path->locks[i] = 0;
2294 			if (write_lock_level &&
2295 			    i > min_write_lock_level &&
2296 			    i <= *write_lock_level) {
2297 				*write_lock_level = i - 1;
2298 			}
2299 		}
2300 	}
2301 }
2302 
2303 /*
2304  * This releases any locks held in the path starting at level and
2305  * going all the way up to the root.
2306  *
2307  * btrfs_search_slot will keep the lock held on higher nodes in a few
2308  * corner cases, such as COW of the block at slot zero in the node.  This
2309  * ignores those rules, and it should only be called when there are no
2310  * more updates to be done higher up in the tree.
2311  */
2312 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2313 {
2314 	int i;
2315 
2316 	if (path->keep_locks)
2317 		return;
2318 
2319 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2320 		if (!path->nodes[i])
2321 			continue;
2322 		if (!path->locks[i])
2323 			continue;
2324 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2325 		path->locks[i] = 0;
2326 	}
2327 }
2328 
2329 /*
2330  * helper function for btrfs_search_slot.  The goal is to find a block
2331  * in cache without setting the path to blocking.  If we find the block
2332  * we return zero and the path is unchanged.
2333  *
2334  * If we can't find the block, we set the path blocking and do some
2335  * reada.  -EAGAIN is returned and the search must be repeated.
2336  */
2337 static int
2338 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2339 		      struct extent_buffer **eb_ret, int level, int slot,
2340 		      const struct btrfs_key *key)
2341 {
2342 	struct btrfs_fs_info *fs_info = root->fs_info;
2343 	u64 blocknr;
2344 	u64 gen;
2345 	struct extent_buffer *b = *eb_ret;
2346 	struct extent_buffer *tmp;
2347 	struct btrfs_key first_key;
2348 	int ret;
2349 	int parent_level;
2350 
2351 	blocknr = btrfs_node_blockptr(b, slot);
2352 	gen = btrfs_node_ptr_generation(b, slot);
2353 	parent_level = btrfs_header_level(b);
2354 	btrfs_node_key_to_cpu(b, &first_key, slot);
2355 
2356 	tmp = find_extent_buffer(fs_info, blocknr);
2357 	if (tmp) {
2358 		/* first we do an atomic uptodate check */
2359 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2360 			*eb_ret = tmp;
2361 			return 0;
2362 		}
2363 
2364 		/* the pages were up to date, but we failed
2365 		 * the generation number check.  Do a full
2366 		 * read of the block to get the correct generation.
2367 		 * We must do this without dropping locks so
2368 		 * we can trust our generation number
2369 		 */
2370 		btrfs_set_path_blocking(p);
2371 
2372 		/* now we're allowed to do a blocking uptodate check */
2373 		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2374 		if (!ret) {
2375 			*eb_ret = tmp;
2376 			return 0;
2377 		}
2378 		free_extent_buffer(tmp);
2379 		btrfs_release_path(p);
2380 		return -EIO;
2381 	}
2382 
2383 	/*
2384 	 * reduce lock contention at high levels
2385 	 * of the btree by dropping locks before
2386 	 * we read.  Don't release the lock on the current
2387 	 * level because we need to walk this node to figure
2388 	 * out which blocks to read.
2389 	 */
2390 	btrfs_unlock_up_safe(p, level + 1);
2391 	btrfs_set_path_blocking(p);
2392 
2393 	if (p->reada != READA_NONE)
2394 		reada_for_search(fs_info, p, level, slot, key->objectid);
2395 
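	/* default to -EAGAIN so the caller retries the whole search */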
2396 	ret = -EAGAIN;
2397 	tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2398 			      &first_key);
2399 	if (!IS_ERR(tmp)) {
2400 		/*
2401 		 * If the read above didn't mark this buffer up to date,
2402 		 * it will never end up being up to date.  Set ret to EIO now
2403 		 * and give up so that our caller doesn't loop forever
2404 		 * on our EAGAINs.
2405 		 */
2406 		if (!extent_buffer_uptodate(tmp))
2407 			ret = -EIO;
2408 		free_extent_buffer(tmp);
2409 	} else {
2410 		ret = PTR_ERR(tmp);
2411 	}
2412 
2413 	btrfs_release_path(p);
2414 	return ret;
2415 }
2416 
2417 /*
2418  * helper function for btrfs_search_slot.  This does all of the checks
2419  * for node-level blocks and does any balancing required based on
2420  * the ins_len.
2421  *
2422  * If no extra work was required, zero is returned.  If we had to
2423  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2424  * start over
2425  */
2426 static int
2427 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2428 		       struct btrfs_root *root, struct btrfs_path *p,
2429 		       struct extent_buffer *b, int level, int ins_len,
2430 		       int *write_lock_level)
2431 {
2432 	struct btrfs_fs_info *fs_info = root->fs_info;
2433 	int ret;
2434 
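	/*
	 * inserts (and split searches) split nodes that are almost full,
	 * deletes merge nodes that have dropped below half full
	 */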
2435 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2436 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2437 		int sret;
2438 
2439 		if (*write_lock_level < level + 1) {
2440 			*write_lock_level = level + 1;
2441 			btrfs_release_path(p);
2442 			goto again;
2443 		}
2444 
2445 		btrfs_set_path_blocking(p);
2446 		reada_for_balance(fs_info, p, level);
2447 		sret = split_node(trans, root, p, level);
2448 
2449 		BUG_ON(sret > 0);
2450 		if (sret) {
2451 			ret = sret;
2452 			goto done;
2453 		}
2454 		b = p->nodes[level];
2455 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2456 		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2457 		int sret;
2458 
2459 		if (*write_lock_level < level + 1) {
2460 			*write_lock_level = level + 1;
2461 			btrfs_release_path(p);
2462 			goto again;
2463 		}
2464 
2465 		btrfs_set_path_blocking(p);
2466 		reada_for_balance(fs_info, p, level);
2467 		sret = balance_level(trans, root, p, level);
2468 
2469 		if (sret) {
2470 			ret = sret;
2471 			goto done;
2472 		}
2473 		b = p->nodes[level];
2474 		if (!b) {
2475 			btrfs_release_path(p);
2476 			goto again;
2477 		}
2478 		BUG_ON(btrfs_header_nritems(b) == 1);
2479 	}
2480 	return 0;
2481 
2482 again:
2483 	ret = -EAGAIN;
2484 done:
2485 	return ret;
2486 }
2487 
2488 static void key_search_validate(struct extent_buffer *b,
2489 				const struct btrfs_key *key,
2490 				int level)
2491 {
2492 #ifdef CONFIG_BTRFS_ASSERT
2493 	struct btrfs_disk_key disk_key;
2494 
2495 	btrfs_cpu_key_to_disk(&disk_key, key);
2496 
2497 	if (level == 0)
2498 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2499 		    offsetof(struct btrfs_leaf, items[0].key),
2500 		    sizeof(disk_key)));
2501 	else
2502 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2503 		    offsetof(struct btrfs_node, ptrs[0].key),
2504 		    sizeof(disk_key)));
2505 #endif
2506 }
2507 
2508 static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2509 		      int level, int *prev_cmp, int *slot)
2510 {
2511 	if (*prev_cmp != 0) {
2512 		*prev_cmp = btrfs_bin_search(b, key, level, slot);
2513 		return *prev_cmp;
2514 	}
2515 
2516 	key_search_validate(b, key, level);
2517 	*slot = 0;
2518 
2519 	return 0;
2520 }
2521 
2522 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2523 		u64 iobjectid, u64 ioff, u8 key_type,
2524 		struct btrfs_key *found_key)
2525 {
2526 	int ret;
2527 	struct btrfs_key key;
2528 	struct extent_buffer *eb;
2529 
2530 	ASSERT(path);
2531 	ASSERT(found_key);
2532 
2533 	key.type = key_type;
2534 	key.objectid = iobjectid;
2535 	key.offset = ioff;
2536 
2537 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2538 	if (ret < 0)
2539 		return ret;
2540 
2541 	eb = path->nodes[0];
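	/* the search may land past the last item, step to the next leaf */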
2542 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2543 		ret = btrfs_next_leaf(fs_root, path);
2544 		if (ret)
2545 			return ret;
2546 		eb = path->nodes[0];
2547 	}
2548 
2549 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2550 	if (found_key->type != key.type ||
2551 			found_key->objectid != key.objectid)
2552 		return 1;
2553 
2554 	return 0;
2555 }
2556 
2557 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2558 							struct btrfs_path *p,
2559 							int write_lock_level)
2560 {
2561 	struct btrfs_fs_info *fs_info = root->fs_info;
2562 	struct extent_buffer *b;
2563 	int root_lock;
2564 	int level = 0;
2565 
2566 	/* We try very hard to do read locks on the root */
2567 	root_lock = BTRFS_READ_LOCK;
2568 
2569 	if (p->search_commit_root) {
2570 		/* The commit roots are read only so we always do read locks */
2571 		if (p->need_commit_sem)
2572 			down_read(&fs_info->commit_root_sem);
2573 		b = root->commit_root;
2574 		extent_buffer_get(b);
2575 		level = btrfs_header_level(b);
2576 		if (p->need_commit_sem)
2577 			up_read(&fs_info->commit_root_sem);
2578 		/*
2579 		 * Ensure that all callers have set skip_locking when
2580 		 * p->search_commit_root = 1.
2581 		 */
2582 		ASSERT(p->skip_locking == 1);
2583 
2584 		goto out;
2585 	}
2586 
2587 	if (p->skip_locking) {
2588 		b = btrfs_root_node(root);
2589 		level = btrfs_header_level(b);
2590 		goto out;
2591 	}
2592 
2593 	/*
2594 	 * If the write lock level is already at the maximum, we can skip
2595 	 * trying to get the read lock.
2596 	 */
2597 	if (write_lock_level < BTRFS_MAX_LEVEL) {
2598 		/*
2599 		 * We don't know the level of the root node until we actually
2600 		 * have it read locked
2601 		 */
2602 		b = btrfs_read_lock_root_node(root);
2603 		level = btrfs_header_level(b);
2604 		if (level > write_lock_level)
2605 			goto out;
2606 
2607 		/* Whoops, must trade for write lock */
2608 		btrfs_tree_read_unlock(b);
2609 		free_extent_buffer(b);
2610 	}
2611 
2612 	b = btrfs_lock_root_node(root);
2613 	root_lock = BTRFS_WRITE_LOCK;
2614 
2615 	/* The level might have changed, check again */
2616 	level = btrfs_header_level(b);
2617 
2618 out:
2619 	p->nodes[level] = b;
2620 	if (!p->skip_locking)
2621 		p->locks[level] = root_lock;
2622 	/*
2623 	 * Callers are responsible for dropping b's references.
2624 	 */
2625 	return b;
2626 }
2627 
2628 
2629 /*
2630  * btrfs_search_slot - look for a key in a tree and perform necessary
2631  * modifications to preserve tree invariants.
2632  *
2633  * @trans:	Handle of transaction, used when modifying the tree
2634  * @p:		Holds all btree nodes along the search path
2635  * @root:	The root node of the tree
2636  * @key:	The key we are looking for
2637  * @ins_len:	Indicates purpose of search, for inserts it is 1, for
2638  *		deletions it's -1. 0 for plain searches
2639  * @cow:	whether CoW operations should be performed. Must always be 1
2640  *		when modifying the tree.
2641  *
2642  * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2643  * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2644  *
2645  * If @key is found, 0 is returned and you can find the item in the leaf level
2646  * of the path (level 0)
2647  *
2648  * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2649  * points to the slot where it should be inserted
2650  *
2651  * If an error is encountered while searching the tree a negative error number
2652  * is returned
2653  */
2654 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2655 		      const struct btrfs_key *key, struct btrfs_path *p,
2656 		      int ins_len, int cow)
2657 {
2658 	struct btrfs_fs_info *fs_info = root->fs_info;
2659 	struct extent_buffer *b;
2660 	int slot;
2661 	int ret;
2662 	int err;
2663 	int level;
2664 	int lowest_unlock = 1;
2665 	/* everything at write_lock_level or lower must be write locked */
2666 	int write_lock_level = 0;
2667 	u8 lowest_level = 0;
2668 	int min_write_lock_level;
2669 	int prev_cmp;
2670 
2671 	lowest_level = p->lowest_level;
2672 	WARN_ON(lowest_level && ins_len > 0);
2673 	WARN_ON(p->nodes[0] != NULL);
2674 	BUG_ON(!cow && ins_len);
2675 
2676 	if (ins_len < 0) {
2677 		lowest_unlock = 2;
2678 
2679 		/* when we are removing items, we might have to go up to level
2680 		 * two as we update tree pointers.  Make sure we keep write
2681 		 * locks for those levels as well
2682 		 */
2683 		write_lock_level = 2;
2684 	} else if (ins_len > 0) {
2685 		/*
2686 		 * for inserting items, make sure we have a write lock on
2687 		 * level 1 so we can update keys
2688 		 */
2689 		write_lock_level = 1;
2690 	}
2691 
2692 	if (!cow)
2693 		write_lock_level = -1;
2694 
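	/*
	 * callers that keep locks or stop the search above the leaf may
	 * end up modifying nodes at any level, so write lock everything
	 * to be safe
	 */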
2695 	if (cow && (p->keep_locks || p->lowest_level))
2696 		write_lock_level = BTRFS_MAX_LEVEL;
2697 
2698 	min_write_lock_level = write_lock_level;
2699 
2700 again:
2701 	prev_cmp = -1;
2702 	b = btrfs_search_slot_get_root(root, p, write_lock_level);
2703 
2704 	while (b) {
2705 		level = btrfs_header_level(b);
2706 
2707 		/*
2708 		 * setup the path here so we can release it under lock
2709 		 * contention with the cow code
2710 		 */
2711 		if (cow) {
2712 			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2713 
2714 			/*
2715 			 * if we don't really need to cow this block
2716 			 * then we don't want to set the path blocking,
2717 			 * so we test it here
2718 			 */
2719 			if (!should_cow_block(trans, root, b)) {
2720 				trans->dirty = true;
2721 				goto cow_done;
2722 			}
2723 
2724 			/*
2725 			 * must have write locks on this node and the
2726 			 * parent
2727 			 */
2728 			if (level > write_lock_level ||
2729 			    (level + 1 > write_lock_level &&
2730 			    level + 1 < BTRFS_MAX_LEVEL &&
2731 			    p->nodes[level + 1])) {
2732 				write_lock_level = level + 1;
2733 				btrfs_release_path(p);
2734 				goto again;
2735 			}
2736 
2737 			btrfs_set_path_blocking(p);
2738 			if (last_level)
2739 				err = btrfs_cow_block(trans, root, b, NULL, 0,
2740 						      &b);
2741 			else
2742 				err = btrfs_cow_block(trans, root, b,
2743 						      p->nodes[level + 1],
2744 						      p->slots[level + 1], &b);
2745 			if (err) {
2746 				ret = err;
2747 				goto done;
2748 			}
2749 		}
2750 cow_done:
2751 		p->nodes[level] = b;
2752 		/*
2753 		 * Leave the path with blocking locks to avoid massive
2754 		 * lock context switching; this is done on purpose.
2755 		 */
2756 
2757 		/*
2758 		 * we have a lock on b and as long as we aren't changing
2759 		 * the tree, there is no way for the items in b to change.
2760 		 * It is safe to drop the lock on our parent before we
2761 		 * go through the expensive btree search on b.
2762 		 *
2763 		 * If we're inserting or deleting (ins_len != 0), then we might
2764 		 * be changing slot zero, which may require changing the parent.
2765 		 * So, we can't drop the lock until after we know which slot
2766 		 * we're operating on.
2767 		 */
2768 		if (!ins_len && !p->keep_locks) {
2769 			int u = level + 1;
2770 
2771 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2772 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2773 				p->locks[u] = 0;
2774 			}
2775 		}
2776 
2777 		ret = key_search(b, key, level, &prev_cmp, &slot);
2778 		if (ret < 0)
2779 			goto done;
2780 
2781 		if (level != 0) {
2782 			int dec = 0;
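			/*
			 * if the key wasn't an exact match, bin search
			 * returned the slot after it; step back one so we
			 * descend into the subtree that can contain the key
			 */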
2783 			if (ret && slot > 0) {
2784 				dec = 1;
2785 				slot -= 1;
2786 			}
2787 			p->slots[level] = slot;
2788 			err = setup_nodes_for_search(trans, root, p, b, level,
2789 					     ins_len, &write_lock_level);
2790 			if (err == -EAGAIN)
2791 				goto again;
2792 			if (err) {
2793 				ret = err;
2794 				goto done;
2795 			}
2796 			b = p->nodes[level];
2797 			slot = p->slots[level];
2798 
2799 			/*
2800 			 * slot 0 is special: if we change the key
2801 			 * we have to update the parent pointer
2802 			 * which means we must have a write lock
2803 			 * on the parent
2804 			 */
2805 			if (slot == 0 && ins_len &&
2806 			    write_lock_level < level + 1) {
2807 				write_lock_level = level + 1;
2808 				btrfs_release_path(p);
2809 				goto again;
2810 			}
2811 
2812 			unlock_up(p, level, lowest_unlock,
2813 				  min_write_lock_level, &write_lock_level);
2814 
2815 			if (level == lowest_level) {
2816 				if (dec)
2817 					p->slots[level]++;
2818 				goto done;
2819 			}
2820 
2821 			err = read_block_for_search(root, p, &b, level,
2822 						    slot, key);
2823 			if (err == -EAGAIN)
2824 				goto again;
2825 			if (err) {
2826 				ret = err;
2827 				goto done;
2828 			}
2829 
2830 			if (!p->skip_locking) {
2831 				level = btrfs_header_level(b);
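				/*
				 * try a non-blocking lock first and only
				 * fall back to a blocking lock if that fails
				 */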
2832 				if (level <= write_lock_level) {
2833 					err = btrfs_try_tree_write_lock(b);
2834 					if (!err) {
2835 						btrfs_set_path_blocking(p);
2836 						btrfs_tree_lock(b);
2837 					}
2838 					p->locks[level] = BTRFS_WRITE_LOCK;
2839 				} else {
2840 					err = btrfs_tree_read_lock_atomic(b);
2841 					if (!err) {
2842 						btrfs_set_path_blocking(p);
2843 						btrfs_tree_read_lock(b);
2844 					}
2845 					p->locks[level] = BTRFS_READ_LOCK;
2846 				}
2847 				p->nodes[level] = b;
2848 			}
2849 		} else {
2850 			p->slots[level] = slot;
2851 			if (ins_len > 0 &&
2852 			    btrfs_leaf_free_space(fs_info, b) < ins_len) {
2853 				if (write_lock_level < 1) {
2854 					write_lock_level = 1;
2855 					btrfs_release_path(p);
2856 					goto again;
2857 				}
2858 
2859 				btrfs_set_path_blocking(p);
2860 				err = split_leaf(trans, root, key,
2861 						 p, ins_len, ret == 0);
2862 
2863 				BUG_ON(err > 0);
2864 				if (err) {
2865 					ret = err;
2866 					goto done;
2867 				}
2868 			}
2869 			if (!p->search_for_split)
2870 				unlock_up(p, level, lowest_unlock,
2871 					  min_write_lock_level, NULL);
2872 			goto done;
2873 		}
2874 	}
2875 	ret = 1;
2876 done:
2877 	/*
2878 	 * we don't really know what they plan on doing with the path
2879 	 * from here on, so for now just mark it as blocking
2880 	 */
2881 	if (!p->leave_spinning)
2882 		btrfs_set_path_blocking(p);
2883 	if (ret < 0 && !p->skip_release_on_error)
2884 		btrfs_release_path(p);
2885 	return ret;
2886 }
2887 
2888 /*
2889  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2890  * current state of the tree together with the operations recorded in the tree
2891  * modification log to search for the key in a previous version of this tree, as
2892  * denoted by the time_seq parameter.
2893  *
2894  * Naturally, there is no support for insert, delete or cow operations.
2895  *
2896  * The resulting path and return value will be set up as if we called
2897  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2898  */
2899 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2900 			  struct btrfs_path *p, u64 time_seq)
2901 {
2902 	struct btrfs_fs_info *fs_info = root->fs_info;
2903 	struct extent_buffer *b;
2904 	int slot;
2905 	int ret;
2906 	int err;
2907 	int level;
2908 	int lowest_unlock = 1;
2909 	u8 lowest_level = 0;
2910 	int prev_cmp = -1;
2911 
2912 	lowest_level = p->lowest_level;
2913 	WARN_ON(p->nodes[0] != NULL);
2914 
2915 	if (p->search_commit_root) {
2916 		BUG_ON(time_seq);
2917 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2918 	}
2919 
2920 again:
2921 	b = get_old_root(root, time_seq);
2922 	if (!b) {
2923 		ret = -EIO;
2924 		goto done;
2925 	}
2926 	level = btrfs_header_level(b);
2927 	p->locks[level] = BTRFS_READ_LOCK;
2928 
2929 	while (b) {
2930 		level = btrfs_header_level(b);
2931 		p->nodes[level] = b;
2932 
2933 		/*
2934 		 * we have a lock on b and as long as we aren't changing
2935 		 * the tree, there is no way for the items in b to change.
2936 		 * It is safe to drop the lock on our parent before we
2937 		 * go through the expensive btree search on b.
2938 		 */
2939 		btrfs_unlock_up_safe(p, level + 1);
2940 
2941 		/*
2942 		 * Since we can unwind extent buffers, we want to do a real search every
2943 		 * time.
2944 		 */
2945 		prev_cmp = -1;
2946 		ret = key_search(b, key, level, &prev_cmp, &slot);
2947 
2948 		if (level != 0) {
2949 			int dec = 0;
2950 			if (ret && slot > 0) {
2951 				dec = 1;
2952 				slot -= 1;
2953 			}
2954 			p->slots[level] = slot;
2955 			unlock_up(p, level, lowest_unlock, 0, NULL);
2956 
2957 			if (level == lowest_level) {
2958 				if (dec)
2959 					p->slots[level]++;
2960 				goto done;
2961 			}
2962 
2963 			err = read_block_for_search(root, p, &b, level,
2964 						    slot, key);
2965 			if (err == -EAGAIN)
2966 				goto again;
2967 			if (err) {
2968 				ret = err;
2969 				goto done;
2970 			}
2971 
2972 			level = btrfs_header_level(b);
2973 			err = btrfs_tree_read_lock_atomic(b);
2974 			if (!err) {
2975 				btrfs_set_path_blocking(p);
2976 				btrfs_tree_read_lock(b);
2977 			}
2978 			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
2979 			if (!b) {
2980 				ret = -ENOMEM;
2981 				goto done;
2982 			}
2983 			p->locks[level] = BTRFS_READ_LOCK;
2984 			p->nodes[level] = b;
2985 		} else {
2986 			p->slots[level] = slot;
2987 			unlock_up(p, level, lowest_unlock, 0, NULL);
2988 			goto done;
2989 		}
2990 	}
2991 	ret = 1;
2992 done:
2993 	if (!p->leave_spinning)
2994 		btrfs_set_path_blocking(p);
2995 	if (ret < 0)
2996 		btrfs_release_path(p);
2997 
2998 	return ret;
2999 }
3000 
3001 /*
3002  * helper to use instead of btrfs_search_slot if no exact match is needed but
3003  * instead the next or previous item should be returned.
3004  * When find_higher is true, the next higher item is returned, the next lower
3005  * otherwise.
3006  * When return_any and find_higher are both true, and no higher item is found,
3007  * return the next lower instead.
3008  * When return_any is true and find_higher is false, and no lower item is found,
3009  * return the next higher instead.
3010  * It returns 0 if any item is found, 1 if none is found (tree empty), and
3011  * < 0 on error
3012  */
3013 int btrfs_search_slot_for_read(struct btrfs_root *root,
3014 			       const struct btrfs_key *key,
3015 			       struct btrfs_path *p, int find_higher,
3016 			       int return_any)
3017 {
3018 	int ret;
3019 	struct extent_buffer *leaf;
3020 
3021 again:
3022 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3023 	if (ret <= 0)
3024 		return ret;
3025 	/*
3026 	 * a return value of 1 means the path is at the position where the
3027 	 * item should be inserted. Normally this is the next bigger item,
3028 	 * but if the previous item is the last one in its leaf, the path points
3029 	 * to the first free slot in the previous leaf, i.e. at an invalid
3030 	 * item.
3031 	 */
3032 	leaf = p->nodes[0];
3033 
3034 	if (find_higher) {
3035 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3036 			ret = btrfs_next_leaf(root, p);
3037 			if (ret <= 0)
3038 				return ret;
3039 			if (!return_any)
3040 				return 1;
3041 			/*
3042 			 * no higher item found, return the next
3043 			 * lower instead
3044 			 */
3045 			return_any = 0;
3046 			find_higher = 0;
3047 			btrfs_release_path(p);
3048 			goto again;
3049 		}
3050 	} else {
3051 		if (p->slots[0] == 0) {
3052 			ret = btrfs_prev_leaf(root, p);
3053 			if (ret < 0)
3054 				return ret;
3055 			if (!ret) {
3056 				leaf = p->nodes[0];
3057 				if (p->slots[0] == btrfs_header_nritems(leaf))
3058 					p->slots[0]--;
3059 				return 0;
3060 			}
3061 			if (!return_any)
3062 				return 1;
3063 			/*
3064 			 * no lower item found, return the next
3065 			 * higher instead
3066 			 */
3067 			return_any = 0;
3068 			find_higher = 1;
3069 			btrfs_release_path(p);
3070 			goto again;
3071 		} else {
3072 			--p->slots[0];
3073 		}
3074 	}
3075 	return 0;
3076 }
3077 
3078 /*
3079  * adjust the pointers going up the tree, starting at level,
3080  * making sure the right key of each node points to 'key'.
3081  * This is used after shifting pointers to the left, so it stops
3082  * fixing up pointers when a given leaf/node is not in slot 0 of the
3083  * higher levels
3084  *
3085  */
3086 static void fixup_low_keys(struct btrfs_path *path,
3087 			   struct btrfs_disk_key *key, int level)
3088 {
3089 	int i;
3090 	struct extent_buffer *t;
3091 	int ret;
3092 
3093 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3094 		int tslot = path->slots[i];
3095 
3096 		if (!path->nodes[i])
3097 			break;
3098 		t = path->nodes[i];
3099 		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3100 				GFP_ATOMIC);
3101 		BUG_ON(ret < 0);
3102 		btrfs_set_node_key(t, key, tslot);
3103 		btrfs_mark_buffer_dirty(path->nodes[i]);
3104 		if (tslot != 0)
3105 			break;
3106 	}
3107 }
3108 
3109 /*
3110  * update item key.
3111  *
3112  * This function isn't completely safe. It's the caller's responsibility
3113  * to ensure that the new key won't break the ordering
3114  */
3115 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3116 			     struct btrfs_path *path,
3117 			     const struct btrfs_key *new_key)
3118 {
3119 	struct btrfs_disk_key disk_key;
3120 	struct extent_buffer *eb;
3121 	int slot;
3122 
3123 	eb = path->nodes[0];
3124 	slot = path->slots[0];
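	/* the new key must still sort strictly between its neighbors */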
3125 	if (slot > 0) {
3126 		btrfs_item_key(eb, &disk_key, slot - 1);
3127 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3128 	}
3129 	if (slot < btrfs_header_nritems(eb) - 1) {
3130 		btrfs_item_key(eb, &disk_key, slot + 1);
3131 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3132 	}
3133 
3134 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3135 	btrfs_set_item_key(eb, &disk_key, slot);
3136 	btrfs_mark_buffer_dirty(eb);
3137 	if (slot == 0)
3138 		fixup_low_keys(path, &disk_key, 1);
3139 }
3140 
3141 /*
3142  * try to push data from one node into the next node left in the
3143  * tree.
3144  *
3145  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3146  * error, and > 0 if there was no room in the left hand block.
3147  */
3148 static int push_node_left(struct btrfs_trans_handle *trans,
3149 			  struct btrfs_fs_info *fs_info,
3150 			  struct extent_buffer *dst,
3151 			  struct extent_buffer *src, int empty)
3152 {
3153 	int push_items = 0;
3154 	int src_nritems;
3155 	int dst_nritems;
3156 	int ret = 0;
3157 
3158 	src_nritems = btrfs_header_nritems(src);
3159 	dst_nritems = btrfs_header_nritems(dst);
3160 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3161 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3162 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3163 
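	/* unless we are emptying the source, leave small nodes alone */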
3164 	if (!empty && src_nritems <= 8)
3165 		return 1;
3166 
3167 	if (push_items <= 0)
3168 		return 1;
3169 
3170 	if (empty) {
3171 		push_items = min(src_nritems, push_items);
3172 		if (push_items < src_nritems) {
3173 			/* leave at least 8 pointers in the node if
3174 			 * we aren't going to empty it
3175 			 */
3176 			if (src_nritems - push_items < 8) {
3177 				if (push_items <= 8)
3178 					return 1;
3179 				push_items -= 8;
3180 			}
3181 		}
3182 	} else
3183 		push_items = min(src_nritems - 8, push_items);
3184 
3185 	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3186 				   push_items);
3187 	if (ret) {
3188 		btrfs_abort_transaction(trans, ret);
3189 		return ret;
3190 	}
3191 	copy_extent_buffer(dst, src,
3192 			   btrfs_node_key_ptr_offset(dst_nritems),
3193 			   btrfs_node_key_ptr_offset(0),
3194 			   push_items * sizeof(struct btrfs_key_ptr));
3195 
3196 	if (push_items < src_nritems) {
3197 		/*
3198 		 * Don't call tree_mod_log_insert_move here, key removal was
3199 		 * already fully logged by tree_mod_log_eb_copy above.
3200 		 */
3201 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3202 				      btrfs_node_key_ptr_offset(push_items),
3203 				      (src_nritems - push_items) *
3204 				      sizeof(struct btrfs_key_ptr));
3205 	}
3206 	btrfs_set_header_nritems(src, src_nritems - push_items);
3207 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3208 	btrfs_mark_buffer_dirty(src);
3209 	btrfs_mark_buffer_dirty(dst);
3210 
3211 	return ret;
3212 }
3213 
3214 /*
3215  * try to push data from one node into the next node right in the
3216  * tree.
3217  *
3218  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3219  * error, and > 0 if there was no room in the right hand block.
3220  *
3221  * this will only push up to 1/2 the contents of the left node over
3222  */
3223 static int balance_node_right(struct btrfs_trans_handle *trans,
3224 			      struct btrfs_fs_info *fs_info,
3225 			      struct extent_buffer *dst,
3226 			      struct extent_buffer *src)
3227 {
3228 	int push_items = 0;
3229 	int max_push;
3230 	int src_nritems;
3231 	int dst_nritems;
3232 	int ret = 0;
3233 
3234 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3235 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3236 
3237 	src_nritems = btrfs_header_nritems(src);
3238 	dst_nritems = btrfs_header_nritems(dst);
3239 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3240 	if (push_items <= 0)
3241 		return 1;
3242 
3243 	if (src_nritems < 4)
3244 		return 1;
3245 
3246 	max_push = src_nritems / 2 + 1;
3247 	/* don't try to empty the node */
3248 	if (max_push >= src_nritems)
3249 		return 1;
3250 
3251 	if (max_push < push_items)
3252 		push_items = max_push;
3253 
3254 	ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3255 	BUG_ON(ret < 0);
3256 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3257 				      btrfs_node_key_ptr_offset(0),
3258 				      (dst_nritems) *
3259 				      sizeof(struct btrfs_key_ptr));
3260 
3261 	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3262 				   src_nritems - push_items, push_items);
3263 	if (ret) {
3264 		btrfs_abort_transaction(trans, ret);
3265 		return ret;
3266 	}
3267 	copy_extent_buffer(dst, src,
3268 			   btrfs_node_key_ptr_offset(0),
3269 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3270 			   push_items * sizeof(struct btrfs_key_ptr));
3271 
3272 	btrfs_set_header_nritems(src, src_nritems - push_items);
3273 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3274 
3275 	btrfs_mark_buffer_dirty(src);
3276 	btrfs_mark_buffer_dirty(dst);
3277 
3278 	return ret;
3279 }
3280 
3281 /*
3282  * helper function to insert a new root level in the tree.
3283  * A new node is allocated, and a single item is inserted to
3284  * point to the existing root
3285  *
3286  * returns zero on success or < 0 on failure.
3287  */
3288 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3289 			   struct btrfs_root *root,
3290 			   struct btrfs_path *path, int level)
3291 {
3292 	struct btrfs_fs_info *fs_info = root->fs_info;
3293 	u64 lower_gen;
3294 	struct extent_buffer *lower;
3295 	struct extent_buffer *c;
3296 	struct extent_buffer *old;
3297 	struct btrfs_disk_key lower_key;
3298 	int ret;
3299 
3300 	BUG_ON(path->nodes[level]);
3301 	BUG_ON(path->nodes[level-1] != root->node);
3302 
3303 	lower = path->nodes[level-1];
3304 	if (level == 1)
3305 		btrfs_item_key(lower, &lower_key, 0);
3306 	else
3307 		btrfs_node_key(lower, &lower_key, 0);
3308 
3309 	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3310 				   &lower_key, level, root->node->start, 0);
3311 	if (IS_ERR(c))
3312 		return PTR_ERR(c);
3313 
3314 	root_add_used(root, fs_info->nodesize);
3315 
3316 	btrfs_set_header_nritems(c, 1);
3317 	btrfs_set_node_key(c, &lower_key, 0);
3318 	btrfs_set_node_blockptr(c, 0, lower->start);
3319 	lower_gen = btrfs_header_generation(lower);
3320 	WARN_ON(lower_gen != trans->transid);
3321 
3322 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3323 
3324 	btrfs_mark_buffer_dirty(c);
3325 
3326 	old = root->node;
3327 	ret = tree_mod_log_insert_root(root->node, c, 0);
3328 	BUG_ON(ret < 0);
3329 	rcu_assign_pointer(root->node, c);
3330 
3331 	/* the super has an extra ref to root->node */
3332 	free_extent_buffer(old);
3333 
3334 	add_root_to_dirty_list(root);
3335 	extent_buffer_get(c);
3336 	path->nodes[level] = c;
3337 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3338 	path->slots[level] = 0;
3339 	return 0;
3340 }
3341 
3342 /*
3343  * worker function to insert a single pointer in a node.
3344  * the node should have enough room for the pointer already
3345  *
3346  * slot and level indicate where you want the key to go, and
3347  * blocknr is the block the key points to.
3348  */
3349 static void insert_ptr(struct btrfs_trans_handle *trans,
3350 		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3351 		       struct btrfs_disk_key *key, u64 bytenr,
3352 		       int slot, int level)
3353 {
3354 	struct extent_buffer *lower;
3355 	int nritems;
3356 	int ret;
3357 
3358 	BUG_ON(!path->nodes[level]);
3359 	btrfs_assert_tree_locked(path->nodes[level]);
3360 	lower = path->nodes[level];
3361 	nritems = btrfs_header_nritems(lower);
3362 	BUG_ON(slot > nritems);
3363 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
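	/* shift the existing pointers right to make room at 'slot' */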
3364 	if (slot != nritems) {
3365 		if (level) {
3366 			ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3367 					nritems - slot);
3368 			BUG_ON(ret < 0);
3369 		}
3370 		memmove_extent_buffer(lower,
3371 			      btrfs_node_key_ptr_offset(slot + 1),
3372 			      btrfs_node_key_ptr_offset(slot),
3373 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3374 	}
3375 	if (level) {
3376 		ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3377 				GFP_NOFS);
3378 		BUG_ON(ret < 0);
3379 	}
3380 	btrfs_set_node_key(lower, key, slot);
3381 	btrfs_set_node_blockptr(lower, slot, bytenr);
3382 	WARN_ON(trans->transid == 0);
3383 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3384 	btrfs_set_header_nritems(lower, nritems + 1);
3385 	btrfs_mark_buffer_dirty(lower);
3386 }
3387 
3388 /*
3389  * split the node at the specified level in path in two.
3390  * The path is corrected to point to the appropriate node after the split
3391  *
3392  * Before splitting this tries to make some room in the node by pushing
3393  * left and right; if either one works, it returns right away.
3394  *
3395  * returns 0 on success and < 0 on failure
3396  */
3397 static noinline int split_node(struct btrfs_trans_handle *trans,
3398 			       struct btrfs_root *root,
3399 			       struct btrfs_path *path, int level)
3400 {
3401 	struct btrfs_fs_info *fs_info = root->fs_info;
3402 	struct extent_buffer *c;
3403 	struct extent_buffer *split;
3404 	struct btrfs_disk_key disk_key;
3405 	int mid;
3406 	int ret;
3407 	u32 c_nritems;
3408 
3409 	c = path->nodes[level];
3410 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3411 	if (c == root->node) {
3412 		/*
3413 		 * trying to split the root, let's make a new one
3414 		 *
3415 		 * tree mod log: We don't log removal of the old root in
3416 		 * insert_new_root, because that root buffer will be kept as a
3417 		 * normal node. We are going to log removal of half of the
3418 		 * elements below with tree_mod_log_eb_copy. We're holding a
3419 		 * tree lock on the buffer, which is why we cannot race with
3420 		 * other tree_mod_log users.
3421 		 */
3422 		ret = insert_new_root(trans, root, path, level + 1);
3423 		if (ret)
3424 			return ret;
3425 	} else {
3426 		ret = push_nodes_for_insert(trans, root, path, level);
3427 		c = path->nodes[level];
3428 		if (!ret && btrfs_header_nritems(c) <
3429 		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3430 			return 0;
3431 		if (ret < 0)
3432 			return ret;
3433 	}
3434 
3435 	c_nritems = btrfs_header_nritems(c);
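	/* everything from slot 'mid' onward moves to the new node */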
3436 	mid = (c_nritems + 1) / 2;
3437 	btrfs_node_key(c, &disk_key, mid);
3438 
3439 	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3440 			&disk_key, level, c->start, 0);
3441 	if (IS_ERR(split))
3442 		return PTR_ERR(split);
3443 
3444 	root_add_used(root, fs_info->nodesize);
3445 	ASSERT(btrfs_header_level(c) == level);
3446 
3447 	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3448 	if (ret) {
3449 		btrfs_abort_transaction(trans, ret);
3450 		return ret;
3451 	}
3452 	copy_extent_buffer(split, c,
3453 			   btrfs_node_key_ptr_offset(0),
3454 			   btrfs_node_key_ptr_offset(mid),
3455 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3456 	btrfs_set_header_nritems(split, c_nritems - mid);
3457 	btrfs_set_header_nritems(c, mid);
3458 	ret = 0;
3459 
3460 	btrfs_mark_buffer_dirty(c);
3461 	btrfs_mark_buffer_dirty(split);
3462 
3463 	insert_ptr(trans, fs_info, path, &disk_key, split->start,
3464 		   path->slots[level + 1] + 1, level + 1);
3465 
3466 	if (path->slots[level] >= mid) {
3467 		path->slots[level] -= mid;
3468 		btrfs_tree_unlock(c);
3469 		free_extent_buffer(c);
3470 		path->nodes[level] = split;
3471 		path->slots[level + 1] += 1;
3472 	} else {
3473 		btrfs_tree_unlock(split);
3474 		free_extent_buffer(split);
3475 	}
3476 	return ret;
3477 }
3478 
3479 /*
3480  * how many bytes are required to store the items in a leaf.  start
3481  * and nr indicate which items in the leaf to check.  This totals up the
3482  * space used both by the item structs and the item data
3483  */
3484 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3485 {
3486 	struct btrfs_item *start_item;
3487 	struct btrfs_item *end_item;
3488 	struct btrfs_map_token token;
3489 	int data_len;
3490 	int nritems = btrfs_header_nritems(l);
3491 	int end = min(nritems, start + nr) - 1;
3492 
3493 	if (!nr)
3494 		return 0;
3495 	btrfs_init_map_token(&token);
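	/*
	 * leaf data grows from the end of the block toward the item
	 * headers, so the bytes used are the span from the first item's
	 * data end down to the last item's data start, plus the headers
	 */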
3496 	start_item = btrfs_item_nr(start);
3497 	end_item = btrfs_item_nr(end);
3498 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3499 		btrfs_token_item_size(l, start_item, &token);
3500 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3501 	data_len += sizeof(struct btrfs_item) * nr;
3502 	WARN_ON(data_len < 0);
3503 	return data_len;
3504 }
3505 
3506 /*
3507  * The space between the end of the leaf items and
3508  * the start of the leaf data.  IOW, how much room
3509  * the leaf has left for both items and data
3510  */
3511 noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3512 				   struct extent_buffer *leaf)
3513 {
3514 	int nritems = btrfs_header_nritems(leaf);
3515 	int ret;
3516 
3517 	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3518 	if (ret < 0) {
3519 		btrfs_crit(fs_info,
3520 			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3521 			   ret,
3522 			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3523 			   leaf_space_used(leaf, 0, nritems), nritems);
3524 	}
3525 	return ret;
3526 }
3527 
3528 /*
3529  * min_slot controls the lowest index we're willing to push to the
3530  * right.  We'll push up to and including min_slot, but no lower
3531  */
3532 static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
3533 				      struct btrfs_path *path,
3534 				      int data_size, int empty,
3535 				      struct extent_buffer *right,
3536 				      int free_space, u32 left_nritems,
3537 				      u32 min_slot)
3538 {
3539 	struct extent_buffer *left = path->nodes[0];
3540 	struct extent_buffer *upper = path->nodes[1];
3541 	struct btrfs_map_token token;
3542 	struct btrfs_disk_key disk_key;
3543 	int slot;
3544 	u32 i;
3545 	int push_space = 0;
3546 	int push_items = 0;
3547 	struct btrfs_item *item;
3548 	u32 nr;
3549 	u32 right_nritems;
3550 	u32 data_end;
3551 	u32 this_item_size;
3552 
3553 	btrfs_init_map_token(&token);
3554 
3555 	if (empty)
3556 		nr = 0;
3557 	else
3558 		nr = max_t(u32, 1, min_slot);
3559 
3560 	if (path->slots[0] >= left_nritems)
3561 		push_space += data_size;
3562 
3563 	slot = path->slots[1];
3564 	i = left_nritems - 1;
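	/* walk from the rightmost item down to 'nr', taking what fits */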
3565 	while (i >= nr) {
3566 		item = btrfs_item_nr(i);
3567 
3568 		if (!empty && push_items > 0) {
3569 			if (path->slots[0] > i)
3570 				break;
3571 			if (path->slots[0] == i) {
3572 				int space = btrfs_leaf_free_space(fs_info, left);
3573 				if (space + push_space * 2 > free_space)
3574 					break;
3575 			}
3576 		}
3577 
3578 		if (path->slots[0] == i)
3579 			push_space += data_size;
3580 
3581 		this_item_size = btrfs_item_size(left, item);
3582 		if (this_item_size + sizeof(*item) + push_space > free_space)
3583 			break;
3584 
3585 		push_items++;
3586 		push_space += this_item_size + sizeof(*item);
3587 		if (i == 0)
3588 			break;
3589 		i--;
3590 	}
3591 
3592 	if (push_items == 0)
3593 		goto out_unlock;
3594 
3595 	WARN_ON(!empty && push_items == left_nritems);
3596 
3597 	/* push left to right */
3598 	right_nritems = btrfs_header_nritems(right);
3599 
3600 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3601 	push_space -= leaf_data_end(fs_info, left);
3602 
3603 	/* make room in the right data area */
3604 	data_end = leaf_data_end(fs_info, right);
3605 	memmove_extent_buffer(right,
3606 			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3607 			      BTRFS_LEAF_DATA_OFFSET + data_end,
3608 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3609 
3610 	/* copy from the left data area */
3611 	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3612 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3613 		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
3614 		     push_space);
3615 
3616 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3617 			      btrfs_item_nr_offset(0),
3618 			      right_nritems * sizeof(struct btrfs_item));
3619 
3620 	/* copy the items from left to right */
3621 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3622 		   btrfs_item_nr_offset(left_nritems - push_items),
3623 		   push_items * sizeof(struct btrfs_item));
3624 
3625 	/* update the item pointers */
3626 	right_nritems += push_items;
3627 	btrfs_set_header_nritems(right, right_nritems);
3628 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3629 	for (i = 0; i < right_nritems; i++) {
3630 		item = btrfs_item_nr(i);
3631 		push_space -= btrfs_token_item_size(right, item, &token);
3632 		btrfs_set_token_item_offset(right, item, push_space, &token);
3633 	}
3634 
3635 	left_nritems -= push_items;
3636 	btrfs_set_header_nritems(left, left_nritems);
3637 
3638 	if (left_nritems)
3639 		btrfs_mark_buffer_dirty(left);
3640 	else
3641 		clean_tree_block(fs_info, left);
3642 
3643 	btrfs_mark_buffer_dirty(right);
3644 
3645 	btrfs_item_key(right, &disk_key, 0);
3646 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3647 	btrfs_mark_buffer_dirty(upper);
3648 
3649 	/* then fixup the leaf pointer in the path */
3650 	if (path->slots[0] >= left_nritems) {
3651 		path->slots[0] -= left_nritems;
3652 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3653 			clean_tree_block(fs_info, path->nodes[0]);
3654 		btrfs_tree_unlock(path->nodes[0]);
3655 		free_extent_buffer(path->nodes[0]);
3656 		path->nodes[0] = right;
3657 		path->slots[1] += 1;
3658 	} else {
3659 		btrfs_tree_unlock(right);
3660 		free_extent_buffer(right);
3661 	}
3662 	return 0;
3663 
3664 out_unlock:
3665 	btrfs_tree_unlock(right);
3666 	free_extent_buffer(right);
3667 	return 1;
3668 }
3669 
3670 /*
3671  * push some data in the path leaf to the right, trying to free up at
3672  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3673  *
3674  * returns 1 if the push failed because the other node didn't have enough
3675  * room, 0 if everything worked out and < 0 if there were major errors.
3676  *
3677  * this will push starting from min_slot to the end of the leaf.  It won't
3678  * push any slot lower than min_slot
3679  */
3680 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3681 			   *root, struct btrfs_path *path,
3682 			   int min_data_size, int data_size,
3683 			   int empty, u32 min_slot)
3684 {
3685 	struct btrfs_fs_info *fs_info = root->fs_info;
3686 	struct extent_buffer *left = path->nodes[0];
3687 	struct extent_buffer *right;
3688 	struct extent_buffer *upper;
3689 	int slot;
3690 	int free_space;
3691 	u32 left_nritems;
3692 	int ret;
3693 
3694 	if (!path->nodes[1])
3695 		return 1;
3696 
3697 	slot = path->slots[1];
3698 	upper = path->nodes[1];
3699 	if (slot >= btrfs_header_nritems(upper) - 1)
3700 		return 1;
3701 
3702 	btrfs_assert_tree_locked(path->nodes[1]);
3703 
3704 	right = read_node_slot(fs_info, upper, slot + 1);
3705 	/*
3706 	 * if slot + 1 is not valid or we failed to read the right node,
3707 	 * no big deal, just return.
3708 	 */
3709 	if (IS_ERR(right))
3710 		return 1;
3711 
3712 	btrfs_tree_lock(right);
3713 	btrfs_set_lock_blocking(right);
3714 
3715 	free_space = btrfs_leaf_free_space(fs_info, right);
3716 	if (free_space < data_size)
3717 		goto out_unlock;
3718 
3719 	/* cow and double check */
3720 	ret = btrfs_cow_block(trans, root, right, upper,
3721 			      slot + 1, &right);
3722 	if (ret)
3723 		goto out_unlock;
3724 
3725 	free_space = btrfs_leaf_free_space(fs_info, right);
3726 	if (free_space < data_size)
3727 		goto out_unlock;
3728 
3729 	left_nritems = btrfs_header_nritems(left);
3730 	if (left_nritems == 0)
3731 		goto out_unlock;
3732 
3733 	if (path->slots[0] == left_nritems && !empty) {
3734 		/* Key greater than all keys in the leaf, right neighbor has
3735 		 * enough room for it and we're not emptying our leaf to delete
3736 		 * it, therefore use right neighbor to insert the new item and
3737 		 * no need to touch/dirty our left leaf. */
3738 		btrfs_tree_unlock(left);
3739 		free_extent_buffer(left);
3740 		path->nodes[0] = right;
3741 		path->slots[0] = 0;
3742 		path->slots[1]++;
3743 		return 0;
3744 	}
3745 
3746 	return __push_leaf_right(fs_info, path, min_data_size, empty,
3747 				right, free_space, left_nritems, min_slot);
3748 out_unlock:
3749 	btrfs_tree_unlock(right);
3750 	free_extent_buffer(right);
3751 	return 1;
3752 }
3753 
3754 /*
3755  * push some data in the path leaf to the left, trying to free up at
3756  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3757  *
3758  * max_slot can put a limit on how far into the leaf we'll push items.  The
3759  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3760  * items
3761  */
3762 static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
3763 				     struct btrfs_path *path, int data_size,
3764 				     int empty, struct extent_buffer *left,
3765 				     int free_space, u32 right_nritems,
3766 				     u32 max_slot)
3767 {
3768 	struct btrfs_disk_key disk_key;
3769 	struct extent_buffer *right = path->nodes[0];
3770 	int i;
3771 	int push_space = 0;
3772 	int push_items = 0;
3773 	struct btrfs_item *item;
3774 	u32 old_left_nritems;
3775 	u32 nr;
3776 	int ret = 0;
3777 	u32 this_item_size;
3778 	u32 old_left_item_size;
3779 	struct btrfs_map_token token;
3780 
3781 	btrfs_init_map_token(&token);
3782 
3783 	if (empty)
3784 		nr = min(right_nritems, max_slot);
3785 	else
3786 		nr = min(right_nritems - 1, max_slot);
3787 
3788 	for (i = 0; i < nr; i++) {
3789 		item = btrfs_item_nr(i);
3790 
3791 		if (!empty && push_items > 0) {
3792 			if (path->slots[0] < i)
3793 				break;
3794 			if (path->slots[0] == i) {
3795 				int space = btrfs_leaf_free_space(fs_info, right);
3796 				if (space + push_space * 2 > free_space)
3797 					break;
3798 			}
3799 		}
3800 
3801 		if (path->slots[0] == i)
3802 			push_space += data_size;
3803 
3804 		this_item_size = btrfs_item_size(right, item);
3805 		if (this_item_size + sizeof(*item) + push_space > free_space)
3806 			break;
3807 
3808 		push_items++;
3809 		push_space += this_item_size + sizeof(*item);
3810 	}
3811 
3812 	if (push_items == 0) {
3813 		ret = 1;
3814 		goto out;
3815 	}
3816 	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3817 
3818 	/* push data from right to left */
3819 	copy_extent_buffer(left, right,
3820 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3821 			   btrfs_item_nr_offset(0),
3822 			   push_items * sizeof(struct btrfs_item));
3823 
3824 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3825 		     btrfs_item_offset_nr(right, push_items - 1);
3826 
3827 	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3828 		     leaf_data_end(fs_info, left) - push_space,
3829 		     BTRFS_LEAF_DATA_OFFSET +
3830 		     btrfs_item_offset_nr(right, push_items - 1),
3831 		     push_space);
3832 	old_left_nritems = btrfs_header_nritems(left);
3833 	BUG_ON(old_left_nritems <= 0);
3834 
3835 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3836 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3837 		u32 ioff;
3838 
3839 		item = btrfs_item_nr(i);
3840 
3841 		ioff = btrfs_token_item_offset(left, item, &token);
3842 		btrfs_set_token_item_offset(left, item,
3843 		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3844 		      &token);
3845 	}
3846 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3847 
3848 	/* fixup right node */
3849 	if (push_items > right_nritems)
3850 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3851 		       right_nritems);
3852 
3853 	if (push_items < right_nritems) {
3854 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3855 						  leaf_data_end(fs_info, right);
3856 		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3857 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3858 				      BTRFS_LEAF_DATA_OFFSET +
3859 				      leaf_data_end(fs_info, right), push_space);
3860 
3861 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3862 			      btrfs_item_nr_offset(push_items),
3863 			     (btrfs_header_nritems(right) - push_items) *
3864 			     sizeof(struct btrfs_item));
3865 	}
3866 	right_nritems -= push_items;
3867 	btrfs_set_header_nritems(right, right_nritems);
3868 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3869 	for (i = 0; i < right_nritems; i++) {
3870 		item = btrfs_item_nr(i);
3871 
3872 		push_space = push_space - btrfs_token_item_size(right,
3873 								item, &token);
3874 		btrfs_set_token_item_offset(right, item, push_space, &token);
3875 	}
3876 
3877 	btrfs_mark_buffer_dirty(left);
3878 	if (right_nritems)
3879 		btrfs_mark_buffer_dirty(right);
3880 	else
3881 		clean_tree_block(fs_info, right);
3882 
3883 	btrfs_item_key(right, &disk_key, 0);
3884 	fixup_low_keys(path, &disk_key, 1);
3885 
3886 	/* then fixup the leaf pointer in the path */
3887 	if (path->slots[0] < push_items) {
3888 		path->slots[0] += old_left_nritems;
3889 		btrfs_tree_unlock(path->nodes[0]);
3890 		free_extent_buffer(path->nodes[0]);
3891 		path->nodes[0] = left;
3892 		path->slots[1] -= 1;
3893 	} else {
3894 		btrfs_tree_unlock(left);
3895 		free_extent_buffer(left);
3896 		path->slots[0] -= push_items;
3897 	}
3898 	BUG_ON(path->slots[0] < 0);
3899 	return ret;
3900 out:
3901 	btrfs_tree_unlock(left);
3902 	free_extent_buffer(left);
3903 	return ret;
3904 }
3905 
3906 /*
3907  * push some data in the path leaf to the left, trying to free up at
3908  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3909  *
3910  * max_slot can put a limit on how far into the leaf we'll push items.  The
3911  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3912  * items
3913  */
3914 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3915 			  *root, struct btrfs_path *path, int min_data_size,
3916 			  int data_size, int empty, u32 max_slot)
3917 {
3918 	struct btrfs_fs_info *fs_info = root->fs_info;
3919 	struct extent_buffer *right = path->nodes[0];
3920 	struct extent_buffer *left;
3921 	int slot;
3922 	int free_space;
3923 	u32 right_nritems;
3924 	int ret = 0;
3925 
3926 	slot = path->slots[1];
3927 	if (slot == 0)
3928 		return 1;
3929 	if (!path->nodes[1])
3930 		return 1;
3931 
3932 	right_nritems = btrfs_header_nritems(right);
3933 	if (right_nritems == 0)
3934 		return 1;
3935 
3936 	btrfs_assert_tree_locked(path->nodes[1]);
3937 
3938 	left = read_node_slot(fs_info, path->nodes[1], slot - 1);
3939 	/*
3940 	 * If slot - 1 is not valid or we failed to read the left node,
3941 	 * it's no big deal, just return.
3942 	 */
3943 	if (IS_ERR(left))
3944 		return 1;
3945 
3946 	btrfs_tree_lock(left);
3947 	btrfs_set_lock_blocking(left);
3948 
3949 	free_space = btrfs_leaf_free_space(fs_info, left);
3950 	if (free_space < data_size) {
3951 		ret = 1;
3952 		goto out;
3953 	}
3954 
3955 	/* cow and double check */
3956 	ret = btrfs_cow_block(trans, root, left,
3957 			      path->nodes[1], slot - 1, &left);
3958 	if (ret) {
3959 		/* we hit -ENOSPC, but it isn't fatal here */
3960 		if (ret == -ENOSPC)
3961 			ret = 1;
3962 		goto out;
3963 	}
3964 
3965 	free_space = btrfs_leaf_free_space(fs_info, left);
3966 	if (free_space < data_size) {
3967 		ret = 1;
3968 		goto out;
3969 	}
3970 
3971 	return __push_leaf_left(fs_info, path, min_data_size,
3972 			       empty, left, free_space, right_nritems,
3973 			       max_slot);
3974 out:
3975 	btrfs_tree_unlock(left);
3976 	free_extent_buffer(left);
3977 	return ret;
3978 }
3979 
3980 /*
3981  * helper for split_leaf(): copy the items from 'mid' onwards out of
3982  * leaf 'l' into the new leaf 'right' and fix up the path.
3983  */
3984 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3985 				    struct btrfs_fs_info *fs_info,
3986 				    struct btrfs_path *path,
3987 				    struct extent_buffer *l,
3988 				    struct extent_buffer *right,
3989 				    int slot, int mid, int nritems)
3990 {
3991 	int data_copy_size;
3992 	int rt_data_off;
3993 	int i;
3994 	struct btrfs_disk_key disk_key;
3995 	struct btrfs_map_token token;
3996 
3997 	btrfs_init_map_token(&token);
3998 
3999 	nritems = nritems - mid;
4000 	btrfs_set_header_nritems(right, nritems);
4001 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4002 
4003 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4004 			   btrfs_item_nr_offset(mid),
4005 			   nritems * sizeof(struct btrfs_item));
4006 
4007 	copy_extent_buffer(right, l,
4008 		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4009 		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4010 		     leaf_data_end(fs_info, l), data_copy_size);
4011 
4012 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4013 
4014 	for (i = 0; i < nritems; i++) {
4015 		struct btrfs_item *item = btrfs_item_nr(i);
4016 		u32 ioff;
4017 
4018 		ioff = btrfs_token_item_offset(right, item, &token);
4019 		btrfs_set_token_item_offset(right, item,
4020 					    ioff + rt_data_off, &token);
4021 	}
4022 
4023 	btrfs_set_header_nritems(l, mid);
4024 	btrfs_item_key(right, &disk_key, 0);
4025 	insert_ptr(trans, fs_info, path, &disk_key, right->start,
4026 		   path->slots[1] + 1, 1);
4027 
4028 	btrfs_mark_buffer_dirty(right);
4029 	btrfs_mark_buffer_dirty(l);
4030 	BUG_ON(path->slots[0] != slot);
4031 
4032 	if (mid <= slot) {
4033 		btrfs_tree_unlock(path->nodes[0]);
4034 		free_extent_buffer(path->nodes[0]);
4035 		path->nodes[0] = right;
4036 		path->slots[0] -= mid;
4037 		path->slots[1] += 1;
4038 	} else {
4039 		btrfs_tree_unlock(right);
4040 		free_extent_buffer(right);
4041 	}
4042 
4043 	BUG_ON(path->slots[0] < 0);
4044 }
4045 
4046 /*
4047  * double splits happen when we need to insert a big item in the middle
4048  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4049  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4050  *          A                 B                 C
4051  *
4052  * We avoid this by trying to push the items on either side of our target
4053  * into the adjacent leaves.  If all goes well we can avoid the double split
4054  * completely.
4055  */
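/*
 * Concrete illustration (not from the original source): with a nearly
 * full leaf [ i0 i1 i2 i3 i4 ] and a big new item aimed between i2 and
 * i3, a naive split at the target yields three sparse leaves
 * [ i0 i1 i2 ] [ new ] [ i3 i4 ].  Pushing i0..i2 into the left
 * neighbor and i3..i4 into the right one first can leave the target
 * slot at a leaf edge, so a single ordinary split (or none at all)
 * is enough.
 */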
4056 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4057 					  struct btrfs_root *root,
4058 					  struct btrfs_path *path,
4059 					  int data_size)
4060 {
4061 	struct btrfs_fs_info *fs_info = root->fs_info;
4062 	int ret;
4063 	int progress = 0;
4064 	int slot;
4065 	u32 nritems;
4066 	int space_needed = data_size;
4067 
4068 	slot = path->slots[0];
4069 	if (slot < btrfs_header_nritems(path->nodes[0]))
4070 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4071 
4072 	/*
4073 	 * try to push all the items after our slot into the
4074 	 * right leaf
4075 	 */
4076 	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4077 	if (ret < 0)
4078 		return ret;
4079 
4080 	if (ret == 0)
4081 		progress++;
4082 
4083 	nritems = btrfs_header_nritems(path->nodes[0]);
4084 	/*
4085 	 * our goal is to get our slot at the start or end of a leaf.  If
4086 	 * we've done so we're done
4087 	 */
4088 	if (path->slots[0] == 0 || path->slots[0] == nritems)
4089 		return 0;
4090 
4091 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4092 		return 0;
4093 
4094 	/* try to push all the items before our slot into the left leaf */
4095 	slot = path->slots[0];
4096 	space_needed = data_size;
4097 	if (slot > 0)
4098 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4099 	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4100 	if (ret < 0)
4101 		return ret;
4102 
4103 	if (ret == 0)
4104 		progress++;
4105 
4106 	if (progress)
4107 		return 0;
4108 	return 1;
4109 }
4110 
4111 /*
4112  * split the path's leaf in two, making sure there is at least data_size
4113  * available for the resulting leaf level of the path.
4114  *
4115  * returns 0 if all went well and < 0 on failure.
4116  */
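/*
 * Note on the 'split' variable below (descriptive note, not from the
 * original source): 1 means an ordinary split at 'mid', 0 means the new
 * item gets a fresh empty leaf of its own (the insertion point sits at
 * a leaf edge), and 2 means the half receiving the new item is still
 * too full, so it is split once more via the 'again' label (at most
 * once, enforced by num_doubles).
 */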
4117 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4118 			       struct btrfs_root *root,
4119 			       const struct btrfs_key *ins_key,
4120 			       struct btrfs_path *path, int data_size,
4121 			       int extend)
4122 {
4123 	struct btrfs_disk_key disk_key;
4124 	struct extent_buffer *l;
4125 	u32 nritems;
4126 	int mid;
4127 	int slot;
4128 	struct extent_buffer *right;
4129 	struct btrfs_fs_info *fs_info = root->fs_info;
4130 	int ret = 0;
4131 	int wret;
4132 	int split;
4133 	int num_doubles = 0;
4134 	int tried_avoid_double = 0;
4135 
4136 	l = path->nodes[0];
4137 	slot = path->slots[0];
4138 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4139 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4140 		return -EOVERFLOW;
4141 
4142 	/* first try to make some room by pushing left and right */
4143 	if (data_size && path->nodes[1]) {
4144 		int space_needed = data_size;
4145 
4146 		if (slot < btrfs_header_nritems(l))
4147 			space_needed -= btrfs_leaf_free_space(fs_info, l);
4148 
4149 		wret = push_leaf_right(trans, root, path, space_needed,
4150 				       space_needed, 0, 0);
4151 		if (wret < 0)
4152 			return wret;
4153 		if (wret) {
4154 			space_needed = data_size;
4155 			if (slot > 0)
4156 				space_needed -= btrfs_leaf_free_space(fs_info,
4157 								      l);
4158 			wret = push_leaf_left(trans, root, path, space_needed,
4159 					      space_needed, 0, (u32)-1);
4160 			if (wret < 0)
4161 				return wret;
4162 		}
4163 		l = path->nodes[0];
4164 
4165 		/* did the pushes work? */
4166 		if (btrfs_leaf_free_space(fs_info, l) >= data_size)
4167 			return 0;
4168 	}
4169 
4170 	if (!path->nodes[1]) {
4171 		ret = insert_new_root(trans, root, path, 1);
4172 		if (ret)
4173 			return ret;
4174 	}
4175 again:
4176 	split = 1;
4177 	l = path->nodes[0];
4178 	slot = path->slots[0];
4179 	nritems = btrfs_header_nritems(l);
4180 	mid = (nritems + 1) / 2;
4181 
4182 	if (mid <= slot) {
4183 		if (nritems == 1 ||
4184 		    leaf_space_used(l, mid, nritems - mid) + data_size >
4185 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4186 			if (slot >= nritems) {
4187 				split = 0;
4188 			} else {
4189 				mid = slot;
4190 				if (mid != nritems &&
4191 				    leaf_space_used(l, mid, nritems - mid) +
4192 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4193 					if (data_size && !tried_avoid_double)
4194 						goto push_for_double;
4195 					split = 2;
4196 				}
4197 			}
4198 		}
4199 	} else {
4200 		if (leaf_space_used(l, 0, mid) + data_size >
4201 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4202 			if (!extend && data_size && slot == 0) {
4203 				split = 0;
4204 			} else if ((extend || !data_size) && slot == 0) {
4205 				mid = 1;
4206 			} else {
4207 				mid = slot;
4208 				if (mid != nritems &&
4209 				    leaf_space_used(l, mid, nritems - mid) +
4210 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4211 					if (data_size && !tried_avoid_double)
4212 						goto push_for_double;
4213 					split = 2;
4214 				}
4215 			}
4216 		}
4217 	}
4218 
4219 	if (split == 0)
4220 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4221 	else
4222 		btrfs_item_key(l, &disk_key, mid);
4223 
4224 	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4225 			&disk_key, 0, l->start, 0);
4226 	if (IS_ERR(right))
4227 		return PTR_ERR(right);
4228 
4229 	root_add_used(root, fs_info->nodesize);
4230 
4231 	if (split == 0) {
4232 		if (mid <= slot) {
4233 			btrfs_set_header_nritems(right, 0);
4234 			insert_ptr(trans, fs_info, path, &disk_key,
4235 				   right->start, path->slots[1] + 1, 1);
4236 			btrfs_tree_unlock(path->nodes[0]);
4237 			free_extent_buffer(path->nodes[0]);
4238 			path->nodes[0] = right;
4239 			path->slots[0] = 0;
4240 			path->slots[1] += 1;
4241 		} else {
4242 			btrfs_set_header_nritems(right, 0);
4243 			insert_ptr(trans, fs_info, path, &disk_key,
4244 				   right->start, path->slots[1], 1);
4245 			btrfs_tree_unlock(path->nodes[0]);
4246 			free_extent_buffer(path->nodes[0]);
4247 			path->nodes[0] = right;
4248 			path->slots[0] = 0;
4249 			if (path->slots[1] == 0)
4250 				fixup_low_keys(path, &disk_key, 1);
4251 		}
4252 		/*
4253 		 * We create a new leaf 'right' for the required ins_len;
4254 		 * btrfs_mark_buffer_dirty() will be done on this leaf after
4255 		 * the ins_len data has been copied into it.
4256 		 */
4257 		return ret;
4258 	}
4259 
4260 	copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4261 
4262 	if (split == 2) {
4263 		BUG_ON(num_doubles != 0);
4264 		num_doubles++;
4265 		goto again;
4266 	}
4267 
4268 	return 0;
4269 
4270 push_for_double:
4271 	push_for_double_split(trans, root, path, data_size);
4272 	tried_avoid_double = 1;
4273 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4274 		return 0;
4275 	goto again;
4276 }
4277 
4278 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4279 					 struct btrfs_root *root,
4280 					 struct btrfs_path *path, int ins_len)
4281 {
4282 	struct btrfs_fs_info *fs_info = root->fs_info;
4283 	struct btrfs_key key;
4284 	struct extent_buffer *leaf;
4285 	struct btrfs_file_extent_item *fi;
4286 	u64 extent_len = 0;
4287 	u32 item_size;
4288 	int ret;
4289 
4290 	leaf = path->nodes[0];
4291 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4292 
4293 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4294 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4295 
4296 	if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
4297 		return 0;
4298 
4299 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4300 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4301 		fi = btrfs_item_ptr(leaf, path->slots[0],
4302 				    struct btrfs_file_extent_item);
4303 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4304 	}
4305 	btrfs_release_path(path);
4306 
4307 	path->keep_locks = 1;
4308 	path->search_for_split = 1;
4309 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4310 	path->search_for_split = 0;
4311 	if (ret > 0)
4312 		ret = -EAGAIN;
4313 	if (ret < 0)
4314 		goto err;
4315 
4316 	ret = -EAGAIN;
4317 	leaf = path->nodes[0];
4318 	/* if our item isn't there, return now */
4319 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4320 		goto err;
4321 
4322 	/* the leaf has changed and now has room, return now */
4323 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4324 		goto err;
4325 
4326 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4327 		fi = btrfs_item_ptr(leaf, path->slots[0],
4328 				    struct btrfs_file_extent_item);
4329 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4330 			goto err;
4331 	}
4332 
4333 	btrfs_set_path_blocking(path);
4334 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4335 	if (ret)
4336 		goto err;
4337 
4338 	path->keep_locks = 0;
4339 	btrfs_unlock_up_safe(path, 1);
4340 	return 0;
4341 err:
4342 	path->keep_locks = 0;
4343 	return ret;
4344 }
4345 
4346 static noinline int split_item(struct btrfs_fs_info *fs_info,
4347 			       struct btrfs_path *path,
4348 			       const struct btrfs_key *new_key,
4349 			       unsigned long split_offset)
4350 {
4351 	struct extent_buffer *leaf;
4352 	struct btrfs_item *item;
4353 	struct btrfs_item *new_item;
4354 	int slot;
4355 	char *buf;
4356 	u32 nritems;
4357 	u32 item_size;
4358 	u32 orig_offset;
4359 	struct btrfs_disk_key disk_key;
4360 
4361 	leaf = path->nodes[0];
4362 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4363 
4364 	btrfs_set_path_blocking(path);
4365 
4366 	item = btrfs_item_nr(path->slots[0]);
4367 	orig_offset = btrfs_item_offset(leaf, item);
4368 	item_size = btrfs_item_size(leaf, item);
4369 
4370 	buf = kmalloc(item_size, GFP_NOFS);
4371 	if (!buf)
4372 		return -ENOMEM;
4373 
4374 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4375 			    path->slots[0]), item_size);
4376 
4377 	slot = path->slots[0] + 1;
4378 	nritems = btrfs_header_nritems(leaf);
4379 	if (slot != nritems) {
4380 		/* shift the items */
4381 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4382 				btrfs_item_nr_offset(slot),
4383 				(nritems - slot) * sizeof(struct btrfs_item));
4384 	}
4385 
4386 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4387 	btrfs_set_item_key(leaf, &disk_key, slot);
4388 
4389 	new_item = btrfs_item_nr(slot);
4390 
4391 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4392 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4393 
4394 	btrfs_set_item_offset(leaf, item,
4395 			      orig_offset + item_size - split_offset);
4396 	btrfs_set_item_size(leaf, item, split_offset);
4397 
4398 	btrfs_set_header_nritems(leaf, nritems + 1);
4399 
4400 	/* write the data for the start of the original item */
4401 	write_extent_buffer(leaf, buf,
4402 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4403 			    split_offset);
4404 
4405 	/* write the data for the new item */
4406 	write_extent_buffer(leaf, buf + split_offset,
4407 			    btrfs_item_ptr_offset(leaf, slot),
4408 			    item_size - split_offset);
4409 	btrfs_mark_buffer_dirty(leaf);
4410 
4411 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4412 	kfree(buf);
4413 	return 0;
4414 }
4415 
4416 /*
4417  * This function splits a single item into two items,
4418  * giving 'new_key' to the new item and splitting the
4419  * old one at split_offset (from the start of the item).
4420  *
4421  * The path may be released by this operation.  After
4422  * the split, the path is pointing to the old item.  The
4423  * new item is going to be in the same node as the old one.
4424  *
4425  * Note, the item being split must be small enough to live alone on
4426  * a tree block with room for one extra struct btrfs_item
4427  *
4428  * This allows us to split the item in place, keeping a lock on the
4429  * leaf the entire time.
4430  */
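/*
 * Illustrative usage sketch (not part of the original source); 'ino'
 * and 'split_pos' are placeholders and a valid trans/root/path pointing
 * at an EXTENT_DATA item is assumed:
 *
 *	struct btrfs_key new_key = { .objectid = ino,
 *				     .type = BTRFS_EXTENT_DATA_KEY,
 *				     .offset = split_pos };
 *
 *	ret = btrfs_split_item(trans, root, path, &new_key, 8);
 *
 * This keeps the first 8 bytes of the item under the old key and moves
 * the remainder into a new item with 'new_key'.  A nonzero ret can be
 * -EAGAIN (the leaf changed while re-searching), -ENOMEM or another
 * tree error.
 */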
4431 int btrfs_split_item(struct btrfs_trans_handle *trans,
4432 		     struct btrfs_root *root,
4433 		     struct btrfs_path *path,
4434 		     const struct btrfs_key *new_key,
4435 		     unsigned long split_offset)
4436 {
4437 	int ret;
4438 	ret = setup_leaf_for_split(trans, root, path,
4439 				   sizeof(struct btrfs_item));
4440 	if (ret)
4441 		return ret;
4442 
4443 	ret = split_item(root->fs_info, path, new_key, split_offset);
4444 	return ret;
4445 }
4446 
4447 /*
4448  * This function duplicates an item, giving 'new_key' to the new item.
4449  * It guarantees both items live in the same tree leaf and the new item
4450  * is contiguous with the original item.
4451  *
4452  * This allows us to split a file extent in place, keeping a lock on the
4453  * leaf the entire time.
4454  */
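/*
 * Illustrative usage sketch (not part of the original source), e.g.
 * when splitting a file extent in two; 'ino' and 'split_offset' are
 * placeholders:
 *
 *	struct btrfs_key new_key = { .objectid = ino,
 *				     .type = BTRFS_EXTENT_DATA_KEY,
 *				     .offset = split_offset };
 *
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *
 * On success path->slots[0] points at the new copy, whose fields can
 * then be adjusted via btrfs_item_ptr() and the write helpers; ret is
 * -EAGAIN if the leaf changed during the internal re-search.
 */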
4455 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4456 			 struct btrfs_root *root,
4457 			 struct btrfs_path *path,
4458 			 const struct btrfs_key *new_key)
4459 {
4460 	struct extent_buffer *leaf;
4461 	int ret;
4462 	u32 item_size;
4463 
4464 	leaf = path->nodes[0];
4465 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4466 	ret = setup_leaf_for_split(trans, root, path,
4467 				   item_size + sizeof(struct btrfs_item));
4468 	if (ret)
4469 		return ret;
4470 
4471 	path->slots[0]++;
4472 	setup_items_for_insert(root, path, new_key, &item_size,
4473 			       item_size, item_size +
4474 			       sizeof(struct btrfs_item), 1);
4475 	leaf = path->nodes[0];
4476 	memcpy_extent_buffer(leaf,
4477 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4478 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4479 			     item_size);
4480 	return 0;
4481 }
4482 
4483 /*
4484  * make the item pointed to by the path smaller.  new_size indicates
4485  * how small to make it, and from_end tells us if we just chop bytes
4486  * off the end of the item or if we shift the item to chop bytes off
4487  * the front.
4488  */
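/*
 * Illustrative sketch (not part of the original source): shrinking the
 * current item to 'new_len' payload bytes ('new_len' is a placeholder)
 * by chopping from the end:
 *
 *	u32 size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
 *
 *	if (new_len < size)
 *		btrfs_truncate_item(fs_info, path, new_len, 1);
 *
 * With from_end == 0 the front of the item is chopped instead and the
 * item's key offset is bumped forward by the number of removed bytes.
 */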
4489 void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4490 			 struct btrfs_path *path, u32 new_size, int from_end)
4491 {
4492 	int slot;
4493 	struct extent_buffer *leaf;
4494 	struct btrfs_item *item;
4495 	u32 nritems;
4496 	unsigned int data_end;
4497 	unsigned int old_data_start;
4498 	unsigned int old_size;
4499 	unsigned int size_diff;
4500 	int i;
4501 	struct btrfs_map_token token;
4502 
4503 	btrfs_init_map_token(&token);
4504 
4505 	leaf = path->nodes[0];
4506 	slot = path->slots[0];
4507 
4508 	old_size = btrfs_item_size_nr(leaf, slot);
4509 	if (old_size == new_size)
4510 		return;
4511 
4512 	nritems = btrfs_header_nritems(leaf);
4513 	data_end = leaf_data_end(fs_info, leaf);
4514 
4515 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4516 
4517 	size_diff = old_size - new_size;
4518 
4519 	BUG_ON(slot < 0);
4520 	BUG_ON(slot >= nritems);
4521 
4522 	/*
4523 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4524 	 */
4525 	/* first correct the data pointers */
4526 	for (i = slot; i < nritems; i++) {
4527 		u32 ioff;
4528 		item = btrfs_item_nr(i);
4529 
4530 		ioff = btrfs_token_item_offset(leaf, item, &token);
4531 		btrfs_set_token_item_offset(leaf, item,
4532 					    ioff + size_diff, &token);
4533 	}
4534 
4535 	/* shift the data */
4536 	if (from_end) {
4537 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4538 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4539 			      data_end, old_data_start + new_size - data_end);
4540 	} else {
4541 		struct btrfs_disk_key disk_key;
4542 		u64 offset;
4543 
4544 		btrfs_item_key(leaf, &disk_key, slot);
4545 
4546 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4547 			unsigned long ptr;
4548 			struct btrfs_file_extent_item *fi;
4549 
4550 			fi = btrfs_item_ptr(leaf, slot,
4551 					    struct btrfs_file_extent_item);
4552 			fi = (struct btrfs_file_extent_item *)(
4553 			     (unsigned long)fi - size_diff);
4554 
4555 			if (btrfs_file_extent_type(leaf, fi) ==
4556 			    BTRFS_FILE_EXTENT_INLINE) {
4557 				ptr = btrfs_item_ptr_offset(leaf, slot);
4558 				memmove_extent_buffer(leaf, ptr,
4559 				      (unsigned long)fi,
4560 				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
4561 			}
4562 		}
4563 
4564 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4565 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4566 			      data_end, old_data_start - data_end);
4567 
4568 		offset = btrfs_disk_key_offset(&disk_key);
4569 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4570 		btrfs_set_item_key(leaf, &disk_key, slot);
4571 		if (slot == 0)
4572 			fixup_low_keys(path, &disk_key, 1);
4573 	}
4574 
4575 	item = btrfs_item_nr(slot);
4576 	btrfs_set_item_size(leaf, item, new_size);
4577 	btrfs_mark_buffer_dirty(leaf);
4578 
4579 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4580 		btrfs_print_leaf(leaf);
4581 		BUG();
4582 	}
4583 }
4584 
4585 /*
4586  * make the item pointed to by the path bigger, data_size is the added size.
4587  */
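/*
 * Illustrative sketch (not part of the original source; 'extra' and
 * 'data' are placeholders): growing the current item by 'extra' bytes
 * and appending data at its tail.  The caller must already have ensured
 * enough leaf free space (e.g. via btrfs_search_slot() with a suitable
 * ins_len), otherwise this BUG()s:
 *
 *	struct extent_buffer *leaf = path->nodes[0];
 *	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
 *
 *	btrfs_extend_item(fs_info, path, extra);
 *	write_extent_buffer(leaf, data,
 *			    btrfs_item_ptr_offset(leaf, path->slots[0]) +
 *			    old_size, extra);
 */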
4588 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4589 		       u32 data_size)
4590 {
4591 	int slot;
4592 	struct extent_buffer *leaf;
4593 	struct btrfs_item *item;
4594 	u32 nritems;
4595 	unsigned int data_end;
4596 	unsigned int old_data;
4597 	unsigned int old_size;
4598 	int i;
4599 	struct btrfs_map_token token;
4600 
4601 	btrfs_init_map_token(&token);
4602 
4603 	leaf = path->nodes[0];
4604 
4605 	nritems = btrfs_header_nritems(leaf);
4606 	data_end = leaf_data_end(fs_info, leaf);
4607 
4608 	if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4609 		btrfs_print_leaf(leaf);
4610 		BUG();
4611 	}
4612 	slot = path->slots[0];
4613 	old_data = btrfs_item_end_nr(leaf, slot);
4614 
4615 	BUG_ON(slot < 0);
4616 	if (slot >= nritems) {
4617 		btrfs_print_leaf(leaf);
4618 		btrfs_crit(fs_info, "slot %d too large, nritems %d",
4619 			   slot, nritems);
4620 		BUG_ON(1);
4621 	}
4622 
4623 	/*
4624 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4625 	 */
4626 	/* first correct the data pointers */
4627 	for (i = slot; i < nritems; i++) {
4628 		u32 ioff;
4629 		item = btrfs_item_nr(i);
4630 
4631 		ioff = btrfs_token_item_offset(leaf, item, &token);
4632 		btrfs_set_token_item_offset(leaf, item,
4633 					    ioff - data_size, &token);
4634 	}
4635 
4636 	/* shift the data */
4637 	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4638 		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4639 		      data_end, old_data - data_end);
4640 
4641 	data_end = old_data;
4642 	old_size = btrfs_item_size_nr(leaf, slot);
4643 	item = btrfs_item_nr(slot);
4644 	btrfs_set_item_size(leaf, item, old_size + data_size);
4645 	btrfs_mark_buffer_dirty(leaf);
4646 
4647 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4648 		btrfs_print_leaf(leaf);
4649 		BUG();
4650 	}
4651 }
4652 
4653 /*
4654  * this is a helper for btrfs_insert_empty_items, the main goal here is
4655  * to save stack depth by doing the bulk of the work in a function
4656  * that doesn't call btrfs_search_slot
4657  */
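/*
 * Descriptive note (not from the original source): callers must have
 * already made room in the leaf; for nr items the parameters relate as
 *
 *	total_data = data_size[0] + ... + data_size[nr - 1];
 *	total_size = total_data + nr * sizeof(struct btrfs_item);
 *
 * which is exactly what btrfs_insert_empty_items() below computes
 * before calling this helper.
 */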
4658 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4659 			    const struct btrfs_key *cpu_key, u32 *data_size,
4660 			    u32 total_data, u32 total_size, int nr)
4661 {
4662 	struct btrfs_fs_info *fs_info = root->fs_info;
4663 	struct btrfs_item *item;
4664 	int i;
4665 	u32 nritems;
4666 	unsigned int data_end;
4667 	struct btrfs_disk_key disk_key;
4668 	struct extent_buffer *leaf;
4669 	int slot;
4670 	struct btrfs_map_token token;
4671 
4672 	if (path->slots[0] == 0) {
4673 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4674 		fixup_low_keys(path, &disk_key, 1);
4675 	}
4676 	btrfs_unlock_up_safe(path, 1);
4677 
4678 	btrfs_init_map_token(&token);
4679 
4680 	leaf = path->nodes[0];
4681 	slot = path->slots[0];
4682 
4683 	nritems = btrfs_header_nritems(leaf);
4684 	data_end = leaf_data_end(fs_info, leaf);
4685 
4686 	if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4687 		btrfs_print_leaf(leaf);
4688 		btrfs_crit(fs_info, "not enough freespace need %u have %d",
4689 			   total_size, btrfs_leaf_free_space(fs_info, leaf));
4690 		BUG();
4691 	}
4692 
4693 	if (slot != nritems) {
4694 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4695 
4696 		if (old_data < data_end) {
4697 			btrfs_print_leaf(leaf);
4698 			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4699 				   slot, old_data, data_end);
4700 			BUG_ON(1);
4701 		}
4702 		/*
4703 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4704 		 */
4705 		/* first correct the data pointers */
4706 		for (i = slot; i < nritems; i++) {
4707 			u32 ioff;
4708 
4709 			item = btrfs_item_nr(i);
4710 			ioff = btrfs_token_item_offset(leaf, item, &token);
4711 			btrfs_set_token_item_offset(leaf, item,
4712 						    ioff - total_data, &token);
4713 		}
4714 		/* shift the items */
4715 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4716 			      btrfs_item_nr_offset(slot),
4717 			      (nritems - slot) * sizeof(struct btrfs_item));
4718 
4719 		/* shift the data */
4720 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4721 			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4722 			      data_end, old_data - data_end);
4723 		data_end = old_data;
4724 	}
4725 
4726 	/* setup the item for the new data */
4727 	for (i = 0; i < nr; i++) {
4728 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4729 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4730 		item = btrfs_item_nr(slot + i);
4731 		btrfs_set_token_item_offset(leaf, item,
4732 					    data_end - data_size[i], &token);
4733 		data_end -= data_size[i];
4734 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4735 	}
4736 
4737 	btrfs_set_header_nritems(leaf, nritems + nr);
4738 	btrfs_mark_buffer_dirty(leaf);
4739 
4740 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4741 		btrfs_print_leaf(leaf);
4742 		BUG();
4743 	}
4744 }
4745 
4746 /*
4747  * Given a key and some data, insert items into the tree.
4748  * This does all the path init required, making room in the tree if needed.
4749  */
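/*
 * Illustrative usage sketch (not from the original source), reserving
 * two consecutive items in one pass; the keys and sizes are
 * placeholders, and the keys must be in ascending order and absent
 * from the tree:
 *
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { first_size, second_size };
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
 *
 * ret is -EEXIST if the first key already exists; on success
 * path->slots[0] points at the first of the new (still empty) items.
 */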
4750 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4751 			    struct btrfs_root *root,
4752 			    struct btrfs_path *path,
4753 			    const struct btrfs_key *cpu_key, u32 *data_size,
4754 			    int nr)
4755 {
4756 	int ret = 0;
4757 	int slot;
4758 	int i;
4759 	u32 total_size = 0;
4760 	u32 total_data = 0;
4761 
4762 	for (i = 0; i < nr; i++)
4763 		total_data += data_size[i];
4764 
4765 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4766 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4767 	if (ret == 0)
4768 		return -EEXIST;
4769 	if (ret < 0)
4770 		return ret;
4771 
4772 	slot = path->slots[0];
4773 	BUG_ON(slot < 0);
4774 
4775 	setup_items_for_insert(root, path, cpu_key, data_size,
4776 			       total_data, total_size, nr);
4777 	return 0;
4778 }
4779 
4780 /*
4781  * Given a key and some data, insert an item into the tree.
4782  * This does all the path init required, making room in the tree if needed.
4783  */
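/*
 * Illustrative usage sketch (not from the original source); 'key' and
 * 'payload' are placeholders:
 *
 *	ret = btrfs_insert_item(trans, root, &key, &payload,
 *				sizeof(payload));
 *
 * ret is -EEXIST if the key already exists; the payload bytes are
 * copied verbatim into the newly created item.
 */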
4784 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4785 		      const struct btrfs_key *cpu_key, void *data,
4786 		      u32 data_size)
4787 {
4788 	int ret = 0;
4789 	struct btrfs_path *path;
4790 	struct extent_buffer *leaf;
4791 	unsigned long ptr;
4792 
4793 	path = btrfs_alloc_path();
4794 	if (!path)
4795 		return -ENOMEM;
4796 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4797 	if (!ret) {
4798 		leaf = path->nodes[0];
4799 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4800 		write_extent_buffer(leaf, data, ptr, data_size);
4801 		btrfs_mark_buffer_dirty(leaf);
4802 	}
4803 	btrfs_free_path(path);
4804 	return ret;
4805 }
4806 
4807 /*
4808  * delete the pointer from a given node.
4809  *
4810  * the tree should have been previously balanced so the deletion does not
4811  * empty a node.
4812  */
4813 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4814 		    int level, int slot)
4815 {
4816 	struct extent_buffer *parent = path->nodes[level];
4817 	u32 nritems;
4818 	int ret;
4819 
4820 	nritems = btrfs_header_nritems(parent);
4821 	if (slot != nritems - 1) {
4822 		if (level) {
4823 			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4824 					nritems - slot - 1);
4825 			BUG_ON(ret < 0);
4826 		}
4827 		memmove_extent_buffer(parent,
4828 			      btrfs_node_key_ptr_offset(slot),
4829 			      btrfs_node_key_ptr_offset(slot + 1),
4830 			      sizeof(struct btrfs_key_ptr) *
4831 			      (nritems - slot - 1));
4832 	} else if (level) {
4833 		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4834 				GFP_NOFS);
4835 		BUG_ON(ret < 0);
4836 	}
4837 
4838 	nritems--;
4839 	btrfs_set_header_nritems(parent, nritems);
4840 	if (nritems == 0 && parent == root->node) {
4841 		BUG_ON(btrfs_header_level(root->node) != 1);
4842 		/* just turn the root into a leaf and break */
4843 		btrfs_set_header_level(root->node, 0);
4844 	} else if (slot == 0) {
4845 		struct btrfs_disk_key disk_key;
4846 
4847 		btrfs_node_key(parent, &disk_key, 0);
4848 		fixup_low_keys(path, &disk_key, level + 1);
4849 	}
4850 	btrfs_mark_buffer_dirty(parent);
4851 }
4852 
4853 /*
4854  * a helper function to delete the leaf pointed to by path->slots[1] and
4855  * path->nodes[1].
4856  *
4857  * This deletes the pointer in path->nodes[1] and frees the leaf
4858  * block extent.  zero is returned if it all worked out, < 0 otherwise.
4859  * block extent.
4860  * The path must have already been setup for deleting the leaf, including
4861  * all the proper balancing.  path->nodes[1] must be locked.
4862  */
4863 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4864 				    struct btrfs_root *root,
4865 				    struct btrfs_path *path,
4866 				    struct extent_buffer *leaf)
4867 {
4868 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4869 	del_ptr(root, path, 1, path->slots[1]);
4870 
4871 	/*
4872 	 * btrfs_free_extent is expensive, we want to make sure we
4873 	 * aren't holding any locks when we call it
4874 	 */
4875 	btrfs_unlock_up_safe(path, 0);
4876 
4877 	root_sub_used(root, leaf->len);
4878 
4879 	extent_buffer_get(leaf);
4880 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4881 	free_extent_buffer_stale(leaf);
4882 }
4883 /*
4884  * delete the item at the leaf level in path.  If that empties
4885  * the leaf, remove it from the tree
4886  */
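/*
 * Illustrative usage sketch (not from the original source): delete the
 * item the path currently points at, assuming a write-locked path from
 * a prior btrfs_search_slot(trans, root, &key, path, -1, 1):
 *
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *
 * All nr slots must live in the same leaf; the helper itself rebalances
 * or frees the leaf when it becomes empty or mostly empty.
 */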
4887 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4888 		    struct btrfs_path *path, int slot, int nr)
4889 {
4890 	struct btrfs_fs_info *fs_info = root->fs_info;
4891 	struct extent_buffer *leaf;
4892 	struct btrfs_item *item;
4893 	u32 last_off;
4894 	u32 dsize = 0;
4895 	int ret = 0;
4896 	int wret;
4897 	int i;
4898 	u32 nritems;
4899 	struct btrfs_map_token token;
4900 
4901 	btrfs_init_map_token(&token);
4902 
4903 	leaf = path->nodes[0];
4904 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4905 
4906 	for (i = 0; i < nr; i++)
4907 		dsize += btrfs_item_size_nr(leaf, slot + i);
4908 
4909 	nritems = btrfs_header_nritems(leaf);
4910 
4911 	if (slot + nr != nritems) {
4912 		int data_end = leaf_data_end(fs_info, leaf);
4913 
4914 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4915 			      data_end + dsize,
4916 			      BTRFS_LEAF_DATA_OFFSET + data_end,
4917 			      last_off - data_end);
4918 
4919 		for (i = slot + nr; i < nritems; i++) {
4920 			u32 ioff;
4921 
4922 			item = btrfs_item_nr(i);
4923 			ioff = btrfs_token_item_offset(leaf, item, &token);
4924 			btrfs_set_token_item_offset(leaf, item,
4925 						    ioff + dsize, &token);
4926 		}
4927 
4928 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4929 			      btrfs_item_nr_offset(slot + nr),
4930 			      sizeof(struct btrfs_item) *
4931 			      (nritems - slot - nr));
4932 	}
4933 	btrfs_set_header_nritems(leaf, nritems - nr);
4934 	nritems -= nr;
4935 
4936 	/* delete the leaf if we've emptied it */
4937 	if (nritems == 0) {
4938 		if (leaf == root->node) {
4939 			btrfs_set_header_level(leaf, 0);
4940 		} else {
4941 			btrfs_set_path_blocking(path);
4942 			clean_tree_block(fs_info, leaf);
4943 			btrfs_del_leaf(trans, root, path, leaf);
4944 		}
4945 	} else {
4946 		int used = leaf_space_used(leaf, 0, nritems);
4947 		if (slot == 0) {
4948 			struct btrfs_disk_key disk_key;
4949 
4950 			btrfs_item_key(leaf, &disk_key, 0);
4951 			fixup_low_keys(path, &disk_key, 1);
4952 		}
4953 
4954 		/* delete the leaf if it is mostly empty */
4955 		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4956 			/* push_leaf_left fixes the path.
4957 			 * make sure the path still points to our leaf
4958 			 * for possible call to del_ptr below
4959 			 */
4960 			slot = path->slots[1];
4961 			extent_buffer_get(leaf);
4962 
4963 			btrfs_set_path_blocking(path);
4964 			wret = push_leaf_left(trans, root, path, 1, 1,
4965 					      1, (u32)-1);
4966 			if (wret < 0 && wret != -ENOSPC)
4967 				ret = wret;
4968 
4969 			if (path->nodes[0] == leaf &&
4970 			    btrfs_header_nritems(leaf)) {
4971 				wret = push_leaf_right(trans, root, path, 1,
4972 						       1, 1, 0);
4973 				if (wret < 0 && wret != -ENOSPC)
4974 					ret = wret;
4975 			}
4976 
4977 			if (btrfs_header_nritems(leaf) == 0) {
4978 				path->slots[1] = slot;
4979 				btrfs_del_leaf(trans, root, path, leaf);
4980 				free_extent_buffer(leaf);
4981 				ret = 0;
4982 			} else {
4983 				/* if we're still in the path, make sure
4984 				 * we're dirty.  Otherwise, one of the
4985 				 * push_leaf functions must have already
4986 				 * dirtied this buffer
4987 				 */
4988 				if (path->nodes[0] == leaf)
4989 					btrfs_mark_buffer_dirty(leaf);
4990 				free_extent_buffer(leaf);
4991 			}
4992 		} else {
4993 			btrfs_mark_buffer_dirty(leaf);
4994 		}
4995 	}
4996 	return ret;
4997 }
4998 
4999 /*
5000  * search the tree again to find a leaf with lesser keys
5001  * returns 0 if it found something or 1 if there are no lesser leaves.
5002  * returns < 0 on io errors.
5003  *
5004  * This may release the path, and so you may lose any locks held at the
5005  * time you call it.
5006  */
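/*
 * Illustrative usage sketch (not from the original source), walking
 * leaves backwards from the current position:
 *
 *	while (1) {
 *		ret = btrfs_prev_leaf(root, path);
 *		if (ret)
 *			break;
 *		(process the items of path->nodes[0])
 *	}
 *
 * ret > 0 means no lesser leaf exists, ret < 0 is an IO error.
 */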
5007 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5008 {
5009 	struct btrfs_key key;
5010 	struct btrfs_disk_key found_key;
5011 	int ret;
5012 
5013 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5014 
5015 	if (key.offset > 0) {
5016 		key.offset--;
5017 	} else if (key.type > 0) {
5018 		key.type--;
5019 		key.offset = (u64)-1;
5020 	} else if (key.objectid > 0) {
5021 		key.objectid--;
5022 		key.type = (u8)-1;
5023 		key.offset = (u64)-1;
5024 	} else {
5025 		return 1;
5026 	}
5027 
5028 	btrfs_release_path(path);
5029 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5030 	if (ret < 0)
5031 		return ret;
5032 	btrfs_item_key(path->nodes[0], &found_key, 0);
5033 	ret = comp_keys(&found_key, &key);
5034 	/*
5035 	 * We might have had an item with the previous key in the tree right
5036 	 * before we released our path. And after we released our path, that
5037 	 * item might have been pushed to the first slot (0) of the leaf we
5038 	 * were holding due to a tree balance. Alternatively, an item with the
5039 	 * previous key can exist as the only element of a leaf (big fat item).
5040 	 * Therefore account for these 2 cases, so that our callers (like
5041 	 * btrfs_previous_item) don't miss an existing item with a key matching
5042 	 * the previous key we computed above.
5043 	 */
5044 	if (ret <= 0)
5045 		return 0;
5046 	return 1;
5047 }
5048 
5049 /*
5050  * A helper function to walk down the tree starting at min_key, and looking
5051  * for nodes or leaves that have a minimum transaction id.
5052  * This is used by the btree defrag code, and tree logging
5053  *
5054  * This does not cow, but it does stuff the starting key it finds back
5055  * into min_key, so you can call btrfs_search_slot with cow=1 on the
5056  * key and get a writable path.
5057  *
5058  * This honors path->lowest_level to prevent descent past a given level
5059  * of the tree.
5060  *
5061  * min_trans indicates the oldest transaction that you are interested
5062  * in walking through.  Any nodes or leaves older than min_trans are
5063  * skipped over (without reading them).
5064  *
5065  * returns zero if something useful was found, < 0 on error and 1 if there
5066  * was nothing in the tree that matched the search criteria.
5067  */
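/*
 * Illustrative usage sketch (not from the original source):
 *
 *	path->lowest_level = 0;
 *	ret = btrfs_search_forward(root, &min_key, path, min_trans);
 *
 * ret == 0 leaves the path at the found slot and copies the found key
 * back into min_key; callers that scan repeatedly must step min_key
 * past it (carrying an offset overflow into type/objectid) and release
 * the path between calls.
 */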
5068 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5069 			 struct btrfs_path *path,
5070 			 u64 min_trans)
5071 {
5072 	struct btrfs_fs_info *fs_info = root->fs_info;
5073 	struct extent_buffer *cur;
5074 	struct btrfs_key found_key;
5075 	int slot;
5076 	int sret;
5077 	u32 nritems;
5078 	int level;
5079 	int ret = 1;
5080 	int keep_locks = path->keep_locks;
5081 
5082 	path->keep_locks = 1;
5083 again:
5084 	cur = btrfs_read_lock_root_node(root);
5085 	level = btrfs_header_level(cur);
5086 	WARN_ON(path->nodes[level]);
5087 	path->nodes[level] = cur;
5088 	path->locks[level] = BTRFS_READ_LOCK;
5089 
5090 	if (btrfs_header_generation(cur) < min_trans) {
5091 		ret = 1;
5092 		goto out;
5093 	}
5094 	while (1) {
5095 		nritems = btrfs_header_nritems(cur);
5096 		level = btrfs_header_level(cur);
5097 		sret = btrfs_bin_search(cur, min_key, level, &slot);
5098 
5099 		/* at the lowest level, we're done, setup the path and exit */
5100 		if (level == path->lowest_level) {
5101 			if (slot >= nritems)
5102 				goto find_next_key;
5103 			ret = 0;
5104 			path->slots[level] = slot;
5105 			btrfs_item_key_to_cpu(cur, &found_key, slot);
5106 			goto out;
5107 		}
5108 		if (sret && slot > 0)
5109 			slot--;
5110 		/*
5111 		 * check this node pointer against the min_trans parameter.
5112 		 * If it is too old, skip to the next one.
5113 		 */
5114 		while (slot < nritems) {
5115 			u64 gen;
5116 
5117 			gen = btrfs_node_ptr_generation(cur, slot);
5118 			if (gen < min_trans) {
5119 				slot++;
5120 				continue;
5121 			}
5122 			break;
5123 		}
5124 find_next_key:
5125 		/*
5126 		 * we didn't find a candidate key in this node, walk forward
5127 		 * and find another one
5128 		 */
5129 		if (slot >= nritems) {
5130 			path->slots[level] = slot;
5131 			btrfs_set_path_blocking(path);
5132 			sret = btrfs_find_next_key(root, path, min_key, level,
5133 						  min_trans);
5134 			if (sret == 0) {
5135 				btrfs_release_path(path);
5136 				goto again;
5137 			} else {
5138 				goto out;
5139 			}
5140 		}
5141 		/* save our key for returning back */
5142 		btrfs_node_key_to_cpu(cur, &found_key, slot);
5143 		path->slots[level] = slot;
5144 		if (level == path->lowest_level) {
5145 			ret = 0;
5146 			goto out;
5147 		}
5148 		btrfs_set_path_blocking(path);
5149 		cur = read_node_slot(fs_info, cur, slot);
5150 		if (IS_ERR(cur)) {
5151 			ret = PTR_ERR(cur);
5152 			goto out;
5153 		}
5154 
5155 		btrfs_tree_read_lock(cur);
5156 
5157 		path->locks[level - 1] = BTRFS_READ_LOCK;
5158 		path->nodes[level - 1] = cur;
5159 		unlock_up(path, level, 1, 0, NULL);
5160 	}
5161 out:
5162 	path->keep_locks = keep_locks;
5163 	if (ret == 0) {
5164 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
5165 		btrfs_set_path_blocking(path);
5166 		memcpy(min_key, &found_key, sizeof(found_key));
5167 	}
5168 	return ret;
5169 }
5170 
5171 static int tree_move_down(struct btrfs_fs_info *fs_info,
5172 			   struct btrfs_path *path,
5173 			   int *level)
5174 {
5175 	struct extent_buffer *eb;
5176 
5177 	BUG_ON(*level == 0);
5178 	eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5179 	if (IS_ERR(eb))
5180 		return PTR_ERR(eb);
5181 
5182 	path->nodes[*level - 1] = eb;
5183 	path->slots[*level - 1] = 0;
5184 	(*level)--;
5185 	return 0;
5186 }
5187 
5188 static int tree_move_next_or_upnext(struct btrfs_path *path,
5189 				    int *level, int root_level)
5190 {
5191 	int ret = 0;
5192 	int nritems;
5193 	nritems = btrfs_header_nritems(path->nodes[*level]);
5194 
5195 	path->slots[*level]++;
5196 
5197 	while (path->slots[*level] >= nritems) {
5198 		if (*level == root_level)
5199 			return -1;
5200 
5201 		/* move upnext */
5202 		path->slots[*level] = 0;
5203 		free_extent_buffer(path->nodes[*level]);
5204 		path->nodes[*level] = NULL;
5205 		(*level)++;
5206 		path->slots[*level]++;
5207 
5208 		nritems = btrfs_header_nritems(path->nodes[*level]);
5209 		ret = 1;
5210 	}
5211 	return ret;
5212 }
5213 
5214 /*
5215  * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5216  * or down.
5217  */
5218 static int tree_advance(struct btrfs_fs_info *fs_info,
5219 			struct btrfs_path *path,
5220 			int *level, int root_level,
5221 			int allow_down,
5222 			struct btrfs_key *key)
5223 {
5224 	int ret;
5225 
5226 	if (*level == 0 || !allow_down) {
5227 		ret = tree_move_next_or_upnext(path, level, root_level);
5228 	} else {
5229 		ret = tree_move_down(fs_info, path, level);
5230 	}
5231 	if (ret >= 0) {
5232 		if (*level == 0)
5233 			btrfs_item_key_to_cpu(path->nodes[*level], key,
5234 					path->slots[*level]);
5235 		else
5236 			btrfs_node_key_to_cpu(path->nodes[*level], key,
5237 					path->slots[*level]);
5238 	}
5239 	return ret;
5240 }
5241 
5242 static int tree_compare_item(struct btrfs_path *left_path,
5243 			     struct btrfs_path *right_path,
5244 			     char *tmp_buf)
5245 {
5246 	int cmp;
5247 	int len1, len2;
5248 	unsigned long off1, off2;
5249 
5250 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5251 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5252 	if (len1 != len2)
5253 		return 1;
5254 
5255 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5256 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5257 				right_path->slots[0]);
5258 
5259 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5260 
5261 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5262 	if (cmp)
5263 		return 1;
5264 	return 0;
5265 }
5266 
5267 #define ADVANCE 1
5268 #define ADVANCE_ONLY_NEXT -1
5269 
5270 /*
5271  * This function compares two trees and calls the provided callback for
5272  * every changed/new/deleted item it finds.
5273  * If shared tree blocks are encountered, whole subtrees are skipped, making
5274  * the compare pretty fast on snapshotted subvolumes.
5275  *
5276  * This currently works on commit roots only. As commit roots are read only,
5277  * we don't do any locking. The commit roots are protected with transactions.
5278  * Transactions are ended and rejoined when a commit is tried in between.
5279  *
5280  * This function checks for modifications done to the trees while comparing.
5281  * If it detects a change, it aborts immediately.
5282  */
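/*
 * Illustrative callback sketch (not from the original source; the send
 * code installs a much richer version of this).  It counts results per
 * type, assuming the enum values start at zero:
 *
 *	static int my_changed_cb(struct btrfs_path *left_path,
 *				 struct btrfs_path *right_path,
 *				 struct btrfs_key *key,
 *				 enum btrfs_compare_tree_result result,
 *				 void *ctx)
 *	{
 *		u64 *counts = ctx;
 *
 *		counts[result]++;
 *		return 0;
 *	}
 *
 *	u64 counts[4] = { 0 };
 *
 *	ret = btrfs_compare_trees(new_root, old_root, my_changed_cb,
 *				  counts);
 *
 * Returning a negative value from the callback aborts the compare.
 */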
5283 int btrfs_compare_trees(struct btrfs_root *left_root,
5284 			struct btrfs_root *right_root,
5285 			btrfs_changed_cb_t changed_cb, void *ctx)
5286 {
5287 	struct btrfs_fs_info *fs_info = left_root->fs_info;
5288 	int ret;
5289 	int cmp;
5290 	struct btrfs_path *left_path = NULL;
5291 	struct btrfs_path *right_path = NULL;
5292 	struct btrfs_key left_key;
5293 	struct btrfs_key right_key;
5294 	char *tmp_buf = NULL;
5295 	int left_root_level;
5296 	int right_root_level;
5297 	int left_level;
5298 	int right_level;
5299 	int left_end_reached;
5300 	int right_end_reached;
5301 	int advance_left;
5302 	int advance_right;
5303 	u64 left_blockptr;
5304 	u64 right_blockptr;
5305 	u64 left_gen;
5306 	u64 right_gen;
5307 
5308 	left_path = btrfs_alloc_path();
5309 	if (!left_path) {
5310 		ret = -ENOMEM;
5311 		goto out;
5312 	}
5313 	right_path = btrfs_alloc_path();
5314 	if (!right_path) {
5315 		ret = -ENOMEM;
5316 		goto out;
5317 	}
5318 
5319 	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5320 	if (!tmp_buf) {
5321 		ret = -ENOMEM;
5322 		goto out;
5323 	}
5324 
5325 	left_path->search_commit_root = 1;
5326 	left_path->skip_locking = 1;
5327 	right_path->search_commit_root = 1;
5328 	right_path->skip_locking = 1;
5329 
5330 	/*
5331 	 * Strategy: Go to the first items of both trees. Then do
5332 	 *
5333 	 * If both trees are at level 0
5334 	 *   Compare keys of current items
5335 	 *     If left < right treat left item as new, advance left tree
5336 	 *       and repeat
5337 	 *     If left > right treat right item as deleted, advance right tree
5338 	 *       and repeat
5339 	 *     If left == right do deep compare of items, treat as changed if
5340 	 *       needed, advance both trees and repeat
5341 	 * If both trees are at the same level but not at level 0
5342 	 *   Compare keys of current nodes/leaves
5343 	 *     If left < right advance left tree and repeat
5344 	 *     If left > right advance right tree and repeat
5345 	 *     If left == right compare blockptrs of the next nodes/leaves
5346 	 *       If they match advance both trees but stay at the same level
5347 	 *         and repeat
5348 	 *       If they don't match advance both trees while allowing to go
5349 	 *         deeper and repeat
5350 	 * If tree levels are different
5351 	 *   Advance the tree that needs it and repeat
5352 	 *
5353 	 * Advancing a tree means:
5354 	 *   If we are at level 0, try to go to the next slot. If that's not
5355 	 *   possible, go one level up and repeat. Stop when we found a level
5356 	 *   where we could go to the next slot. We may at this point be on a
5357 	 *   node or a leaf.
5358 	 *
5359 	 *   If we are not at level 0 and not on shared tree blocks, go one
5360 	 *   level deeper.
5361 	 *
5362 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
5363 	 *   the right if possible or go up and right.
5364 	 */
5365 
5366 	down_read(&fs_info->commit_root_sem);
5367 	left_level = btrfs_header_level(left_root->commit_root);
5368 	left_root_level = left_level;
5369 	left_path->nodes[left_level] =
5370 			btrfs_clone_extent_buffer(left_root->commit_root);
5371 	if (!left_path->nodes[left_level]) {
5372 		up_read(&fs_info->commit_root_sem);
5373 		ret = -ENOMEM;
5374 		goto out;
5375 	}
5376 	extent_buffer_get(left_path->nodes[left_level]);
5377 
5378 	right_level = btrfs_header_level(right_root->commit_root);
5379 	right_root_level = right_level;
5380 	right_path->nodes[right_level] =
5381 			btrfs_clone_extent_buffer(right_root->commit_root);
5382 	if (!right_path->nodes[right_level]) {
5383 		up_read(&fs_info->commit_root_sem);
5384 		ret = -ENOMEM;
5385 		goto out;
5386 	}
5387 	extent_buffer_get(right_path->nodes[right_level]);
5388 	up_read(&fs_info->commit_root_sem);
5389 
5390 	if (left_level == 0)
5391 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
5392 				&left_key, left_path->slots[left_level]);
5393 	else
5394 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
5395 				&left_key, left_path->slots[left_level]);
5396 	if (right_level == 0)
5397 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
5398 				&right_key, right_path->slots[right_level]);
5399 	else
5400 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
5401 				&right_key, right_path->slots[right_level]);
5402 
5403 	left_end_reached = right_end_reached = 0;
5404 	advance_left = advance_right = 0;
5405 
5406 	while (1) {
5407 		if (advance_left && !left_end_reached) {
5408 			ret = tree_advance(fs_info, left_path, &left_level,
5409 					left_root_level,
5410 					advance_left != ADVANCE_ONLY_NEXT,
5411 					&left_key);
5412 			if (ret == -1)
5413 				left_end_reached = ADVANCE;
5414 			else if (ret < 0)
5415 				goto out;
5416 			advance_left = 0;
5417 		}
5418 		if (advance_right && !right_end_reached) {
5419 			ret = tree_advance(fs_info, right_path, &right_level,
5420 					right_root_level,
5421 					advance_right != ADVANCE_ONLY_NEXT,
5422 					&right_key);
5423 			if (ret == -1)
5424 				right_end_reached = ADVANCE;
5425 			else if (ret < 0)
5426 				goto out;
5427 			advance_right = 0;
5428 		}
5429 
5430 		if (left_end_reached && right_end_reached) {
5431 			ret = 0;
5432 			goto out;
5433 		} else if (left_end_reached) {
5434 			if (right_level == 0) {
5435 				ret = changed_cb(left_path, right_path,
5436 						&right_key,
5437 						BTRFS_COMPARE_TREE_DELETED,
5438 						ctx);
5439 				if (ret < 0)
5440 					goto out;
5441 			}
5442 			advance_right = ADVANCE;
5443 			continue;
5444 		} else if (right_end_reached) {
5445 			if (left_level == 0) {
5446 				ret = changed_cb(left_path, right_path,
5447 						&left_key,
5448 						BTRFS_COMPARE_TREE_NEW,
5449 						ctx);
5450 				if (ret < 0)
5451 					goto out;
5452 			}
5453 			advance_left = ADVANCE;
5454 			continue;
5455 		}
5456 
5457 		if (left_level == 0 && right_level == 0) {
5458 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5459 			if (cmp < 0) {
5460 				ret = changed_cb(left_path, right_path,
5461 						&left_key,
5462 						BTRFS_COMPARE_TREE_NEW,
5463 						ctx);
5464 				if (ret < 0)
5465 					goto out;
5466 				advance_left = ADVANCE;
5467 			} else if (cmp > 0) {
5468 				ret = changed_cb(left_path, right_path,
5469 						&right_key,
5470 						BTRFS_COMPARE_TREE_DELETED,
5471 						ctx);
5472 				if (ret < 0)
5473 					goto out;
5474 				advance_right = ADVANCE;
5475 			} else {
5476 				enum btrfs_compare_tree_result result;
5477 
5478 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5479 				ret = tree_compare_item(left_path, right_path,
5480 							tmp_buf);
5481 				if (ret)
5482 					result = BTRFS_COMPARE_TREE_CHANGED;
5483 				else
5484 					result = BTRFS_COMPARE_TREE_SAME;
5485 				ret = changed_cb(left_path, right_path,
5486 						 &left_key, result, ctx);
5487 				if (ret < 0)
5488 					goto out;
5489 				advance_left = ADVANCE;
5490 				advance_right = ADVANCE;
5491 			}
5492 		} else if (left_level == right_level) {
5493 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5494 			if (cmp < 0) {
5495 				advance_left = ADVANCE;
5496 			} else if (cmp > 0) {
5497 				advance_right = ADVANCE;
5498 			} else {
5499 				left_blockptr = btrfs_node_blockptr(
5500 						left_path->nodes[left_level],
5501 						left_path->slots[left_level]);
5502 				right_blockptr = btrfs_node_blockptr(
5503 						right_path->nodes[right_level],
5504 						right_path->slots[right_level]);
5505 				left_gen = btrfs_node_ptr_generation(
5506 						left_path->nodes[left_level],
5507 						left_path->slots[left_level]);
5508 				right_gen = btrfs_node_ptr_generation(
5509 						right_path->nodes[right_level],
5510 						right_path->slots[right_level]);
5511 				if (left_blockptr == right_blockptr &&
5512 				    left_gen == right_gen) {
5513 					/*
5514 					 * As we're on a shared block, don't
5515 					 * descend any deeper into it.
5516 					 */
5517 					advance_left = ADVANCE_ONLY_NEXT;
5518 					advance_right = ADVANCE_ONLY_NEXT;
5519 				} else {
5520 					advance_left = ADVANCE;
5521 					advance_right = ADVANCE;
5522 				}
5523 			}
5524 		} else if (left_level < right_level) {
5525 			advance_right = ADVANCE;
5526 		} else {
5527 			advance_left = ADVANCE;
5528 		}
5529 	}
5530 
5531 out:
5532 	btrfs_free_path(left_path);
5533 	btrfs_free_path(right_path);
5534 	kvfree(tmp_buf);
5535 	return ret;
5536 }
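
/*
 * Illustrative sketch (not part of the original file): a minimal callback of
 * the shape btrfs_compare_trees() invokes through changed_cb() above.  The
 * example_diff_stats struct and function name are hypothetical; a real user
 * (e.g. send) does far more per result.
 */
struct example_diff_stats {
	u64 new_items;
	u64 deleted;
	u64 changed;
};

static int example_changed_cb(struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	struct example_diff_stats *stats = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		stats->new_items++;	/* key present only in the left tree */
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		stats->deleted++;	/* key present only in the right tree */
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		stats->changed++;	/* same key, different item data */
		break;
	case BTRFS_COMPARE_TREE_SAME:
		break;			/* identical items, nothing to do */
	}
	return 0;	/* a negative return would abort the comparison */
}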
5537 
5538 /*
5539  * this is similar to btrfs_next_leaf, but does not try to preserve
5540  * and fix up the path.  It looks for and returns the next key in the
5541  * tree based on the current path and the min_trans parameters.
5542  *
5543  * 0 is returned if another key is found, < 0 if there are any errors,
5544  * and 1 if there are no higher keys in the tree.
5545  *
5546  * path->keep_locks should be set to 1 on the search made before
5547  * calling this function.
5548  */
5549 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5550 			struct btrfs_key *key, int level, u64 min_trans)
5551 {
5552 	int slot;
5553 	struct extent_buffer *c;
5554 
5555 	WARN_ON(!path->keep_locks);
5556 	while (level < BTRFS_MAX_LEVEL) {
5557 		if (!path->nodes[level])
5558 			return 1;
5559 
5560 		slot = path->slots[level] + 1;
5561 		c = path->nodes[level];
5562 next:
5563 		if (slot >= btrfs_header_nritems(c)) {
5564 			int ret;
5565 			int orig_lowest;
5566 			struct btrfs_key cur_key;
5567 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5568 			    !path->nodes[level + 1])
5569 				return 1;
5570 
5571 			if (path->locks[level + 1]) {
5572 				level++;
5573 				continue;
5574 			}
5575 
5576 			slot = btrfs_header_nritems(c) - 1;
5577 			if (level == 0)
5578 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5579 			else
5580 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5581 
5582 			orig_lowest = path->lowest_level;
5583 			btrfs_release_path(path);
5584 			path->lowest_level = level;
5585 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5586 						0, 0);
5587 			path->lowest_level = orig_lowest;
5588 			if (ret < 0)
5589 				return ret;
5590 
5591 			c = path->nodes[level];
5592 			slot = path->slots[level];
5593 			if (ret == 0)
5594 				slot++;
5595 			goto next;
5596 		}
5597 
5598 		if (level == 0)
5599 			btrfs_item_key_to_cpu(c, key, slot);
5600 		else {
5601 			u64 gen = btrfs_node_ptr_generation(c, slot);
5602 
5603 			if (gen < min_trans) {
5604 				slot++;
5605 				goto next;
5606 			}
5607 			btrfs_node_key_to_cpu(c, key, slot);
5608 		}
5609 		return 0;
5610 	}
5611 	return 1;
5612 }
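
/*
 * Illustrative sketch (not part of the original file): typical use of
 * btrfs_find_next_key() after a search made with keep_locks set, following
 * the pattern of callers like defrag.  The function name and arguments are
 * hypothetical and error handling is abbreviated.
 */
static int example_next_key_after(struct btrfs_root *root,
				  struct btrfs_key *search_key,
				  int level, u64 min_trans,
				  struct btrfs_key *next_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->keep_locks = 1;		/* required by btrfs_find_next_key */
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
	if (ret >= 0)
		ret = btrfs_find_next_key(root, path, next_key, level,
					  min_trans);
	btrfs_free_path(path);
	return ret;	/* 0: found, 1: no higher key, < 0: error */
}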
5613 
5614 /*
5615  * search the tree again to find a leaf with greater keys.
5616  * returns 0 if it found something, 1 if there are no greater leaves,
5617  * or < 0 on IO errors.
5618  */
5619 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5620 {
5621 	return btrfs_next_old_leaf(root, path, 0);
5622 }
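
/*
 * Illustrative sketch (not part of the original file): the common pattern for
 * scanning every item from @first_key to the end of the tree, moving to the
 * next leaf with btrfs_next_leaf() once the current one is exhausted.  The
 * function name is hypothetical and per-item processing is left as a stub.
 */
static int example_scan_items(struct btrfs_root *root,
			      struct btrfs_key *first_key)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, first_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		/* ... process the item at path->slots[0] here ... */
		path->slots[0]++;
	}
	if (ret > 0)	/* hitting the end of the tree is not an error */
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}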
5623 
5624 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5625 			u64 time_seq)
5626 {
5627 	int slot;
5628 	int level;
5629 	struct extent_buffer *c;
5630 	struct extent_buffer *next;
5631 	struct btrfs_key key;
5632 	u32 nritems;
5633 	int ret;
5634 	int old_spinning = path->leave_spinning;
5635 	int next_rw_lock = 0;
5636 
5637 	nritems = btrfs_header_nritems(path->nodes[0]);
5638 	if (nritems == 0)
5639 		return 1;
5640 
5641 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5642 again:
5643 	level = 1;
5644 	next = NULL;
5645 	next_rw_lock = 0;
5646 	btrfs_release_path(path);
5647 
5648 	path->keep_locks = 1;
5649 	path->leave_spinning = 1;
5650 
5651 	if (time_seq)
5652 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5653 	else
5654 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5655 	path->keep_locks = 0;
5656 
5657 	if (ret < 0)
5658 		return ret;
5659 
5660 	nritems = btrfs_header_nritems(path->nodes[0]);
5661 	/*
5662 	 * by releasing the path above we dropped all our locks.  A balance
5663 	 * could have added more items next to the key that used to be
5664 	 * at the very end of the block.  So, check again here and
5665 	 * advance the path if there are now more items available.
5666 	 */
5667 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5668 		if (ret == 0)
5669 			path->slots[0]++;
5670 		ret = 0;
5671 		goto done;
5672 	}
5673 	/*
5674 	 * So the above check misses one case:
5675 	 * - after releasing the path above, someone has removed the item that
5676 	 *   used to be at the very end of the block, and balancing between
5677 	 *   leaves moved another one with a bigger key.offset into its place.
5678 	 *
5679 	 * That item should be returned as well, or we can get leaf corruption
5680 	 * later (esp. in __btrfs_drop_extents()).
5681 	 *
5682 	 * A bit more explanation about this check:
5683 	 * with ret > 0, the key wasn't found, the path points to the slot
5684 	 * where it would be inserted, so the item at path->slots[0] must be
5685 	 * the bigger one.
5686 	 */
5687 	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5688 		ret = 0;
5689 		goto done;
5690 	}
5691 
5692 	while (level < BTRFS_MAX_LEVEL) {
5693 		if (!path->nodes[level]) {
5694 			ret = 1;
5695 			goto done;
5696 		}
5697 
5698 		slot = path->slots[level] + 1;
5699 		c = path->nodes[level];
5700 		if (slot >= btrfs_header_nritems(c)) {
5701 			level++;
5702 			if (level == BTRFS_MAX_LEVEL) {
5703 				ret = 1;
5704 				goto done;
5705 			}
5706 			continue;
5707 		}
5708 
5709 		if (next) {
5710 			btrfs_tree_unlock_rw(next, next_rw_lock);
5711 			free_extent_buffer(next);
5712 		}
5713 
5714 		next = c;
5715 		next_rw_lock = path->locks[level];
5716 		ret = read_block_for_search(root, path, &next, level,
5717 					    slot, &key);
5718 		if (ret == -EAGAIN)
5719 			goto again;
5720 
5721 		if (ret < 0) {
5722 			btrfs_release_path(path);
5723 			goto done;
5724 		}
5725 
5726 		if (!path->skip_locking) {
5727 			ret = btrfs_try_tree_read_lock(next);
5728 			if (!ret && time_seq) {
5729 				/*
5730 				 * If we don't get the lock, we may be racing
5731 				 * with push_leaf_left, which holds that lock
5732 				 * while waiting for the leaf we currently have
5733 				 * locked. To resolve this, we give up our lock
5734 				 * and cycle.
5735 				 */
5736 				free_extent_buffer(next);
5737 				btrfs_release_path(path);
5738 				cond_resched();
5739 				goto again;
5740 			}
5741 			if (!ret) {
5742 				btrfs_set_path_blocking(path);
5743 				btrfs_tree_read_lock(next);
5744 			}
5745 			next_rw_lock = BTRFS_READ_LOCK;
5746 		}
5747 		break;
5748 	}
5749 	path->slots[level] = slot;
5750 	while (1) {
5751 		level--;
5752 		c = path->nodes[level];
5753 		if (path->locks[level])
5754 			btrfs_tree_unlock_rw(c, path->locks[level]);
5755 
5756 		free_extent_buffer(c);
5757 		path->nodes[level] = next;
5758 		path->slots[level] = 0;
5759 		if (!path->skip_locking)
5760 			path->locks[level] = next_rw_lock;
5761 		if (!level)
5762 			break;
5763 
5764 		ret = read_block_for_search(root, path, &next, level,
5765 					    0, &key);
5766 		if (ret == -EAGAIN)
5767 			goto again;
5768 
5769 		if (ret < 0) {
5770 			btrfs_release_path(path);
5771 			goto done;
5772 		}
5773 
5774 		if (!path->skip_locking) {
5775 			ret = btrfs_try_tree_read_lock(next);
5776 			if (!ret) {
5777 				btrfs_set_path_blocking(path);
5778 				btrfs_tree_read_lock(next);
5779 			}
5780 			next_rw_lock = BTRFS_READ_LOCK;
5781 		}
5782 	}
5783 	ret = 0;
5784 done:
5785 	unlock_up(path, 0, 1, 0, NULL);
5786 	path->leave_spinning = old_spinning;
5787 	if (!old_spinning)
5788 		btrfs_set_path_blocking(path);
5789 
5790 	return ret;
5791 }
5792 
5793 /*
5794  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5795  * searching until it gets past min_objectid or finds an item of 'type'
5796  * searching until it gets past min_objectid or finds an item of 'type'.
5797  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5798  */
5799 int btrfs_previous_item(struct btrfs_root *root,
5800 			struct btrfs_path *path, u64 min_objectid,
5801 			int type)
5802 {
5803 	struct btrfs_key found_key;
5804 	struct extent_buffer *leaf;
5805 	u32 nritems;
5806 	int ret;
5807 
5808 	while (1) {
5809 		if (path->slots[0] == 0) {
5810 			btrfs_set_path_blocking(path);
5811 			ret = btrfs_prev_leaf(root, path);
5812 			if (ret != 0)
5813 				return ret;
5814 		} else {
5815 			path->slots[0]--;
5816 		}
5817 		leaf = path->nodes[0];
5818 		nritems = btrfs_header_nritems(leaf);
5819 		if (nritems == 0)
5820 			return 1;
5821 		if (path->slots[0] == nritems)
5822 			path->slots[0]--;
5823 
5824 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5825 		if (found_key.objectid < min_objectid)
5826 			break;
5827 		if (found_key.type == type)
5828 			return 0;
5829 		if (found_key.objectid == min_objectid &&
5830 		    found_key.type < type)
5831 			break;
5832 	}
5833 	return 1;
5834 }
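
/*
 * Illustrative sketch (not part of the original file): after a search has
 * positioned @path just past the region of interest, step backwards to the
 * previous item of a given type.  BTRFS_INODE_ITEM_KEY is only an example
 * type; the function name is hypothetical.
 */
static int example_prev_inode_item(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 min_objectid,
				   struct btrfs_key *found)
{
	int ret;

	ret = btrfs_previous_item(root, path, min_objectid,
				  BTRFS_INODE_ITEM_KEY);
	if (ret == 0)	/* 0: found, 1: nothing left, < 0: error */
		btrfs_item_key_to_cpu(path->nodes[0], found,
				      path->slots[0]);
	return ret;
}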
5835 
5836 /*
5837  * search in the extent tree to find a previous Metadata/Data extent item
5838  * with a minimum objectid.
5839  *
5840  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5841  */
5842 int btrfs_previous_extent_item(struct btrfs_root *root,
5843 			struct btrfs_path *path, u64 min_objectid)
5844 {
5845 	struct btrfs_key found_key;
5846 	struct extent_buffer *leaf;
5847 	u32 nritems;
5848 	int ret;
5849 
5850 	while (1) {
5851 		if (path->slots[0] == 0) {
5852 			btrfs_set_path_blocking(path);
5853 			ret = btrfs_prev_leaf(root, path);
5854 			if (ret != 0)
5855 				return ret;
5856 		} else {
5857 			path->slots[0]--;
5858 		}
5859 		leaf = path->nodes[0];
5860 		nritems = btrfs_header_nritems(leaf);
5861 		if (nritems == 0)
5862 			return 1;
5863 		if (path->slots[0] == nritems)
5864 			path->slots[0]--;
5865 
5866 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5867 		if (found_key.objectid < min_objectid)
5868 			break;
5869 		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5870 		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5871 			return 0;
5872 		if (found_key.objectid == min_objectid &&
5873 		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5874 			break;
5875 	}
5876 	return 1;
5877 }
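
/*
 * Illustrative sketch (not part of the original file): counting how many
 * extent items precede the current @path position in the extent tree, down
 * to @min_objectid.  The function name is hypothetical.
 */
static int example_count_prev_extents(struct btrfs_root *extent_root,
				      struct btrfs_path *path,
				      u64 min_objectid, u64 *count)
{
	int ret;

	*count = 0;
	while ((ret = btrfs_previous_extent_item(extent_root, path,
						 min_objectid)) == 0)
		(*count)++;

	return ret < 0 ? ret : 0;	/* ret == 1 just means the scan ended */
}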
5878