xref: /linux/fs/btrfs/extent_io.c (revision 95188aaf9fc81e9539606cad5c9579bd27604f92)
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
	unsigned long bio_flags;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

static noinline void flush_write_bio(void *data);
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	return btrfs_sb(tree->mapping->host->i_sb);
}

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}

	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}
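
/*
 * Illustrative sketch (not from this file): extent_state objects are
 * refcounted, so a temporary reference is normally dropped with
 * free_extent_state() below rather than freed directly:
 *
 *	struct extent_state *state = alloc_extent_state(GFP_NOFS);
 *	if (!state)
 *		return -ENOMEM;
 *	...
 *	free_extent_state(state);	// drops the ref taken at allocation
 */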

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
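
/*
 * Note on tree_insert(): the rb-tree is keyed by each entry's 'end'
 * offset, and an insert that lands inside an existing [start, end]
 * returns that node instead of linking the new one.  Caller-side
 * sketch (see insert_state() below):
 *
 *	node = tree_insert(&tree->state, state->end, &state->rb_node);
 *	if (node)	// an overlapping entry already exists
 *		return -EEXIST;
 */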

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}
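
/*
 * tree_search() returns the entry containing 'offset' when one exists,
 * otherwise the first entry starting after 'offset', or NULL when
 * 'offset' is past the last entry.  Callers re-check bounds, e.g.
 * (illustrative):
 *
 *	node = tree_search(tree, start);
 *	if (!node)
 *		goto out;
 *	state = rb_entry(node, struct extent_state, rb_node);
 *	if (state->start > end)
 *		goto out;
 */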

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS or EXTENT_BOUNDARY in
 * their state field are not merged because the end_io handlers need to
 * be able to do operations on them without sleeping (or doing
 * allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
		        struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
}

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int *bits)
{
	struct rb_node *node;

	if (end < start)
		WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}
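
/*
 * Worked example (illustrative): splitting a state covering [0, 8191]
 * at offset 4096 leaves two entries carrying identical state bits:
 *
 *	prealloc: [0, 4095]
 *	orig:     [4096, 8191]
 *
 * -EEXIST here means the tree changed underneath a caller that was
 * supposed to hold the tree lock; extent_io_tree_panic() handles that.
 */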

static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    int *bits, int wake)
{
	struct extent_state *next;
	int bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
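
/*
 * Usage sketch (assumed caller, not from this file): dropping the
 * dirty-tracking bits over a byte range of an inode's io_tree:
 *
 *	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end,
 *			 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
 *			 NULL, GFP_NOFS);
 *
 * With a mask that allows waiting, -ENOMEM is only possible on the
 * initial preallocation; mid-operation allocations fall back to
 * GFP_ATOMIC and BUG on failure.
 */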

static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		cond_resched_lock(&tree->lock);
	}
out:
	spin_unlock(&tree->lock);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits)
{
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && (*cached_ptr)) {
		struct extent_state *state = *cached_ptr;
		*cached_ptr = NULL;
		free_extent_state(state);
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */
static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 int bits, int exclusive_bits, u64 *failed_start,
		 struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the next extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   u64 *failed_start, struct extent_state **cached_state,
		   gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask);
}
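
/*
 * Usage sketch (assumed caller): setting bits while keeping a cached
 * state to short-circuit the next tree search:
 *
 *	struct extent_state *cached = NULL;
 *
 *	set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
 *		       &cached, GFP_NOFS);
 *	...
 *	free_extent_state(cached);	// drop the cache_state() ref
 *
 * Exclusive bits are only reachable through __set_extent_bit(); see
 * lock_extent_bits() below for the -EEXIST retry loop.
 */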

/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 * @mask:	the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       int bits, int clear_bits,
		       struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		if (err)
			extent_io_tree_panic(tree, err);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the next extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
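
/*
 * Usage sketch (assumed caller, patterned on the transaction code's
 * dirty_pages handling): swapping one mergeable bit for another in a
 * single pass under the tree lock, so no window exists where the range
 * carries neither bit:
 *
 *	convert_extent_bit(tree, start, end, EXTENT_NEED_WAIT,
 *			   EXTENT_DIRTY, &cached, GFP_NOFS);
 */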

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      NULL, cached_state, mask);
}

int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
		      struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  struct extent_state **cached_state, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

/*
 * either insert or lock state struct between start and end.  Sleeps with
 * GFP_NOFS allocations until the whole range is locked.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached_state)
{
	int err;
	u64 failed_start;
	while (1) {
		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				       EXTENT_LOCKED, &failed_start,
				       cached_state, GFP_NOFS);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, 0, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			       &failed_start, NULL, GFP_NOFS);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				GFP_NOFS);
}
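
/*
 * Typical lock/unlock pairing (illustrative sketch):
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, 0, &cached);
 *	... read or modify [start, end] ...
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 *
 * The cached state lets the unlock skip the tree search;
 * unlock_extent() is the uncached variant.
 */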

/*
 * helper function to set all the pages in a given range writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits,
			  struct extent_state **cached_state)
{
	struct extent_state *state;
	struct rb_node *n;
	int ret = 1;

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->end == start - 1 && state->tree) {
			n = rb_next(&state->rb_node);
			while (n) {
				state = rb_entry(n, struct extent_state,
						 rb_node);
				if (state->state & bits)
					goto got_it;
				n = rb_next(n);
			}
			free_extent_state(*cached_state);
			*cached_state = NULL;
			goto out;
		}
		free_extent_state(*cached_state);
		*cached_state = NULL;
	}

	state = find_first_extent_bit_state(tree, start, bits);
got_it:
	if (state) {
		cache_state(state, cached_state);
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}
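
/*
 * Iteration sketch (assumed caller): visiting every range with a bit
 * set by restarting each search just past the previous hit:
 *
 *	u64 start = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start,
 *				      &found_end, EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */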

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

static noinline void __unlock_for_delalloc(struct inode *inode,
					   struct page *locked_page,
					   u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
}

static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			      PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes, &cached_state);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		free_extent_state(cached_state);
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, let's avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
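
/*
 * Note: the find / lock-pages / lock-state / re-test sequence above is
 * what makes the returned range stable.  A racing writeback can clear
 * EXTENT_DELALLOC between the steps, in which case everything is
 * unlocked and the search restarts from 'again'.
 */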

int extent_clear_unlock_delalloc(struct inode *inode,
				struct extent_io_tree *tree,
				u64 start, u64 end, struct page *locked_page,
				unsigned long op)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int clear_bits = 0;

	if (op & EXTENT_CLEAR_UNLOCK)
		clear_bits |= EXTENT_LOCKED;
	if (op & EXTENT_CLEAR_DIRTY)
		clear_bits |= EXTENT_DIRTY;

	if (op & EXTENT_CLEAR_DELALLOC)
		clear_bits |= EXTENT_DELALLOC;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
		    EXTENT_SET_PRIVATE2)))
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {

			if (op & EXTENT_SET_PRIVATE2)
				SetPagePrivate2(pages[i]);

			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (op & EXTENT_CLEAR_DIRTY)
				clear_page_dirty_for_io(pages[i]);
			if (op & EXTENT_SET_WRITEBACK)
				set_page_writeback(pages[i]);
			if (op & EXTENT_END_WRITEBACK)
				end_page_writeback(pages[i]);
			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits, int contig)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (search_end <= cur_start) {
		WARN_ON(1);
		return 0;
	}

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}
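
/*
 * Usage sketch (illustrative): asking for the dirty total of the whole
 * tree hits the cached-counter fast path and is O(1):
 *
 *	u64 off = 0;
 *	u64 dirty = count_range_bits(tree, &off, (u64)-1, (u64)-1,
 *				     EXTENT_DIRTY, 0);
 *
 * Every other combination of arguments walks the tree node by node.
 */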

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state starting exactly there, this returns -ENOENT.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock(&tree->lock);
	return ret;
}
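
/*
 * Usage sketch (illustrative): the private field stores a pointer-sized
 * cookie per state; clean_io_failure() below round-trips an
 * io_failure_record through it:
 *
 *	set_state_private(failure_tree, failrec->start,
 *			  (u64)(unsigned long)failrec);
 *	...
 *	ret = get_state_private(failure_tree, start, &private);
 *	failrec = (struct io_failure_record *)(unsigned long)private;
 */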

/*
 * searches a range in the state tree for the given bits.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled, struct extent_state *cached)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	spin_lock(&tree->lock);
	if (cached && cached->tree && cached->start <= start &&
	    cached->end > start)
		node = &cached->rb_node;
	else
		node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}

		if (state->end == (u64)-1)
			break;

		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock(&tree->lock);
	return bitset;
}
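
/*
 * Usage sketch (illustrative): 'filled' picks between "all bytes have
 * the bits" and "any byte has the bits"; compare the page helpers
 * below:
 *
 *	// whole page uptodate?
 *	test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
 *	// any part of the page still locked?
 *	test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL);
 */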

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
		SetPageUptodate(page);
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static void check_page_locked(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
		unlock_page(page);
}

/*
 * helper function to end page writeback; the per-extent writeback
 * state is no longer checked here, so this simply ends writeback
 * on the page
 */
static void check_page_writeback(struct extent_io_tree *tree,
				 struct page *page)
{
	end_page_writeback(page);
}

/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
				int did_repair)
{
	int ret;
	int err = 0;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;

	set_state_private(failure_tree, rec->start, 0);
	ret = clear_extent_bits(failure_tree, rec->start,
				rec->start + rec->len - 1,
				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
	if (ret)
		err = ret;

	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
				rec->start + rec->len - 1,
				EXTENT_DAMAGED, GFP_NOFS);
	if (ret && !err)
		err = ret;

	kfree(rec);
	return err;
}

static void repair_io_failure_callback(struct bio *bio, int err)
{
	complete(bio->bi_private);
}
1911 
1912 /*
1913  * this bypasses the standard btrfs submit functions deliberately, as
1914  * the standard behavior is to write all copies in a raid setup. here we only
1915  * want to write the one bad copy. so we do the mapping for ourselves and issue
1916  * submit_bio directly.
1917  * to avoid any synchronization issues, wait for the data after writing, which
1918  * actually prevents the read that triggered the error from finishing.
1919  * currently, there can be no more than two copies of every data bit. thus,
1920  * exactly one rewrite is required.
1921  */
1922 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1923 			u64 length, u64 logical, struct page *page,
1924 			int mirror_num)
1925 {
1926 	struct bio *bio;
1927 	struct btrfs_device *dev;
1928 	DECLARE_COMPLETION_ONSTACK(compl);
1929 	u64 map_length = 0;
1930 	u64 sector;
1931 	struct btrfs_bio *bbio = NULL;
1932 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
1933 	int ret;
1934 
1935 	BUG_ON(!mirror_num);
1936 
1937 	/* we can't repair anything in raid56 yet */
1938 	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
1939 		return 0;
1940 
1941 	bio = bio_alloc(GFP_NOFS, 1);
1942 	if (!bio)
1943 		return -EIO;
1944 	bio->bi_private = &compl;
1945 	bio->bi_end_io = repair_io_failure_callback;
1946 	bio->bi_size = 0;
1947 	map_length = length;
1948 
1949 	ret = btrfs_map_block(fs_info, WRITE, logical,
1950 			      &map_length, &bbio, mirror_num);
1951 	if (ret) {
1952 		bio_put(bio);
1953 		return -EIO;
1954 	}
1955 	BUG_ON(mirror_num != bbio->mirror_num);
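	/* stripes[].physical is in bytes; bi_sector is in 512-byte units */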
1956 	sector = bbio->stripes[mirror_num-1].physical >> 9;
1957 	bio->bi_sector = sector;
1958 	dev = bbio->stripes[mirror_num-1].dev;
1959 	kfree(bbio);
1960 	if (!dev || !dev->bdev || !dev->writeable) {
1961 		bio_put(bio);
1962 		return -EIO;
1963 	}
1964 	bio->bi_bdev = dev->bdev;
1965 	bio_add_page(bio, page, length, start - page_offset(page));
1966 	btrfsic_submit_bio(WRITE_SYNC, bio);
1967 	wait_for_completion(&compl);
1968 
1969 	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1970 		/* try to remap that extent elsewhere? */
1971 		bio_put(bio);
1972 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
1973 		return -EIO;
1974 	}
1975 
1976 	printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
1977 		      "(dev %s sector %llu)\n", page->mapping->host->i_ino,
1978 		      start, rcu_str_deref(dev->name), sector);
1979 
1980 	bio_put(bio);
1981 	return 0;
1982 }
1983 
1984 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1985 			 int mirror_num)
1986 {
1987 	u64 start = eb->start;
1988 	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
1989 	int ret = 0;
1990 
1991 	for (i = 0; i < num_pages; i++) {
1992 		struct page *p = extent_buffer_page(eb, i);
1993 		ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
1994 					start, p, mirror_num);
1995 		if (ret)
1996 			break;
1997 		start += PAGE_CACHE_SIZE;
1998 	}
1999 
2000 	return ret;
2001 }
2002 
2003 /*
2004  * each time an IO finishes, we do a fast check in the IO failure tree
2005  * to see if we need to process or clean up an io_failure_record
2006  */
2007 static int clean_io_failure(u64 start, struct page *page)
2008 {
2009 	u64 private;
2010 	u64 private_failure;
2011 	struct io_failure_record *failrec;
2012 	struct btrfs_fs_info *fs_info;
2013 	struct extent_state *state;
2014 	int num_copies;
2015 	int did_repair = 0;
2016 	int ret;
2017 	struct inode *inode = page->mapping->host;
2018 
2019 	private = 0;
2020 	ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2021 				(u64)-1, 1, EXTENT_DIRTY, 0);
2022 	if (!ret)
2023 		return 0;
2024 
2025 	ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2026 				&private_failure);
2027 	if (ret)
2028 		return 0;
2029 
2030 	failrec = (struct io_failure_record *)(unsigned long) private_failure;
2031 	BUG_ON(!failrec->this_mirror);
2032 
2033 	if (failrec->in_validation) {
2034 		/* there was no real error, just free the record */
2035 		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2036 			 failrec->start);
2037 		did_repair = 1;
2038 		goto out;
2039 	}
2040 
2041 	spin_lock(&BTRFS_I(inode)->io_tree.lock);
2042 	state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2043 					    failrec->start,
2044 					    EXTENT_LOCKED);
2045 	spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2046 
2047 	if (state && state->start == failrec->start) {
2048 		fs_info = BTRFS_I(inode)->root->fs_info;
2049 		num_copies = btrfs_num_copies(fs_info, failrec->logical,
2050 					      failrec->len);
2051 		if (num_copies > 1)  {
2052 			ret = repair_io_failure(fs_info, start, failrec->len,
2053 						failrec->logical, page,
2054 						failrec->failed_mirror);
2055 			did_repair = !ret;
2056 		}
2057 		ret = 0;
2058 	}
2059 
2060 out:
2061 	if (!ret)
2062 		ret = free_io_failure(inode, failrec, did_repair);
2063 
2064 	return ret;
2065 }
2066 
2067 /*
2068  * this is a generic handler for readpage errors (default
2069  * readpage_io_failed_hook). if other copies exist, read those and write back
2070  * good data to the failed position. does not investigate in remapping the
2071  * failed extent elsewhere, hoping the device will be smart enough to do this as
2072  * needed
2073  */
2074 
2075 static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2076 				u64 start, u64 end, int failed_mirror,
2077 				struct extent_state *state)
2078 {
2079 	struct io_failure_record *failrec = NULL;
2080 	u64 private;
2081 	struct extent_map *em;
2082 	struct inode *inode = page->mapping->host;
2083 	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2084 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2085 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2086 	struct bio *bio;
2087 	int num_copies;
2088 	int ret;
2089 	int read_mode;
2090 	u64 logical;
2091 
2092 	BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2093 
2094 	ret = get_state_private(failure_tree, start, &private);
2095 	if (ret) {
2096 		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2097 		if (!failrec)
2098 			return -ENOMEM;
2099 		failrec->start = start;
2100 		failrec->len = end - start + 1;
2101 		failrec->this_mirror = 0;
2102 		failrec->bio_flags = 0;
2103 		failrec->in_validation = 0;
2104 
2105 		read_lock(&em_tree->lock);
2106 		em = lookup_extent_mapping(em_tree, start, failrec->len);
2107 		if (!em) {
2108 			read_unlock(&em_tree->lock);
2109 			kfree(failrec);
2110 			return -EIO;
2111 		}
2112 
2113 		if (em->start > start || em->start + em->len <= start) {
2114 			free_extent_map(em);
2115 			em = NULL;
2116 		}
2117 		read_unlock(&em_tree->lock);
2118 
2119 		if (!em) {
2120 			kfree(failrec);
2121 			return -EIO;
2122 		}
2123 		logical = start - em->start;
2124 		logical = em->block_start + logical;
2125 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2126 			logical = em->block_start;
2127 			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2128 			extent_set_compress_type(&failrec->bio_flags,
2129 						 em->compress_type);
2130 		}
2131 		pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2132 			 "len=%llu\n", logical, start, failrec->len);
2133 		failrec->logical = logical;
2134 		free_extent_map(em);
2135 
2136 		/* set the bits in the private failure tree */
2137 		ret = set_extent_bits(failure_tree, start, end,
2138 					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2139 		if (ret >= 0)
2140 			ret = set_state_private(failure_tree, start,
2141 						(u64)(unsigned long)failrec);
2142 		/* set the bits in the inode's tree */
2143 		if (ret >= 0)
2144 			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2145 						GFP_NOFS);
2146 		if (ret < 0) {
2147 			kfree(failrec);
2148 			return ret;
2149 		}
2150 	} else {
2151 		failrec = (struct io_failure_record *)(unsigned long)private;
2152 		pr_debug("bio_readpage_error: (found) logical=%llu, "
2153 			 "start=%llu, len=%llu, validation=%d\n",
2154 			 failrec->logical, failrec->start, failrec->len,
2155 			 failrec->in_validation);
2156 		/*
2157 		 * when data can exist on disk in more than two copies, add to failrec here
2158 		 * (e.g. with a list for failed_mirror) to make
2159 		 * clean_io_failure() clean all those errors at once.
2160 		 */
2161 	}
2162 	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2163 				      failrec->logical, failrec->len);
2164 	if (num_copies == 1) {
2165 		/*
2166 		 * we only have a single copy of the data, so don't bother with
2167 		 * all the retry and error correction code that follows. no
2168 		 * matter what the error is, it is very likely to persist.
2169 		 */
2170 		pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2171 			 "state=%p, num_copies=%d, next_mirror %d, "
2172 			 "failed_mirror %d\n", state, num_copies,
2173 			 failrec->this_mirror, failed_mirror);
2174 		free_io_failure(inode, failrec, 0);
2175 		return -EIO;
2176 	}
2177 
2178 	if (!state) {
2179 		spin_lock(&tree->lock);
2180 		state = find_first_extent_bit_state(tree, failrec->start,
2181 						    EXTENT_LOCKED);
2182 		if (state && state->start != failrec->start)
2183 			state = NULL;
2184 		spin_unlock(&tree->lock);
2185 	}
2186 
2187 	/*
2188 	 * there are two goals here:
2189 	 *	a) deliver good data to the caller
2190 	 *	b) correct the bad sectors on disk
2191 	 */
2192 	if (failed_bio->bi_vcnt > 1) {
2193 		/*
2194 		 * to fulfill b), we need to know the exact failing sectors, as
2195 		 * we don't want to rewrite any more than the failed ones. thus,
2196 		 * we need separate read requests for the failed bio
2197 		 *
2198 		 * if the following BUG_ON triggers, our validation request got
2199 		 * merged. we need separate requests for our algorithm to work.
2200 		 */
2201 		BUG_ON(failrec->in_validation);
2202 		failrec->in_validation = 1;
2203 		failrec->this_mirror = failed_mirror;
2204 		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2205 	} else {
2206 		/*
2207 		 * we're ready to fulfill a) and b) at the same time. get a good
2208 		 * copy of the failed sector and if we succeed, we have set up
2209 		 * everything for repair_io_failure to do the rest for us.
2210 		 */
2211 		if (failrec->in_validation) {
2212 			BUG_ON(failrec->this_mirror != failed_mirror);
2213 			failrec->in_validation = 0;
2214 			failrec->this_mirror = 0;
2215 		}
2216 		failrec->failed_mirror = failed_mirror;
2217 		failrec->this_mirror++;
2218 		if (failrec->this_mirror == failed_mirror)
2219 			failrec->this_mirror++;
2220 		read_mode = READ_SYNC;
2221 	}
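	/*
	 * a worked example of the rotation above, with hypothetical
	 * numbers: for num_copies == 2 and failed_mirror == 1, the first
	 * pass picks this_mirror = 2; if that read fails as well, the
	 * next pass advances this_mirror to 3 and the check below gives
	 * up because this_mirror > num_copies.
	 */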
2222 
2223 	if (!state || failrec->this_mirror > num_copies) {
2224 		pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2225 			 "next_mirror %d, failed_mirror %d\n", state,
2226 			 num_copies, failrec->this_mirror, failed_mirror);
2227 		free_io_failure(inode, failrec, 0);
2228 		return -EIO;
2229 	}
2230 
2231 	bio = bio_alloc(GFP_NOFS, 1);
2232 	if (!bio) {
2233 		free_io_failure(inode, failrec, 0);
2234 		return -EIO;
2235 	}
2236 	bio->bi_private = state;
2237 	bio->bi_end_io = failed_bio->bi_end_io;
2238 	bio->bi_sector = failrec->logical >> 9;
2239 	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2240 	bio->bi_size = 0;
2241 
2242 	bio_add_page(bio, page, failrec->len, start - page_offset(page));
2243 
2244 	pr_debug("bio_readpage_error: submitting new read[%#x] to "
2245 		 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2246 		 failrec->this_mirror, num_copies, failrec->in_validation);
2247 
2248 	ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2249 					 failrec->this_mirror,
2250 					 failrec->bio_flags, 0);
2251 	return ret;
2252 }
2253 
2254 /* lots and lots of room for performance fixes in the end_bio funcs */
2255 
2256 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2257 {
2258 	int uptodate = (err == 0);
2259 	struct extent_io_tree *tree;
2260 	int ret;
2261 
2262 	tree = &BTRFS_I(page->mapping->host)->io_tree;
2263 
2264 	if (tree->ops && tree->ops->writepage_end_io_hook) {
2265 		ret = tree->ops->writepage_end_io_hook(page, start,
2266 					       end, NULL, uptodate);
2267 		if (ret)
2268 			uptodate = 0;
2269 	}
2270 
2271 	if (!uptodate) {
2272 		ClearPageUptodate(page);
2273 		SetPageError(page);
2274 	}
2275 	return 0;
2276 }
2277 
2278 /*
2279  * after a writepage IO is done, we need to:
2280  * clear the uptodate bits on error
2281  * clear the writeback bits in the extent tree for this IO
2282  * end_page_writeback if the page has no more pending IO
2283  *
2284  * Scheduling is not allowed, so the extent state tree is expected
2285  * to have one and only one object corresponding to this IO.
2286  */
2287 static void end_bio_extent_writepage(struct bio *bio, int err)
2288 {
2289 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2290 	struct extent_io_tree *tree;
2291 	u64 start;
2292 	u64 end;
2293 	int whole_page;
2294 
2295 	do {
2296 		struct page *page = bvec->bv_page;
2297 		tree = &BTRFS_I(page->mapping->host)->io_tree;
2298 
2299 		start = page_offset(page) + bvec->bv_offset;
2300 		end = start + bvec->bv_len - 1;
2301 
2302 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2303 			whole_page = 1;
2304 		else
2305 			whole_page = 0;
2306 
2307 		if (--bvec >= bio->bi_io_vec)
2308 			prefetchw(&bvec->bv_page->flags);
2309 
2310 		if (end_extent_writepage(page, err, start, end))
2311 			continue;
2312 
2313 		if (whole_page)
2314 			end_page_writeback(page);
2315 		else
2316 			check_page_writeback(tree, page);
2317 	} while (bvec >= bio->bi_io_vec);
2318 
2319 	bio_put(bio);
2320 }
2321 
2322 /*
2323  * after a readpage IO is done, we need to:
2324  * clear the uptodate bits on error
2325  * set the uptodate bits if things worked
2326  * set the page up to date if all extents in the tree are uptodate
2327  * clear the lock bit in the extent tree
2328  * unlock the page if there are no other extents locked for it
2329  *
2330  * Scheduling is not allowed, so the extent state tree is expected
2331  * to have one and only one object corresponding to this IO.
2332  */
2333 static void end_bio_extent_readpage(struct bio *bio, int err)
2334 {
2335 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2336 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2337 	struct bio_vec *bvec = bio->bi_io_vec;
2338 	struct extent_io_tree *tree;
2339 	u64 start;
2340 	u64 end;
2341 	int whole_page;
2342 	int mirror;
2343 	int ret;
2344 
2345 	if (err)
2346 		uptodate = 0;
2347 
2348 	do {
2349 		struct page *page = bvec->bv_page;
2350 		struct extent_state *cached = NULL;
2351 		struct extent_state *state;
2352 
2353 		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2354 			 "mirror=%ld\n", (u64)bio->bi_sector, err,
2355 			 (long int)bio->bi_bdev);
2356 		tree = &BTRFS_I(page->mapping->host)->io_tree;
2357 
2358 		start = page_offset(page) + bvec->bv_offset;
2359 		end = start + bvec->bv_len - 1;
2360 
2361 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2362 			whole_page = 1;
2363 		else
2364 			whole_page = 0;
2365 
2366 		if (++bvec <= bvec_end)
2367 			prefetchw(&bvec->bv_page->flags);
2368 
2369 		spin_lock(&tree->lock);
2370 		state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2371 		if (state && state->start == start) {
2372 			/*
2373 			 * take a reference on the state, unlock will drop
2374 			 * the ref
2375 			 */
2376 			cache_state(state, &cached);
2377 		}
2378 		spin_unlock(&tree->lock);
2379 
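		/*
		 * by the time a read bio completes, bi_bdev no longer
		 * points at a device: btrfs's completion path reuses it
		 * to carry the mirror number, hence the cast below (and
		 * the "mirror" pr_debug above)
		 */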
2380 		mirror = (int)(unsigned long)bio->bi_bdev;
2381 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2382 			ret = tree->ops->readpage_end_io_hook(page, start, end,
2383 							      state, mirror);
2384 			if (ret)
2385 				uptodate = 0;
2386 			else
2387 				clean_io_failure(start, page);
2388 		}
2389 
2390 		if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2391 			ret = tree->ops->readpage_io_failed_hook(page, mirror);
2392 			if (!ret && !err &&
2393 			    test_bit(BIO_UPTODATE, &bio->bi_flags))
2394 				uptodate = 1;
2395 		} else if (!uptodate) {
2396 			/*
2397 			 * The generic bio_readpage_error handles errors the
2398 			 * following way: If possible, new read requests are
2399 			 * created and submitted and will end up in
2400 			 * end_bio_extent_readpage as well (if we're lucky, not
2401 			 * in the !uptodate case). In that case it returns 0 and
2402 			 * we just go on with the next page in our bio. If it
2403 			 * can't handle the error it will return -EIO and we
2404 			 * remain responsible for that page.
2405 			 */
2406 			ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
2407 			if (ret == 0) {
2408 				uptodate =
2409 					test_bit(BIO_UPTODATE, &bio->bi_flags);
2410 				if (err)
2411 					uptodate = 0;
2412 				uncache_state(&cached);
2413 				continue;
2414 			}
2415 		}
2416 
2417 		if (uptodate && tree->track_uptodate) {
2418 			set_extent_uptodate(tree, start, end, &cached,
2419 					    GFP_ATOMIC);
2420 		}
2421 		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2422 
2423 		if (whole_page) {
2424 			if (uptodate) {
2425 				SetPageUptodate(page);
2426 			} else {
2427 				ClearPageUptodate(page);
2428 				SetPageError(page);
2429 			}
2430 			unlock_page(page);
2431 		} else {
2432 			if (uptodate) {
2433 				check_page_uptodate(tree, page);
2434 			} else {
2435 				ClearPageUptodate(page);
2436 				SetPageError(page);
2437 			}
2438 			check_page_locked(tree, page);
2439 		}
2440 	} while (bvec <= bvec_end);
2441 
2442 	bio_put(bio);
2443 }
2444 
2445 struct bio *
2446 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2447 		gfp_t gfp_flags)
2448 {
2449 	struct bio *bio;
2450 
2451 	bio = bio_alloc(gfp_flags, nr_vecs);
2452 
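	/*
	 * if we're in memory-reclaim context, keep halving the vec count
	 * instead of failing outright; a smaller bio beats no bio at all
	 */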
2453 	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2454 		while (!bio && (nr_vecs /= 2))
2455 			bio = bio_alloc(gfp_flags, nr_vecs);
2456 	}
2457 
2458 	if (bio) {
2459 		bio->bi_size = 0;
2460 		bio->bi_bdev = bdev;
2461 		bio->bi_sector = first_sector;
2462 	}
2463 	return bio;
2464 }
2465 
2466 static int __must_check submit_one_bio(int rw, struct bio *bio,
2467 				       int mirror_num, unsigned long bio_flags)
2468 {
2469 	int ret = 0;
2470 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2471 	struct page *page = bvec->bv_page;
2472 	struct extent_io_tree *tree = bio->bi_private;
2473 	u64 start;
2474 
2475 	start = page_offset(page) + bvec->bv_offset;
2476 
2477 	bio->bi_private = NULL;
2478 
2479 	bio_get(bio);
2480 
2481 	if (tree->ops && tree->ops->submit_bio_hook)
2482 		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2483 					   mirror_num, bio_flags, start);
2484 	else
2485 		btrfsic_submit_bio(rw, bio);
2486 
2487 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2488 		ret = -EOPNOTSUPP;
2489 	bio_put(bio);
2490 	return ret;
2491 }
2492 
2493 static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2494 		     unsigned long offset, size_t size, struct bio *bio,
2495 		     unsigned long bio_flags)
2496 {
2497 	int ret = 0;
2498 	if (tree->ops && tree->ops->merge_bio_hook)
2499 		ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2500 						bio_flags);
2501 	BUG_ON(ret < 0);
2502 	return ret;
2503 
2504 }
2505 
2506 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2507 			      struct page *page, sector_t sector,
2508 			      size_t size, unsigned long offset,
2509 			      struct block_device *bdev,
2510 			      struct bio **bio_ret,
2511 			      unsigned long max_pages,
2512 			      bio_end_io_t end_io_func,
2513 			      int mirror_num,
2514 			      unsigned long prev_bio_flags,
2515 			      unsigned long bio_flags)
2516 {
2517 	int ret = 0;
2518 	struct bio *bio;
2519 	int nr;
2520 	int contig = 0;
2521 	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2522 	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2523 	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2524 
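	/*
	 * try to append this page to the bio we were handed: it must be
	 * contiguous on disk with the bio's current end (for compressed
	 * bios every page maps to the same disk start, so bi_sector is
	 * compared directly), carry the same bio flags, and be accepted
	 * by both the merge hook and bio_add_page.  otherwise the old
	 * bio is submitted and a fresh one is built below.
	 */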
2525 	if (bio_ret && *bio_ret) {
2526 		bio = *bio_ret;
2527 		if (old_compressed)
2528 			contig = bio->bi_sector == sector;
2529 		else
2530 			contig = bio->bi_sector + (bio->bi_size >> 9) ==
2531 				sector;
2532 
2533 		if (prev_bio_flags != bio_flags || !contig ||
2534 		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2535 		    bio_add_page(bio, page, page_size, offset) < page_size) {
2536 			ret = submit_one_bio(rw, bio, mirror_num,
2537 					     prev_bio_flags);
2538 			if (ret < 0)
2539 				return ret;
2540 			bio = NULL;
2541 		} else {
2542 			return 0;
2543 		}
2544 	}
2545 	if (this_compressed)
2546 		nr = BIO_MAX_PAGES;
2547 	else
2548 		nr = bio_get_nr_vecs(bdev);
2549 
2550 	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2551 	if (!bio)
2552 		return -ENOMEM;
2553 
2554 	bio_add_page(bio, page, page_size, offset);
2555 	bio->bi_end_io = end_io_func;
2556 	bio->bi_private = tree;
2557 
2558 	if (bio_ret)
2559 		*bio_ret = bio;
2560 	else
2561 		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2562 
2563 	return ret;
2564 }
2565 
2566 void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2567 {
2568 	if (!PagePrivate(page)) {
2569 		SetPagePrivate(page);
2570 		page_cache_get(page);
2571 		set_page_private(page, (unsigned long)eb);
2572 	} else {
2573 		WARN_ON(page->private != (unsigned long)eb);
2574 	}
2575 }
2576 
2577 void set_page_extent_mapped(struct page *page)
2578 {
2579 	if (!PagePrivate(page)) {
2580 		SetPagePrivate(page);
2581 		page_cache_get(page);
2582 		set_page_private(page, EXTENT_PAGE_PRIVATE);
2583 	}
2584 }
2585 
2586 /*
2587  * basic readpage implementation.  Locked extent state structs are inserted
2588  * into the tree that are removed when the IO is done (by the end_io
2589  * handlers)
2590  * XXX JDM: This needs looking at to ensure proper page locking
2591  */
2592 static int __extent_read_full_page(struct extent_io_tree *tree,
2593 				   struct page *page,
2594 				   get_extent_t *get_extent,
2595 				   struct bio **bio, int mirror_num,
2596 				   unsigned long *bio_flags)
2597 {
2598 	struct inode *inode = page->mapping->host;
2599 	u64 start = page_offset(page);
2600 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
2601 	u64 end;
2602 	u64 cur = start;
2603 	u64 extent_offset;
2604 	u64 last_byte = i_size_read(inode);
2605 	u64 block_start;
2606 	u64 cur_end;
2607 	sector_t sector;
2608 	struct extent_map *em;
2609 	struct block_device *bdev;
2610 	struct btrfs_ordered_extent *ordered;
2611 	int ret;
2612 	int nr = 0;
2613 	size_t pg_offset = 0;
2614 	size_t iosize;
2615 	size_t disk_io_size;
2616 	size_t blocksize = inode->i_sb->s_blocksize;
2617 	unsigned long this_bio_flag = 0;
2618 
2619 	set_page_extent_mapped(page);
2620 
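	/*
	 * cleancache may be able to fill the page without any disk IO;
	 * it only operates on whole pages, hence the blocksize check
	 */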
2621 	if (!PageUptodate(page)) {
2622 		if (cleancache_get_page(page) == 0) {
2623 			BUG_ON(blocksize != PAGE_SIZE);
2624 			goto out;
2625 		}
2626 	}
2627 
2628 	end = page_end;
2629 	while (1) {
2630 		lock_extent(tree, start, end);
2631 		ordered = btrfs_lookup_ordered_extent(inode, start);
2632 		if (!ordered)
2633 			break;
2634 		unlock_extent(tree, start, end);
2635 		btrfs_start_ordered_extent(inode, ordered, 1);
2636 		btrfs_put_ordered_extent(ordered);
2637 	}
2638 
2639 	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2640 		char *userpage;
2641 		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2642 
2643 		if (zero_offset) {
2644 			iosize = PAGE_CACHE_SIZE - zero_offset;
2645 			userpage = kmap_atomic(page);
2646 			memset(userpage + zero_offset, 0, iosize);
2647 			flush_dcache_page(page);
2648 			kunmap_atomic(userpage);
2649 		}
2650 	}
2651 	while (cur <= end) {
2652 		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2653 
2654 		if (cur >= last_byte) {
2655 			char *userpage;
2656 			struct extent_state *cached = NULL;
2657 
2658 			iosize = PAGE_CACHE_SIZE - pg_offset;
2659 			userpage = kmap_atomic(page);
2660 			memset(userpage + pg_offset, 0, iosize);
2661 			flush_dcache_page(page);
2662 			kunmap_atomic(userpage);
2663 			set_extent_uptodate(tree, cur, cur + iosize - 1,
2664 					    &cached, GFP_NOFS);
2665 			unlock_extent_cached(tree, cur, cur + iosize - 1,
2666 					     &cached, GFP_NOFS);
2667 			break;
2668 		}
2669 		em = get_extent(inode, page, pg_offset, cur,
2670 				end - cur + 1, 0);
2671 		if (IS_ERR_OR_NULL(em)) {
2672 			SetPageError(page);
2673 			unlock_extent(tree, cur, end);
2674 			break;
2675 		}
2676 		extent_offset = cur - em->start;
2677 		BUG_ON(extent_map_end(em) <= cur);
2678 		BUG_ON(end < cur);
2679 
2680 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2681 			this_bio_flag = EXTENT_BIO_COMPRESSED;
2682 			extent_set_compress_type(&this_bio_flag,
2683 						 em->compress_type);
2684 		}
2685 
2686 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2687 		cur_end = min(extent_map_end(em) - 1, end);
2688 		iosize = ALIGN(iosize, blocksize);
2689 		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2690 			disk_io_size = em->block_len;
2691 			sector = em->block_start >> 9;
2692 		} else {
2693 			sector = (em->block_start + extent_offset) >> 9;
2694 			disk_io_size = iosize;
2695 		}
2696 		bdev = em->bdev;
2697 		block_start = em->block_start;
2698 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2699 			block_start = EXTENT_MAP_HOLE;
2700 		free_extent_map(em);
2701 		em = NULL;
2702 
2703 		/* we've found a hole, just zero and go on */
2704 		if (block_start == EXTENT_MAP_HOLE) {
2705 			char *userpage;
2706 			struct extent_state *cached = NULL;
2707 
2708 			userpage = kmap_atomic(page);
2709 			memset(userpage + pg_offset, 0, iosize);
2710 			flush_dcache_page(page);
2711 			kunmap_atomic(userpage);
2712 
2713 			set_extent_uptodate(tree, cur, cur + iosize - 1,
2714 					    &cached, GFP_NOFS);
2715 			unlock_extent_cached(tree, cur, cur + iosize - 1,
2716 					     &cached, GFP_NOFS);
2717 			cur = cur + iosize;
2718 			pg_offset += iosize;
2719 			continue;
2720 		}
2721 		/* the get_extent function already copied into the page */
2722 		if (test_range_bit(tree, cur, cur_end,
2723 				   EXTENT_UPTODATE, 1, NULL)) {
2724 			check_page_uptodate(tree, page);
2725 			unlock_extent(tree, cur, cur + iosize - 1);
2726 			cur = cur + iosize;
2727 			pg_offset += iosize;
2728 			continue;
2729 		}
2730 		/* we have an inline extent but it didn't get marked up
2731 		 * to date.  Error out
2732 		 */
2733 		if (block_start == EXTENT_MAP_INLINE) {
2734 			SetPageError(page);
2735 			unlock_extent(tree, cur, cur + iosize - 1);
2736 			cur = cur + iosize;
2737 			pg_offset += iosize;
2738 			continue;
2739 		}
2740 
2741 		pnr -= page->index;
2742 		ret = submit_extent_page(READ, tree, page,
2743 					 sector, disk_io_size, pg_offset,
2744 					 bdev, bio, pnr,
2745 					 end_bio_extent_readpage, mirror_num,
2746 					 *bio_flags,
2747 					 this_bio_flag);
2748 		if (!ret) {
2749 			nr++;
2750 			*bio_flags = this_bio_flag;
2751 		} else {
2752 			SetPageError(page);
2753 			unlock_extent(tree, cur, cur + iosize - 1);
2754 		}
2755 		cur = cur + iosize;
2756 		pg_offset += iosize;
2757 	}
2758 out:
2759 	if (!nr) {
2760 		if (!PageError(page))
2761 			SetPageUptodate(page);
2762 		unlock_page(page);
2763 	}
2764 	return 0;
2765 }
2766 
2767 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2768 			    get_extent_t *get_extent, int mirror_num)
2769 {
2770 	struct bio *bio = NULL;
2771 	unsigned long bio_flags = 0;
2772 	int ret;
2773 
2774 	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2775 				      &bio_flags);
2776 	if (bio)
2777 		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
2778 	return ret;
2779 }
2780 
2781 static noinline void update_nr_written(struct page *page,
2782 				      struct writeback_control *wbc,
2783 				      unsigned long nr_written)
2784 {
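	/*
	 * besides consuming nr_to_write budget, push writeback_index
	 * forward for range_cyclic (and whole-file) writeback so the
	 * next pass resumes after the pages handled here
	 */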
2785 	wbc->nr_to_write -= nr_written;
2786 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2787 	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2788 		page->mapping->writeback_index = page->index + nr_written;
2789 }
2790 
2791 /*
2792  * the writepage semantics are similar to regular writepage.  extent
2793  * records are inserted to lock ranges in the tree, and as dirty areas
2794  * are found, they are marked writeback.  Then the lock bits are removed
2795  * and the end_io handler clears the writeback ranges
2796  */
2797 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2798 			      void *data)
2799 {
2800 	struct inode *inode = page->mapping->host;
2801 	struct extent_page_data *epd = data;
2802 	struct extent_io_tree *tree = epd->tree;
2803 	u64 start = page_offset(page);
2804 	u64 delalloc_start;
2805 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
2806 	u64 end;
2807 	u64 cur = start;
2808 	u64 extent_offset;
2809 	u64 last_byte = i_size_read(inode);
2810 	u64 block_start;
2811 	u64 iosize;
2812 	sector_t sector;
2813 	struct extent_state *cached_state = NULL;
2814 	struct extent_map *em;
2815 	struct block_device *bdev;
2816 	int ret;
2817 	int nr = 0;
2818 	size_t pg_offset = 0;
2819 	size_t blocksize;
2820 	loff_t i_size = i_size_read(inode);
2821 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2822 	u64 nr_delalloc;
2823 	u64 delalloc_end;
2824 	int page_started;
2825 	int compressed;
2826 	int write_flags;
2827 	unsigned long nr_written = 0;
2828 	bool fill_delalloc = true;
2829 
2830 	if (wbc->sync_mode == WB_SYNC_ALL)
2831 		write_flags = WRITE_SYNC;
2832 	else
2833 		write_flags = WRITE;
2834 
2835 	trace___extent_writepage(page, inode, wbc);
2836 
2837 	WARN_ON(!PageLocked(page));
2838 
2839 	ClearPageError(page);
2840 
2841 	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2842 	if (page->index > end_index ||
2843 	   (page->index == end_index && !pg_offset)) {
2844 		page->mapping->a_ops->invalidatepage(page, 0);
2845 		unlock_page(page);
2846 		return 0;
2847 	}
2848 
2849 	if (page->index == end_index) {
2850 		char *userpage;
2851 
2852 		userpage = kmap_atomic(page);
2853 		memset(userpage + pg_offset, 0,
2854 		       PAGE_CACHE_SIZE - pg_offset);
2855 		kunmap_atomic(userpage);
2856 		flush_dcache_page(page);
2857 	}
2858 	pg_offset = 0;
2859 
2860 	set_page_extent_mapped(page);
2861 
2862 	if (!tree->ops || !tree->ops->fill_delalloc)
2863 		fill_delalloc = false;
2864 
2865 	delalloc_start = start;
2866 	delalloc_end = 0;
2867 	page_started = 0;
2868 	if (!epd->extent_locked && fill_delalloc) {
2869 		u64 delalloc_to_write = 0;
2870 		/*
2871 		 * make sure the wbc mapping index is at least updated
2872 		 * to this page.
2873 		 */
2874 		update_nr_written(page, wbc, 0);
2875 
2876 		while (delalloc_end < page_end) {
2877 			nr_delalloc = find_lock_delalloc_range(inode, tree,
2878 						       page,
2879 						       &delalloc_start,
2880 						       &delalloc_end,
2881 						       128 * 1024 * 1024);
2882 			if (nr_delalloc == 0) {
2883 				delalloc_start = delalloc_end + 1;
2884 				continue;
2885 			}
2886 			ret = tree->ops->fill_delalloc(inode, page,
2887 						       delalloc_start,
2888 						       delalloc_end,
2889 						       &page_started,
2890 						       &nr_written);
2891 			/* File system has been set read-only */
2892 			if (ret) {
2893 				SetPageError(page);
2894 				goto done;
2895 			}
2896 			/*
2897 			 * delalloc_end is already one less than the total
2898 			 * length, so we don't subtract one from
2899 			 * PAGE_CACHE_SIZE
2900 			 */
2901 			delalloc_to_write += (delalloc_end - delalloc_start +
2902 					      PAGE_CACHE_SIZE) >>
2903 					      PAGE_CACHE_SHIFT;
2904 			delalloc_start = delalloc_end + 1;
2905 		}
2906 		if (wbc->nr_to_write < delalloc_to_write) {
2907 			int thresh = 8192;
2908 
2909 			if (delalloc_to_write < thresh * 2)
2910 				thresh = delalloc_to_write;
2911 			wbc->nr_to_write = min_t(u64, delalloc_to_write,
2912 						 thresh);
2913 		}
2914 
2915 		/* did the fill delalloc function already unlock and start
2916 		 * the IO?
2917 		 */
2918 		if (page_started) {
2919 			ret = 0;
2920 			/*
2921 			 * we've unlocked the page, so we can't update
2922 			 * the mapping's writeback index, just update
2923 			 * nr_to_write.
2924 			 */
2925 			wbc->nr_to_write -= nr_written;
2926 			goto done_unlocked;
2927 		}
2928 	}
2929 	if (tree->ops && tree->ops->writepage_start_hook) {
2930 		ret = tree->ops->writepage_start_hook(page, start,
2931 						      page_end);
2932 		if (ret) {
2933 			/* Fixup worker will requeue */
2934 			if (ret == -EBUSY)
2935 				wbc->pages_skipped++;
2936 			else
2937 				redirty_page_for_writepage(wbc, page);
2938 			update_nr_written(page, wbc, nr_written);
2939 			unlock_page(page);
2940 			ret = 0;
2941 			goto done_unlocked;
2942 		}
2943 	}
2944 
2945 	/*
2946 	 * we don't want to touch the inode after unlocking the page,
2947 	 * so we update the mapping writeback index now
2948 	 */
2949 	update_nr_written(page, wbc, nr_written + 1);
2950 
2951 	end = page_end;
2952 	if (last_byte <= start) {
2953 		if (tree->ops && tree->ops->writepage_end_io_hook)
2954 			tree->ops->writepage_end_io_hook(page, start,
2955 							 page_end, NULL, 1);
2956 		goto done;
2957 	}
2958 
2959 	blocksize = inode->i_sb->s_blocksize;
2960 
2961 	while (cur <= end) {
2962 		if (cur >= last_byte) {
2963 			if (tree->ops && tree->ops->writepage_end_io_hook)
2964 				tree->ops->writepage_end_io_hook(page, cur,
2965 							 page_end, NULL, 1);
2966 			break;
2967 		}
2968 		em = epd->get_extent(inode, page, pg_offset, cur,
2969 				     end - cur + 1, 1);
2970 		if (IS_ERR_OR_NULL(em)) {
2971 			SetPageError(page);
2972 			break;
2973 		}
2974 
2975 		extent_offset = cur - em->start;
2976 		BUG_ON(extent_map_end(em) <= cur);
2977 		BUG_ON(end < cur);
2978 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
2979 		iosize = ALIGN(iosize, blocksize);
2980 		sector = (em->block_start + extent_offset) >> 9;
2981 		bdev = em->bdev;
2982 		block_start = em->block_start;
2983 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2984 		free_extent_map(em);
2985 		em = NULL;
2986 
2987 		/*
2988 		 * compressed and inline extents are written through other
2989 		 * paths in the FS
2990 		 */
2991 		if (compressed || block_start == EXTENT_MAP_HOLE ||
2992 		    block_start == EXTENT_MAP_INLINE) {
2993 			/*
2994 			 * end_io notification does not happen here for
2995 			 * compressed extents
2996 			 */
2997 			if (!compressed && tree->ops &&
2998 			    tree->ops->writepage_end_io_hook)
2999 				tree->ops->writepage_end_io_hook(page, cur,
3000 							 cur + iosize - 1,
3001 							 NULL, 1);
3002 			else if (compressed) {
3003 				/* we don't want to end_page_writeback on
3004 				 * a compressed extent.  this happens
3005 				 * elsewhere
3006 				 */
3007 				nr++;
3008 			}
3009 
3010 			cur += iosize;
3011 			pg_offset += iosize;
3012 			continue;
3013 		}
3014 		/* leave this out until we have a page_mkwrite call */
3015 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
3016 				   EXTENT_DIRTY, 0, NULL)) {
3017 			cur = cur + iosize;
3018 			pg_offset += iosize;
3019 			continue;
3020 		}
3021 
3022 		if (tree->ops && tree->ops->writepage_io_hook) {
3023 			ret = tree->ops->writepage_io_hook(page, cur,
3024 						cur + iosize - 1);
3025 		} else {
3026 			ret = 0;
3027 		}
3028 		if (ret) {
3029 			SetPageError(page);
3030 		} else {
3031 			unsigned long max_nr = end_index + 1;
3032 
3033 			set_range_writeback(tree, cur, cur + iosize - 1);
3034 			if (!PageWriteback(page)) {
3035 				printk(KERN_ERR "btrfs warning page %lu not "
3036 				       "writeback, cur %llu end %llu\n",
3037 				       page->index, (unsigned long long)cur,
3038 				       (unsigned long long)end);
3039 			}
3040 
3041 			ret = submit_extent_page(write_flags, tree, page,
3042 						 sector, iosize, pg_offset,
3043 						 bdev, &epd->bio, max_nr,
3044 						 end_bio_extent_writepage,
3045 						 0, 0, 0);
3046 			if (ret)
3047 				SetPageError(page);
3048 		}
3049 		cur = cur + iosize;
3050 		pg_offset += iosize;
3051 		nr++;
3052 	}
3053 done:
3054 	if (nr == 0) {
3055 		/* make sure the mapping tag for page dirty gets cleared */
3056 		set_page_writeback(page);
3057 		end_page_writeback(page);
3058 	}
3059 	unlock_page(page);
3060 
3061 done_unlocked:
3062 
3063 	/* drop our reference on any cached states */
3064 	free_extent_state(cached_state);
3065 	return 0;
3066 }
3067 
3068 static int eb_wait(void *word)
3069 {
3070 	io_schedule();
3071 	return 0;
3072 }
3073 
3074 static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3075 {
3076 	wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3077 		    TASK_UNINTERRUPTIBLE);
3078 }
3079 
3080 static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3081 				     struct btrfs_fs_info *fs_info,
3082 				     struct extent_page_data *epd)
3083 {
3084 	unsigned long i, num_pages;
3085 	int flush = 0;
3086 	int ret = 0;
3087 
3088 	if (!btrfs_try_tree_write_lock(eb)) {
3089 		flush = 1;
3090 		flush_write_bio(epd);
3091 		btrfs_tree_lock(eb);
3092 	}
3093 
3094 	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3095 		btrfs_tree_unlock(eb);
3096 		if (!epd->sync_io)
3097 			return 0;
3098 		if (!flush) {
3099 			flush_write_bio(epd);
3100 			flush = 1;
3101 		}
3102 		while (1) {
3103 			wait_on_extent_buffer_writeback(eb);
3104 			btrfs_tree_lock(eb);
3105 			if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3106 				break;
3107 			btrfs_tree_unlock(eb);
3108 		}
3109 	}
3110 
3111 	/*
3112 	 * We need to do this to prevent races with anyone who checks whether
3113 	 * the eb is under IO, since we can end up having no IO bits set for a
3114 	 * short period of time.
3115 	 */
3116 	spin_lock(&eb->refs_lock);
3117 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3118 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3119 		spin_unlock(&eb->refs_lock);
3120 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3121 		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
3122 				     -eb->len,
3123 				     fs_info->dirty_metadata_batch);
3124 		ret = 1;
3125 	} else {
3126 		spin_unlock(&eb->refs_lock);
3127 	}
3128 
3129 	btrfs_tree_unlock(eb);
3130 
3131 	if (!ret)
3132 		return ret;
3133 
3134 	num_pages = num_extent_pages(eb->start, eb->len);
3135 	for (i = 0; i < num_pages; i++) {
3136 		struct page *p = extent_buffer_page(eb, i);
3137 
3138 		if (!trylock_page(p)) {
3139 			if (!flush) {
3140 				flush_write_bio(epd);
3141 				flush = 1;
3142 			}
3143 			lock_page(p);
3144 		}
3145 	}
3146 
3147 	return ret;
3148 }
3149 
3150 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3151 {
3152 	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3153 	smp_mb__after_clear_bit();
3154 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3155 }
3156 
3157 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3158 {
3159 	int uptodate = err == 0;
3160 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3161 	struct extent_buffer *eb;
3162 	int done;
3163 
3164 	do {
3165 		struct page *page = bvec->bv_page;
3166 
3167 		bvec--;
3168 		eb = (struct extent_buffer *)page->private;
3169 		BUG_ON(!eb);
3170 		done = atomic_dec_and_test(&eb->io_pages);
3171 
3172 		if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3173 			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3174 			ClearPageUptodate(page);
3175 			SetPageError(page);
3176 		}
3177 
3178 		end_page_writeback(page);
3179 
3180 		if (!done)
3181 			continue;
3182 
3183 		end_extent_buffer_writeback(eb);
3184 	} while (bvec >= bio->bi_io_vec);
3185 
3186 	bio_put(bio);
3188 }
3189 
3190 static int write_one_eb(struct extent_buffer *eb,
3191 			struct btrfs_fs_info *fs_info,
3192 			struct writeback_control *wbc,
3193 			struct extent_page_data *epd)
3194 {
3195 	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3196 	u64 offset = eb->start;
3197 	unsigned long i, num_pages;
3198 	unsigned long bio_flags = 0;
3199 	int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3200 	int ret = 0;
3201 
3202 	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3203 	num_pages = num_extent_pages(eb->start, eb->len);
3204 	atomic_set(&eb->io_pages, num_pages);
3205 	if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3206 		bio_flags = EXTENT_BIO_TREE_LOG;
3207 
3208 	for (i = 0; i < num_pages; i++) {
3209 		struct page *p = extent_buffer_page(eb, i);
3210 
3211 		clear_page_dirty_for_io(p);
3212 		set_page_writeback(p);
3213 		ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3214 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3215 					 -1, end_bio_extent_buffer_writepage,
3216 					 0, epd->bio_flags, bio_flags);
3217 		epd->bio_flags = bio_flags;
3218 		if (ret) {
3219 			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3220 			SetPageError(p);
3221 			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3222 				end_extent_buffer_writeback(eb);
3223 			ret = -EIO;
3224 			break;
3225 		}
3226 		offset += PAGE_CACHE_SIZE;
3227 		update_nr_written(p, wbc, 1);
3228 		unlock_page(p);
3229 	}
3230 
3231 	if (unlikely(ret)) {
3232 		for (; i < num_pages; i++) {
3233 			struct page *p = extent_buffer_page(eb, i);
3234 			unlock_page(p);
3235 		}
3236 	}
3237 
3238 	return ret;
3239 }
3240 
3241 int btree_write_cache_pages(struct address_space *mapping,
3242 				   struct writeback_control *wbc)
3243 {
3244 	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3245 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3246 	struct extent_buffer *eb, *prev_eb = NULL;
3247 	struct extent_page_data epd = {
3248 		.bio = NULL,
3249 		.tree = tree,
3250 		.extent_locked = 0,
3251 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3252 		.bio_flags = 0,
3253 	};
3254 	int ret = 0;
3255 	int done = 0;
3256 	int nr_to_write_done = 0;
3257 	struct pagevec pvec;
3258 	int nr_pages;
3259 	pgoff_t index;
3260 	pgoff_t end;		/* Inclusive */
3261 	int scanned = 0;
3262 	int tag;
3263 
3264 	pagevec_init(&pvec, 0);
3265 	if (wbc->range_cyclic) {
3266 		index = mapping->writeback_index; /* Start from prev offset */
3267 		end = -1;
3268 	} else {
3269 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
3270 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
3271 		scanned = 1;
3272 	}
3273 	if (wbc->sync_mode == WB_SYNC_ALL)
3274 		tag = PAGECACHE_TAG_TOWRITE;
3275 	else
3276 		tag = PAGECACHE_TAG_DIRTY;
3277 retry:
3278 	if (wbc->sync_mode == WB_SYNC_ALL)
3279 		tag_pages_for_writeback(mapping, index, end);
3280 	while (!done && !nr_to_write_done && (index <= end) &&
3281 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3282 			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3283 		unsigned i;
3284 
3285 		scanned = 1;
3286 		for (i = 0; i < nr_pages; i++) {
3287 			struct page *page = pvec.pages[i];
3288 
3289 			if (!PagePrivate(page))
3290 				continue;
3291 
3292 			if (!wbc->range_cyclic && page->index > end) {
3293 				done = 1;
3294 				break;
3295 			}
3296 
3297 			spin_lock(&mapping->private_lock);
3298 			if (!PagePrivate(page)) {
3299 				spin_unlock(&mapping->private_lock);
3300 				continue;
3301 			}
3302 
3303 			eb = (struct extent_buffer *)page->private;
3304 
3305 			/*
3306 			 * Shouldn't happen and normally this would be a BUG_ON
3307 			 * but no sense in crashing the users box for something
3308 			 * we can survive anyway.
3309 			 */
3310 			if (!eb) {
3311 				spin_unlock(&mapping->private_lock);
3312 				WARN_ON(1);
3313 				continue;
3314 			}
3315 
3316 			if (eb == prev_eb) {
3317 				spin_unlock(&mapping->private_lock);
3318 				continue;
3319 			}
3320 
3321 			ret = atomic_inc_not_zero(&eb->refs);
3322 			spin_unlock(&mapping->private_lock);
3323 			if (!ret)
3324 				continue;
3325 
3326 			prev_eb = eb;
3327 			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3328 			if (!ret) {
3329 				free_extent_buffer(eb);
3330 				continue;
3331 			}
3332 
3333 			ret = write_one_eb(eb, fs_info, wbc, &epd);
3334 			if (ret) {
3335 				done = 1;
3336 				free_extent_buffer(eb);
3337 				break;
3338 			}
3339 			free_extent_buffer(eb);
3340 
3341 			/*
3342 			 * the filesystem may choose to bump up nr_to_write.
3343 			 * We have to make sure to honor the new nr_to_write
3344 			 * at any time
3345 			 */
3346 			nr_to_write_done = wbc->nr_to_write <= 0;
3347 		}
3348 		pagevec_release(&pvec);
3349 		cond_resched();
3350 	}
3351 	if (!scanned && !done) {
3352 		/*
3353 		 * We hit the last page and there is more work to be done: wrap
3354 		 * back to the start of the file
3355 		 */
3356 		scanned = 1;
3357 		index = 0;
3358 		goto retry;
3359 	}
3360 	flush_write_bio(&epd);
3361 	return ret;
3362 }
3363 
3364 /**
3365  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3366  * @mapping: address space structure to write
3367  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3368  * @writepage: function called for each page
3369  * @data: data passed to writepage function
 * @flush_fn: called to submit any queued bio before the scan blocks on a page
3370  *
3371  * If a page is already under I/O, write_cache_pages() skips it, even
3372  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3373  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3374  * and msync() need to guarantee that all the data which was dirty at the time
3375  * the call was made get new I/O started against them.  If wbc->sync_mode is
3376  * WB_SYNC_ALL then we were called for data integrity and we must wait for
3377  * existing IO to complete.
3378  */
3379 static int extent_write_cache_pages(struct extent_io_tree *tree,
3380 			     struct address_space *mapping,
3381 			     struct writeback_control *wbc,
3382 			     writepage_t writepage, void *data,
3383 			     void (*flush_fn)(void *))
3384 {
3385 	struct inode *inode = mapping->host;
3386 	int ret = 0;
3387 	int done = 0;
3388 	int nr_to_write_done = 0;
3389 	struct pagevec pvec;
3390 	int nr_pages;
3391 	pgoff_t index;
3392 	pgoff_t end;		/* Inclusive */
3393 	int scanned = 0;
3394 	int tag;
3395 
3396 	/*
3397 	 * We have to hold onto the inode so that ordered extents can do their
3398 	 * work when the IO finishes.  The alternative to this is failing to add
3399 	 * an ordered extent if the igrab() fails there and that is a huge pain
3400 	 * to deal with, so instead just hold onto the inode throughout the
3401 	 * writepages operation.  If igrab() fails here, the inode is being
3402 	 * freed anyway and we'd rather not waste our time writing out stuff
3403 	 * that is just going to be truncated.
3404 	 */
3405 	if (!igrab(inode))
3406 		return 0;
3407 
3408 	pagevec_init(&pvec, 0);
3409 	if (wbc->range_cyclic) {
3410 		index = mapping->writeback_index; /* Start from prev offset */
3411 		end = -1;
3412 	} else {
3413 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
3414 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
3415 		scanned = 1;
3416 	}
3417 	if (wbc->sync_mode == WB_SYNC_ALL)
3418 		tag = PAGECACHE_TAG_TOWRITE;
3419 	else
3420 		tag = PAGECACHE_TAG_DIRTY;
3421 retry:
3422 	if (wbc->sync_mode == WB_SYNC_ALL)
3423 		tag_pages_for_writeback(mapping, index, end);
3424 	while (!done && !nr_to_write_done && (index <= end) &&
3425 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3426 			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3427 		unsigned i;
3428 
3429 		scanned = 1;
3430 		for (i = 0; i < nr_pages; i++) {
3431 			struct page *page = pvec.pages[i];
3432 
3433 			/*
3434 			 * At this point we hold neither mapping->tree_lock nor
3435 			 * lock on the page itself: the page may be truncated or
3436 			 * invalidated (changing page->mapping to NULL), or even
3437 			 * swizzled back from swapper_space to tmpfs file
3438 			 * mapping
3439 			 */
3440 			if (!trylock_page(page)) {
3441 				flush_fn(data);
3442 				lock_page(page);
3443 			}
3444 
3445 			if (unlikely(page->mapping != mapping)) {
3446 				unlock_page(page);
3447 				continue;
3448 			}
3449 
3450 			if (!wbc->range_cyclic && page->index > end) {
3451 				done = 1;
3452 				unlock_page(page);
3453 				continue;
3454 			}
3455 
3456 			if (wbc->sync_mode != WB_SYNC_NONE) {
3457 				if (PageWriteback(page))
3458 					flush_fn(data);
3459 				wait_on_page_writeback(page);
3460 			}
3461 
3462 			if (PageWriteback(page) ||
3463 			    !clear_page_dirty_for_io(page)) {
3464 				unlock_page(page);
3465 				continue;
3466 			}
3467 
3468 			ret = (*writepage)(page, wbc, data);
3469 
3470 			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3471 				unlock_page(page);
3472 				ret = 0;
3473 			}
3474 			if (ret)
3475 				done = 1;
3476 
3477 			/*
3478 			 * the filesystem may choose to bump up nr_to_write.
3479 			 * We have to make sure to honor the new nr_to_write
3480 			 * at any time
3481 			 */
3482 			nr_to_write_done = wbc->nr_to_write <= 0;
3483 		}
3484 		pagevec_release(&pvec);
3485 		cond_resched();
3486 	}
3487 	if (!scanned && !done) {
3488 		/*
3489 		 * We hit the last page and there is more work to be done: wrap
3490 		 * back to the start of the file
3491 		 */
3492 		scanned = 1;
3493 		index = 0;
3494 		goto retry;
3495 	}
3496 	btrfs_add_delayed_iput(inode);
3497 	return ret;
3498 }
3499 
3500 static void flush_epd_write_bio(struct extent_page_data *epd)
3501 {
3502 	if (epd->bio) {
3503 		int rw = WRITE;
3504 		int ret;
3505 
3506 		if (epd->sync_io)
3507 			rw = WRITE_SYNC;
3508 
3509 		ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
3510 		BUG_ON(ret < 0); /* -ENOMEM */
3511 		epd->bio = NULL;
3512 	}
3513 }
3514 
3515 static noinline void flush_write_bio(void *data)
3516 {
3517 	struct extent_page_data *epd = data;
3518 	flush_epd_write_bio(epd);
3519 }
3520 
3521 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3522 			  get_extent_t *get_extent,
3523 			  struct writeback_control *wbc)
3524 {
3525 	int ret;
3526 	struct extent_page_data epd = {
3527 		.bio = NULL,
3528 		.tree = tree,
3529 		.get_extent = get_extent,
3530 		.extent_locked = 0,
3531 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3532 		.bio_flags = 0,
3533 	};
3534 
3535 	ret = __extent_writepage(page, wbc, &epd);
3536 
3537 	flush_epd_write_bio(&epd);
3538 	return ret;
3539 }
3540 
3541 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3542 			      u64 start, u64 end, get_extent_t *get_extent,
3543 			      int mode)
3544 {
3545 	int ret = 0;
3546 	struct address_space *mapping = inode->i_mapping;
3547 	struct page *page;
3548 	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3549 		PAGE_CACHE_SHIFT;
3550 
3551 	struct extent_page_data epd = {
3552 		.bio = NULL,
3553 		.tree = tree,
3554 		.get_extent = get_extent,
3555 		.extent_locked = 1,
3556 		.sync_io = mode == WB_SYNC_ALL,
3557 		.bio_flags = 0,
3558 	};
3559 	struct writeback_control wbc_writepages = {
3560 		.sync_mode	= mode,
3561 		.nr_to_write	= nr_pages * 2,
3562 		.range_start	= start,
3563 		.range_end	= end + 1,
3564 	};
3565 
3566 	while (start <= end) {
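		/*
		 * the caller already locked this range and instantiated
		 * the pages, so find_get_page() is expected to succeed;
		 * a NULL return is not handled here
		 */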
3567 		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3568 		if (clear_page_dirty_for_io(page))
3569 			ret = __extent_writepage(page, &wbc_writepages, &epd);
3570 		else {
3571 			if (tree->ops && tree->ops->writepage_end_io_hook)
3572 				tree->ops->writepage_end_io_hook(page, start,
3573 						 start + PAGE_CACHE_SIZE - 1,
3574 						 NULL, 1);
3575 			unlock_page(page);
3576 		}
3577 		page_cache_release(page);
3578 		start += PAGE_CACHE_SIZE;
3579 	}
3580 
3581 	flush_epd_write_bio(&epd);
3582 	return ret;
3583 }
3584 
3585 int extent_writepages(struct extent_io_tree *tree,
3586 		      struct address_space *mapping,
3587 		      get_extent_t *get_extent,
3588 		      struct writeback_control *wbc)
3589 {
3590 	int ret = 0;
3591 	struct extent_page_data epd = {
3592 		.bio = NULL,
3593 		.tree = tree,
3594 		.get_extent = get_extent,
3595 		.extent_locked = 0,
3596 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
3597 		.bio_flags = 0,
3598 	};
3599 
3600 	ret = extent_write_cache_pages(tree, mapping, wbc,
3601 				       __extent_writepage, &epd,
3602 				       flush_write_bio);
3603 	flush_epd_write_bio(&epd);
3604 	return ret;
3605 }
3606 
3607 int extent_readpages(struct extent_io_tree *tree,
3608 		     struct address_space *mapping,
3609 		     struct list_head *pages, unsigned nr_pages,
3610 		     get_extent_t get_extent)
3611 {
3612 	struct bio *bio = NULL;
3613 	unsigned page_idx;
3614 	unsigned long bio_flags = 0;
3615 	struct page *pagepool[16];
3616 	struct page *page;
3617 	int i = 0;
3618 	int nr = 0;
3619 
3620 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3621 		page = list_entry(pages->prev, struct page, lru);
3622 
3623 		prefetchw(&page->flags);
3624 		list_del(&page->lru);
3625 		if (add_to_page_cache_lru(page, mapping,
3626 					page->index, GFP_NOFS)) {
3627 			page_cache_release(page);
3628 			continue;
3629 		}
3630 
3631 		pagepool[nr++] = page;
3632 		if (nr < ARRAY_SIZE(pagepool))
3633 			continue;
3634 		for (i = 0; i < nr; i++) {
3635 			__extent_read_full_page(tree, pagepool[i], get_extent,
3636 					&bio, 0, &bio_flags);
3637 			page_cache_release(pagepool[i]);
3638 		}
3639 		nr = 0;
3640 	}
3641 	for (i = 0; i < nr; i++) {
3642 		__extent_read_full_page(tree, pagepool[i], get_extent,
3643 					&bio, 0, &bio_flags);
3644 		page_cache_release(pagepool[i]);
3645 	}
3646 
3647 	BUG_ON(!list_empty(pages));
3648 	if (bio)
3649 		return submit_one_bio(READ, bio, 0, bio_flags);
3650 	return 0;
3651 }
3652 
3653 /*
3654  * basic invalidatepage code, this waits on any locked or writeback
3655  * ranges corresponding to the page, and then deletes any extent state
3656  * records from the tree
3657  */
3658 int extent_invalidatepage(struct extent_io_tree *tree,
3659 			  struct page *page, unsigned long offset)
3660 {
3661 	struct extent_state *cached_state = NULL;
3662 	u64 start = page_offset(page);
3663 	u64 end = start + PAGE_CACHE_SIZE - 1;
3664 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3665 
3666 	start += ALIGN(offset, blocksize);
3667 	if (start > end)
3668 		return 0;
3669 
3670 	lock_extent_bits(tree, start, end, 0, &cached_state);
3671 	wait_on_page_writeback(page);
3672 	clear_extent_bit(tree, start, end,
3673 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3674 			 EXTENT_DO_ACCOUNTING,
3675 			 1, 1, &cached_state, GFP_NOFS);
3676 	return 0;
3677 }
3678 
3679 /*
3680  * a helper for releasepage, this tests for areas of the page that
3681  * are locked or under IO and drops the related state bits if it is safe
3682  * to drop the page.
3683  */
3684 int try_release_extent_state(struct extent_map_tree *map,
3685 			     struct extent_io_tree *tree, struct page *page,
3686 			     gfp_t mask)
3687 {
3688 	u64 start = page_offset(page);
3689 	u64 end = start + PAGE_CACHE_SIZE - 1;
3690 	int ret = 1;
3691 
3692 	if (test_range_bit(tree, start, end,
3693 			   EXTENT_IOBITS, 0, NULL))
3694 		ret = 0;
3695 	else {
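		/*
		 * any mask that allows blocking and IO is clamped down to
		 * plain GFP_NOFS so the clear below can't recurse back
		 * into the filesystem
		 */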
3696 		if ((mask & GFP_NOFS) == GFP_NOFS)
3697 			mask = GFP_NOFS;
3698 		/*
3699 		 * at this point we can safely clear everything except the
3700 		 * locked bit and the nodatasum bit
3701 		 */
3702 		ret = clear_extent_bit(tree, start, end,
3703 				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3704 				 0, 0, NULL, mask);
3705 
3706 		/* if clear_extent_bit failed for enomem reasons,
3707 		 * we can't allow the release to continue.
3708 		 */
3709 		if (ret < 0)
3710 			ret = 0;
3711 		else
3712 			ret = 1;
3713 	}
3714 	return ret;
3715 }
3716 
3717 /*
3718  * a helper for releasepage.  As long as there are no locked extents
3719  * in the range corresponding to the page, both state records and extent
3720  * map records are removed
3721  */
3722 int try_release_extent_mapping(struct extent_map_tree *map,
3723 			       struct extent_io_tree *tree, struct page *page,
3724 			       gfp_t mask)
3725 {
3726 	struct extent_map *em;
3727 	u64 start = page_offset(page);
3728 	u64 end = start + PAGE_CACHE_SIZE - 1;
3729 
3730 	if ((mask & __GFP_WAIT) &&
3731 	    page->mapping->host->i_size > 16 * 1024 * 1024) {
3732 		u64 len;
3733 		while (start <= end) {
3734 			len = end - start + 1;
3735 			write_lock(&map->lock);
3736 			em = lookup_extent_mapping(map, start, len);
3737 			if (!em) {
3738 				write_unlock(&map->lock);
3739 				break;
3740 			}
3741 			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3742 			    em->start != start) {
3743 				write_unlock(&map->lock);
3744 				free_extent_map(em);
3745 				break;
3746 			}
3747 			if (!test_range_bit(tree, em->start,
3748 					    extent_map_end(em) - 1,
3749 					    EXTENT_LOCKED | EXTENT_WRITEBACK,
3750 					    0, NULL)) {
3751 				remove_extent_mapping(map, em);
3752 				/* once for the rb tree */
3753 				free_extent_map(em);
3754 			}
3755 			start = extent_map_end(em);
3756 			write_unlock(&map->lock);
3757 
3758 			/* once for us */
3759 			free_extent_map(em);
3760 		}
3761 	}
3762 	return try_release_extent_state(map, tree, page, mask);
3763 }
3764 
3765 /*
3766  * helper function for fiemap, which doesn't want to see any holes.
3767  * This maps until we find something past 'last'
3768  */
3769 static struct extent_map *get_extent_skip_holes(struct inode *inode,
3770 						u64 offset,
3771 						u64 last,
3772 						get_extent_t *get_extent)
3773 {
3774 	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3775 	struct extent_map *em;
3776 	u64 len;
3777 
3778 	if (offset >= last)
3779 		return NULL;
3780 
3781 	while (1) {
3782 		len = last - offset;
3783 		if (len == 0)
3784 			break;
3785 		len = ALIGN(len, sectorsize);
3786 		em = get_extent(inode, NULL, 0, offset, len, 0);
3787 		if (IS_ERR_OR_NULL(em))
3788 			return em;
3789 
3790 		/* if this isn't a hole return it */
3791 		if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3792 		    em->block_start != EXTENT_MAP_HOLE) {
3793 			return em;
3794 		}
3795 
3796 		/* this is a hole, advance to the next extent */
3797 		offset = extent_map_end(em);
3798 		free_extent_map(em);
3799 		if (offset >= last)
3800 			break;
3801 	}
3802 	return NULL;
3803 }
3804 
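/*
 * fiemap entry point.  Walks the extent maps covering [start, start + len)
 * and reports each non-hole extent to userspace through
 * fiemap_fill_next_extent().  The last file extent item is looked up first
 * so that FIEMAP_EXTENT_LAST can be set correctly even when preallocated
 * extents live past i_size.
 */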
3805 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3806 		__u64 start, __u64 len, get_extent_t *get_extent)
3807 {
3808 	int ret = 0;
3809 	u64 off = start;
3810 	u64 max = start + len;
3811 	u32 flags = 0;
3812 	u32 found_type;
3813 	u64 last;
3814 	u64 last_for_get_extent = 0;
3815 	u64 disko = 0;
3816 	u64 isize = i_size_read(inode);
3817 	struct btrfs_key found_key;
3818 	struct extent_map *em = NULL;
3819 	struct extent_state *cached_state = NULL;
3820 	struct btrfs_path *path;
3822 	int end = 0;
3823 	u64 em_start = 0;
3824 	u64 em_len = 0;
3825 	u64 em_end = 0;
3827 
3828 	if (len == 0)
3829 		return -EINVAL;
3830 
3831 	path = btrfs_alloc_path();
3832 	if (!path)
3833 		return -ENOMEM;
3834 	path->leave_spinning = 1;
3835 
3836 	start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3837 	len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3838 
3839 	/*
3840 	 * lookup the last file extent.  We're not using i_size here
3841 	 * because there might be preallocation past i_size
3842 	 */
3843 	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3844 				       path, btrfs_ino(inode), -1, 0);
3845 	if (ret < 0) {
3846 		btrfs_free_path(path);
3847 		return ret;
3848 	}
3849 	WARN_ON(!ret);
3850 	path->slots[0]--;
3853 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3854 	found_type = btrfs_key_type(&found_key);
3855 
3856 	/* No extents, but there might be delalloc bits */
3857 	if (found_key.objectid != btrfs_ino(inode) ||
3858 	    found_type != BTRFS_EXTENT_DATA_KEY) {
3859 		/* have to trust i_size as the end */
3860 		last = (u64)-1;
3861 		last_for_get_extent = isize;
3862 	} else {
3863 		/*
3864 		 * remember the start of the last extent.  There are a
3865 		 * bunch of different factors that go into the length of the
3866 		 * extent, so it's much less complex to remember where it started
3867 		 */
3868 		last = found_key.offset;
3869 		last_for_get_extent = last + 1;
3870 	}
3871 	btrfs_free_path(path);
3872 
3873 	/*
3874 	 * we might have some extents allocated but more delalloc past those
3875 	 * extents.  so, we trust isize unless the start of the last extent is
3876 	 * beyond isize
3877 	 */
3878 	if (last < isize) {
3879 		last = (u64)-1;
3880 		last_for_get_extent = isize;
3881 	}
3882 
3883 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
3884 			 &cached_state);
3885 
3886 	em = get_extent_skip_holes(inode, start, last_for_get_extent,
3887 				   get_extent);
3888 	if (!em)
3889 		goto out;
3890 	if (IS_ERR(em)) {
3891 		ret = PTR_ERR(em);
3892 		goto out;
3893 	}
3894 
3895 	while (!end) {
3896 		u64 offset_in_extent;
3897 
3898 		/* break if the extent we found is outside the range */
3899 		if (em->start >= max || extent_map_end(em) < off)
3900 			break;
3901 
3902 		/*
3903 		 * get_extent may return an extent that starts before our
3904 		 * requested range.  We have to make sure the ranges
3905 		 * we return to fiemap always move forward and don't
3906 		 * overlap, so adjust the offsets here
3907 		 */
3908 		em_start = max(em->start, off);
3909 
3910 		/*
3911 		 * record the offset from the start of the extent
3912 		 * for adjusting the disk offset below
3913 		 */
3914 		offset_in_extent = em_start - em->start;
3915 		em_end = extent_map_end(em);
3916 		em_len = em_end - em_start;
3918 		disko = 0;
3919 		flags = 0;
3920 
3921 		/*
3922 		 * bump off for our next call to get_extent
3923 		 */
3924 		off = extent_map_end(em);
3925 		if (off >= max)
3926 			end = 1;
3927 
3928 		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3929 			end = 1;
3930 			flags |= FIEMAP_EXTENT_LAST;
3931 		} else if (em->block_start == EXTENT_MAP_INLINE) {
3932 			flags |= (FIEMAP_EXTENT_DATA_INLINE |
3933 				  FIEMAP_EXTENT_NOT_ALIGNED);
3934 		} else if (em->block_start == EXTENT_MAP_DELALLOC) {
3935 			flags |= (FIEMAP_EXTENT_DELALLOC |
3936 				  FIEMAP_EXTENT_UNKNOWN);
3937 		} else {
3938 			disko = em->block_start + offset_in_extent;
3939 		}
3940 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3941 			flags |= FIEMAP_EXTENT_ENCODED;
3942 
3943 		free_extent_map(em);
3944 		em = NULL;
3945 		if ((em_start >= last) || em_len == (u64)-1 ||
3946 		    (last == (u64)-1 && isize <= em_end)) {
3947 			flags |= FIEMAP_EXTENT_LAST;
3948 			end = 1;
3949 		}
3950 
3951 		/* now scan forward to see if this is really the last extent. */
3952 		em = get_extent_skip_holes(inode, off, last_for_get_extent,
3953 					   get_extent);
3954 		if (IS_ERR(em)) {
3955 			ret = PTR_ERR(em);
3956 			goto out;
3957 		}
3958 		if (!em) {
3959 			flags |= FIEMAP_EXTENT_LAST;
3960 			end = 1;
3961 		}
3962 		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3963 					      em_len, flags);
3964 		if (ret) {
			/* 1 means the fiemap buffer is full; not an error */
			if (ret == 1)
				ret = 0;
			goto out_free;
		}
3966 	}
3967 out_free:
3968 	free_extent_map(em);
3969 out:
3970 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
3971 			     &cached_state, GFP_NOFS);
3972 	return ret;
3973 }
3974 
3975 static void __free_extent_buffer(struct extent_buffer *eb)
3976 {
3977 #if LEAK_DEBUG
3978 	unsigned long flags;
3979 	spin_lock_irqsave(&leak_lock, flags);
3980 	list_del(&eb->leak_list);
3981 	spin_unlock_irqrestore(&leak_lock, flags);
3982 #endif
3983 	kmem_cache_free(extent_buffer_cache, eb);
3984 }
3985 
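/*
 * Allocate and initialize a bare extent buffer: refcounts, locks and
 * waitqueues are set up, but no pages are attached and the buffer is
 * not yet inserted into the tree's radix tree.
 */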
3986 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3987 						   u64 start,
3988 						   unsigned long len,
3989 						   gfp_t mask)
3990 {
3991 	struct extent_buffer *eb = NULL;
3992 #if LEAK_DEBUG
3993 	unsigned long flags;
3994 #endif
3995 
3996 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3997 	if (eb == NULL)
3998 		return NULL;
3999 	eb->start = start;
4000 	eb->len = len;
4001 	eb->tree = tree;
4002 	eb->bflags = 0;
4003 	rwlock_init(&eb->lock);
4004 	atomic_set(&eb->write_locks, 0);
4005 	atomic_set(&eb->read_locks, 0);
4006 	atomic_set(&eb->blocking_readers, 0);
4007 	atomic_set(&eb->blocking_writers, 0);
4008 	atomic_set(&eb->spinning_readers, 0);
4009 	atomic_set(&eb->spinning_writers, 0);
4010 	eb->lock_nested = 0;
4011 	init_waitqueue_head(&eb->write_lock_wq);
4012 	init_waitqueue_head(&eb->read_lock_wq);
4013 
4014 #if LEAK_DEBUG
4015 	spin_lock_irqsave(&leak_lock, flags);
4016 	list_add(&eb->leak_list, &buffers);
4017 	spin_unlock_irqrestore(&leak_lock, flags);
4018 #endif
4019 	spin_lock_init(&eb->refs_lock);
4020 	atomic_set(&eb->refs, 1);
4021 	atomic_set(&eb->io_pages, 0);
4022 
4023 	/*
4024 	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4025 	 */
4026 	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4027 		> MAX_INLINE_EXTENT_BUFFER_SIZE);
4028 	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4029 
4030 	return eb;
4031 }
4032 
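/*
 * Make a private copy of an extent buffer.  The clone is backed by fresh
 * pages that never enter the page cache, so it is tagged
 * EXTENT_BUFFER_DUMMY.  GFP_ATOMIC is used on the assumption that callers
 * may not be able to sleep here.
 */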
4033 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4034 {
4035 	unsigned long i;
4036 	struct page *p;
4037 	struct extent_buffer *new;
4038 	unsigned long num_pages = num_extent_pages(src->start, src->len);
4039 
4040 	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
4041 	if (new == NULL)
4042 		return NULL;
4043 
4044 	for (i = 0; i < num_pages; i++) {
4045 		p = alloc_page(GFP_ATOMIC);
4046 		BUG_ON(!p);
4047 		attach_extent_buffer_page(new, p);
4048 		WARN_ON(PageDirty(p));
4049 		SetPageUptodate(p);
4050 		new->pages[i] = p;
4051 	}
4052 
4053 	copy_extent_buffer(new, src, 0, 0, src->len);
4054 	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4055 	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4056 
4057 	return new;
4058 }
4059 
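/*
 * Allocate an extent buffer that is backed by private pages rather than
 * the page cache and is never inserted into a radix tree, i.e. a dummy
 * buffer the caller fully owns.
 */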
4060 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4061 {
4062 	struct extent_buffer *eb;
4063 	unsigned long num_pages = num_extent_pages(0, len);
4064 	unsigned long i;
4065 
4066 	eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
4067 	if (!eb)
4068 		return NULL;
4069 
4070 	for (i = 0; i < num_pages; i++) {
4071 		eb->pages[i] = alloc_page(GFP_ATOMIC);
4072 		if (!eb->pages[i])
4073 			goto err;
4074 	}
4075 	set_extent_buffer_uptodate(eb);
4076 	btrfs_set_header_nritems(eb, 0);
4077 	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4078 
4079 	return eb;
4080 err:
4081 	for (; i > 0; i--)
4082 		__free_page(eb->pages[i - 1]);
4083 	__free_extent_buffer(eb);
4084 	return NULL;
4085 }
4086 
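/*
 * Returns nonzero while the buffer still has I/O in flight: pending
 * io_pages, the writeback bit or the dirty bit.
 */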
4087 static int extent_buffer_under_io(struct extent_buffer *eb)
4088 {
4089 	return (atomic_read(&eb->io_pages) ||
4090 		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4091 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4092 }
4093 
4094 /*
4095  * Helper for releasing an extent buffer's pages, from start_idx onwards.
4096  */
4097 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4098 						unsigned long start_idx)
4099 {
4100 	unsigned long index;
4101 	unsigned long num_pages;
4102 	struct page *page;
4103 	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4104 
4105 	BUG_ON(extent_buffer_under_io(eb));
4106 
4107 	num_pages = num_extent_pages(eb->start, eb->len);
4108 	index = start_idx + num_pages;
4109 	if (start_idx >= index)
4110 		return;
4111 
4112 	do {
4113 		index--;
4114 		page = extent_buffer_page(eb, index);
4115 		if (page && mapped) {
4116 			spin_lock(&page->mapping->private_lock);
4117 			/*
4118 			 * We do this since we'll remove the pages after we've
4119 			 * removed the eb from the radix tree, so we could race
4120 			 * and have this page now attached to the new eb.  So
4121 			 * only clear page_private if it's still connected to
4122 			 * this eb.
4123 			 */
4124 			if (PagePrivate(page) &&
4125 			    page->private == (unsigned long)eb) {
4126 				BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4127 				BUG_ON(PageDirty(page));
4128 				BUG_ON(PageWriteback(page));
4129 				/*
4130 				 * We need to make sure we haven't been attached
4131 				 * to a new eb.
4132 				 */
4133 				ClearPagePrivate(page);
4134 				set_page_private(page, 0);
4135 				/* One for the page private */
4136 				page_cache_release(page);
4137 			}
4138 			spin_unlock(&page->mapping->private_lock);
4139 
4140 		}
4141 		if (page) {
4142 			/* One for when we allocated the page */
4143 			page_cache_release(page);
4144 		}
4145 	} while (index != start_idx);
4146 }
4147 
4148 /*
4149  * Helper for releasing the extent buffer.
4150  */
4151 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4152 {
4153 	btrfs_release_extent_buffer_page(eb, 0);
4154 	__free_extent_buffer(eb);
4155 }
4156 
4157 static void check_buffer_tree_ref(struct extent_buffer *eb)
4158 {
4159 	int refs;
4160 	/* the ref bit is tricky.  We have to make sure it is set
4161 	 * if we have the buffer dirty.   Otherwise the
4162 	 * code to free a buffer can end up dropping a dirty
4163 	 * page
4164 	 *
4165 	 * Once the ref bit is set, it won't go away while the
4166 	 * buffer is dirty or in writeback, and it also won't
4167 	 * go away while we have the reference count on the
4168 	 * eb bumped.
4169 	 *
4170 	 * We can't just set the ref bit without bumping the
4171 	 * ref on the eb because free_extent_buffer might
4172 	 * see the ref bit and try to clear it.  If this happens
4173 	 * free_extent_buffer might end up dropping our original
4174 	 * ref by mistake and freeing the page before we are able
4175 	 * to add one more ref.
4176 	 *
4177 	 * So bump the ref count first, then set the bit.  If someone
4178 	 * beat us to it, drop the ref we added.
4179 	 */
4180 	refs = atomic_read(&eb->refs);
4181 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4182 		return;
4183 
4184 	spin_lock(&eb->refs_lock);
4185 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4186 		atomic_inc(&eb->refs);
4187 	spin_unlock(&eb->refs_lock);
4188 }
4189 
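/*
 * Make sure the tree ref is held and mark every backing page accessed so
 * the VM keeps the buffer's pages off the inactive list while it is hot.
 */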
4190 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4191 {
4192 	unsigned long num_pages, i;
4193 
4194 	check_buffer_tree_ref(eb);
4195 
4196 	num_pages = num_extent_pages(eb->start, eb->len);
4197 	for (i = 0; i < num_pages; i++) {
4198 		struct page *p = extent_buffer_page(eb, i);
4199 		mark_page_accessed(p);
4200 	}
4201 }
4202 
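/*
 * Find or create the extent buffer covering [start, start + len).  The
 * fast path is an RCU lookup in the radix tree; otherwise a new eb is
 * allocated, page cache pages are attached and the eb is inserted.  If we
 * lose the insertion race, the winner's buffer is returned instead, with
 * a reference already taken.
 */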
4203 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4204 					  u64 start, unsigned long len)
4205 {
4206 	unsigned long num_pages = num_extent_pages(start, len);
4207 	unsigned long i;
4208 	unsigned long index = start >> PAGE_CACHE_SHIFT;
4209 	struct extent_buffer *eb;
4210 	struct extent_buffer *exists = NULL;
4211 	struct page *p;
4212 	struct address_space *mapping = tree->mapping;
4213 	int uptodate = 1;
4214 	int ret;
4215 
4216 	rcu_read_lock();
4217 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4218 	if (eb && atomic_inc_not_zero(&eb->refs)) {
4219 		rcu_read_unlock();
4220 		mark_extent_buffer_accessed(eb);
4221 		return eb;
4222 	}
4223 	rcu_read_unlock();
4224 
4225 	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
4226 	if (!eb)
4227 		return NULL;
4228 
4229 	for (i = 0; i < num_pages; i++, index++) {
4230 		p = find_or_create_page(mapping, index, GFP_NOFS);
4231 		if (!p)
4232 			goto free_eb;
4233 
4234 		spin_lock(&mapping->private_lock);
4235 		if (PagePrivate(p)) {
4236 			/*
4237 			 * We could have already allocated an eb for this page
4238 			 * and attached one, so let's see if we can get a ref on
4239 			 * the existing eb.  If we can, we know it's good and we
4240 			 * can just return that one; otherwise we know we can
4241 			 * safely overwrite page->private.
4242 			 */
4243 			exists = (struct extent_buffer *)p->private;
4244 			if (atomic_inc_not_zero(&exists->refs)) {
4245 				spin_unlock(&mapping->private_lock);
4246 				unlock_page(p);
4247 				page_cache_release(p);
4248 				mark_extent_buffer_accessed(exists);
4249 				goto free_eb;
4250 			}
4251 
4252 			/*
4253 			 * Do this so attach doesn't complain, and drop the
4254 			 * ref that the old page->private holder had.
4255 			 */
4256 			ClearPagePrivate(p);
4257 			WARN_ON(PageDirty(p));
4258 			page_cache_release(p);
4259 		}
4260 		attach_extent_buffer_page(eb, p);
4261 		spin_unlock(&mapping->private_lock);
4262 		WARN_ON(PageDirty(p));
4263 		mark_page_accessed(p);
4264 		eb->pages[i] = p;
4265 		if (!PageUptodate(p))
4266 			uptodate = 0;
4267 
4268 		/*
4269 		 * see below about how we avoid a nasty race with release page
4270 		 * and why we unlock later
4271 		 */
4272 	}
4273 	if (uptodate)
4274 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4275 again:
4276 	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4277 	if (ret)
4278 		goto free_eb;
4279 
4280 	spin_lock(&tree->buffer_lock);
4281 	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4282 	if (ret == -EEXIST) {
4283 		exists = radix_tree_lookup(&tree->buffer,
4284 						start >> PAGE_CACHE_SHIFT);
4285 		if (!atomic_inc_not_zero(&exists->refs)) {
4286 			spin_unlock(&tree->buffer_lock);
4287 			radix_tree_preload_end();
4288 			exists = NULL;
4289 			goto again;
4290 		}
4291 		spin_unlock(&tree->buffer_lock);
4292 		radix_tree_preload_end();
4293 		mark_extent_buffer_accessed(exists);
4294 		goto free_eb;
4295 	}
4296 	/* add one reference for the tree */
4297 	check_buffer_tree_ref(eb);
4298 	spin_unlock(&tree->buffer_lock);
4299 	radix_tree_preload_end();
4300 
4301 	/*
4302 	 * there is a race where release page may have
4303 	 * tried to find this extent buffer in the radix
4304 	 * but failed.  It will tell the VM it is safe to
4305 	 * reclaim the page, and it will clear the page private bit.
4306 	 * We must make sure to set the page private bit properly
4307 	 * after the extent buffer is in the radix tree so
4308 	 * it doesn't get lost
4309 	 */
4310 	SetPageChecked(eb->pages[0]);
4311 	for (i = 1; i < num_pages; i++) {
4312 		p = extent_buffer_page(eb, i);
4313 		ClearPageChecked(p);
4314 		unlock_page(p);
4315 	}
4316 	unlock_page(eb->pages[0]);
4317 	return eb;
4318 
4319 free_eb:
4320 	for (i = 0; i < num_pages; i++) {
4321 		if (eb->pages[i])
4322 			unlock_page(eb->pages[i]);
4323 	}
4324 
4325 	WARN_ON(!atomic_dec_and_test(&eb->refs));
4326 	btrfs_release_extent_buffer(eb);
4327 	return exists;
4328 }
4329 
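/*
 * Lookup-only counterpart of alloc_extent_buffer(): returns the cached
 * extent buffer at @start with an extra reference, or NULL.
 */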
4330 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
4331 					 u64 start, unsigned long len)
4332 {
4333 	struct extent_buffer *eb;
4334 
4335 	rcu_read_lock();
4336 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4337 	if (eb && atomic_inc_not_zero(&eb->refs)) {
4338 		rcu_read_unlock();
4339 		mark_extent_buffer_accessed(eb);
4340 		return eb;
4341 	}
4342 	rcu_read_unlock();
4343 
4344 	return NULL;
4345 }
4346 
4347 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4348 {
4349 	struct extent_buffer *eb =
4350 			container_of(head, struct extent_buffer, rcu_head);
4351 
4352 	__free_extent_buffer(eb);
4353 }
4354 
4355 /* Expects to have eb->refs_lock already held */
4356 static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4357 {
4358 	WARN_ON(atomic_read(&eb->refs) == 0);
4359 	if (atomic_dec_and_test(&eb->refs)) {
4360 		if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
4361 			spin_unlock(&eb->refs_lock);
4362 		} else {
4363 			struct extent_io_tree *tree = eb->tree;
4364 
4365 			spin_unlock(&eb->refs_lock);
4366 
4367 			spin_lock(&tree->buffer_lock);
4368 			radix_tree_delete(&tree->buffer,
4369 					  eb->start >> PAGE_CACHE_SHIFT);
4370 			spin_unlock(&tree->buffer_lock);
4371 		}
4372 
4373 		/* Should be safe to release our pages at this point */
4374 		btrfs_release_extent_buffer_page(eb, 0);
4375 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4376 		return 1;
4377 	}
4378 	spin_unlock(&eb->refs_lock);
4379 
4380 	return 0;
4381 }
4382 
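/*
 * Drop one reference on an extent buffer.  While refs > 3 a lockless
 * cmpxchg is sufficient; below that we may also have to drop the extra
 * bookkeeping refs of dummy or stale buffers, so refs_lock is taken and
 * release_extent_buffer() does the final accounting.
 */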
4383 void free_extent_buffer(struct extent_buffer *eb)
4384 {
4385 	int refs;
4386 	int old;
4387 	if (!eb)
4388 		return;
4389 
4390 	while (1) {
4391 		refs = atomic_read(&eb->refs);
4392 		if (refs <= 3)
4393 			break;
4394 		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
4395 		if (old == refs)
4396 			return;
4397 	}
4398 
4399 	spin_lock(&eb->refs_lock);
4400 	if (atomic_read(&eb->refs) == 2 &&
4401 	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4402 		atomic_dec(&eb->refs);
4403 
4404 	if (atomic_read(&eb->refs) == 2 &&
4405 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4406 	    !extent_buffer_under_io(eb) &&
4407 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4408 		atomic_dec(&eb->refs);
4409 
4410 	/*
4411 	 * I know this is terrible, but it's temporary until we stop tracking
4412 	 * the uptodate bits and such for the extent buffers.
4413 	 */
4414 	release_extent_buffer(eb, GFP_ATOMIC);
4415 }
4416 
4417 void free_extent_buffer_stale(struct extent_buffer *eb)
4418 {
4419 	if (!eb)
4420 		return;
4421 
4422 	spin_lock(&eb->refs_lock);
4423 	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4424 
4425 	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4426 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4427 		atomic_dec(&eb->refs);
4428 	release_extent_buffer(eb, GFP_NOFS);
4429 }
4430 
4431 void clear_extent_buffer_dirty(struct extent_buffer *eb)
4432 {
4433 	unsigned long i;
4434 	unsigned long num_pages;
4435 	struct page *page;
4436 
4437 	num_pages = num_extent_pages(eb->start, eb->len);
4438 
4439 	for (i = 0; i < num_pages; i++) {
4440 		page = extent_buffer_page(eb, i);
4441 		if (!PageDirty(page))
4442 			continue;
4443 
4444 		lock_page(page);
4445 		WARN_ON(!PagePrivate(page));
4446 
4447 		clear_page_dirty_for_io(page);
4448 		spin_lock_irq(&page->mapping->tree_lock);
4449 		if (!PageDirty(page)) {
4450 			radix_tree_tag_clear(&page->mapping->page_tree,
4451 						page_index(page),
4452 						PAGECACHE_TAG_DIRTY);
4453 		}
4454 		spin_unlock_irq(&page->mapping->tree_lock);
4455 		ClearPageError(page);
4456 		unlock_page(page);
4457 	}
4458 	WARN_ON(atomic_read(&eb->refs) == 0);
4459 }
4460 
4461 int set_extent_buffer_dirty(struct extent_buffer *eb)
4462 {
4463 	unsigned long i;
4464 	unsigned long num_pages;
4465 	int was_dirty = 0;
4466 
4467 	check_buffer_tree_ref(eb);
4468 
4469 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4470 
4471 	num_pages = num_extent_pages(eb->start, eb->len);
4472 	WARN_ON(atomic_read(&eb->refs) == 0);
4473 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4474 
4475 	for (i = 0; i < num_pages; i++)
4476 		set_page_dirty(extent_buffer_page(eb, i));
4477 	return was_dirty;
4478 }
4479 
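/*
 * Returns 1 if [start, start + len) does not cover whole pages: either
 * end is unaligned or the range is shorter than a page.
 */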
4480 static int range_straddles_pages(u64 start, u64 len)
4481 {
4482 	if (len < PAGE_CACHE_SIZE)
4483 		return 1;
4484 	if (start & (PAGE_CACHE_SIZE - 1))
4485 		return 1;
4486 	if ((start + len) & (PAGE_CACHE_SIZE - 1))
4487 		return 1;
4488 	return 0;
4489 }
4490 
4491 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4492 {
4493 	unsigned long i;
4494 	struct page *page;
4495 	unsigned long num_pages;
4496 
4497 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4498 	num_pages = num_extent_pages(eb->start, eb->len);
4499 	for (i = 0; i < num_pages; i++) {
4500 		page = extent_buffer_page(eb, i);
4501 		if (page)
4502 			ClearPageUptodate(page);
4503 	}
4504 	return 0;
4505 }
4506 
4507 int set_extent_buffer_uptodate(struct extent_buffer *eb)
4508 {
4509 	unsigned long i;
4510 	struct page *page;
4511 	unsigned long num_pages;
4512 
4513 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4514 	num_pages = num_extent_pages(eb->start, eb->len);
4515 	for (i = 0; i < num_pages; i++) {
4516 		page = extent_buffer_page(eb, i);
4517 		SetPageUptodate(page);
4518 	}
4519 	return 0;
4520 }
4521 
4522 int extent_range_uptodate(struct extent_io_tree *tree,
4523 			  u64 start, u64 end)
4524 {
4525 	struct page *page;
4526 	int ret;
4527 	int pg_uptodate = 1;
4528 	int uptodate;
4529 	unsigned long index;
4530 
4531 	if (range_straddles_pages(start, end - start + 1)) {
4532 		ret = test_range_bit(tree, start, end,
4533 				     EXTENT_UPTODATE, 1, NULL);
4534 		if (ret)
4535 			return 1;
4536 	}
4537 	while (start <= end) {
4538 		index = start >> PAGE_CACHE_SHIFT;
4539 		page = find_get_page(tree->mapping, index);
4540 		if (!page)
4541 			return 1;
4542 		uptodate = PageUptodate(page);
4543 		page_cache_release(page);
4544 		if (!uptodate) {
4545 			pg_uptodate = 0;
4546 			break;
4547 		}
4548 		start += PAGE_CACHE_SIZE;
4549 	}
4550 	return pg_uptodate;
4551 }
4552 
4553 int extent_buffer_uptodate(struct extent_buffer *eb)
4554 {
4555 	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4556 }
4557 
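/*
 * Read the pages backing an extent buffer.  @wait picks the behaviour:
 * WAIT_NONE only starts I/O on pages it can trylock, while WAIT_COMPLETE
 * blocks until every page is uptodate (or returns -EIO).
 *
 * A typical metadata read looks roughly like the following sketch;
 * btree_get_extent is what the metadata callers normally pass in:
 *
 *	ret = read_extent_buffer_pages(tree, eb, 0, WAIT_COMPLETE,
 *				       btree_get_extent, mirror_num);
 */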
4558 int read_extent_buffer_pages(struct extent_io_tree *tree,
4559 			     struct extent_buffer *eb, u64 start, int wait,
4560 			     get_extent_t *get_extent, int mirror_num)
4561 {
4562 	unsigned long i;
4563 	unsigned long start_i;
4564 	struct page *page;
4565 	int err;
4566 	int ret = 0;
4567 	int locked_pages = 0;
4568 	int all_uptodate = 1;
4569 	unsigned long num_pages;
4570 	unsigned long num_reads = 0;
4571 	struct bio *bio = NULL;
4572 	unsigned long bio_flags = 0;
4573 
4574 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4575 		return 0;
4576 
4577 	if (start) {
4578 		WARN_ON(start < eb->start);
4579 		start_i = (start >> PAGE_CACHE_SHIFT) -
4580 			(eb->start >> PAGE_CACHE_SHIFT);
4581 	} else {
4582 		start_i = 0;
4583 	}
4584 
4585 	num_pages = num_extent_pages(eb->start, eb->len);
4586 	for (i = start_i; i < num_pages; i++) {
4587 		page = extent_buffer_page(eb, i);
4588 		if (wait == WAIT_NONE) {
4589 			if (!trylock_page(page))
4590 				goto unlock_exit;
4591 		} else {
4592 			lock_page(page);
4593 		}
4594 		locked_pages++;
4595 		if (!PageUptodate(page)) {
4596 			num_reads++;
4597 			all_uptodate = 0;
4598 		}
4599 	}
4600 	if (all_uptodate) {
4601 		if (start_i == 0)
4602 			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4603 		goto unlock_exit;
4604 	}
4605 
4606 	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4607 	eb->read_mirror = 0;
4608 	atomic_set(&eb->io_pages, num_reads);
4609 	for (i = start_i; i < num_pages; i++) {
4610 		page = extent_buffer_page(eb, i);
4611 		if (!PageUptodate(page)) {
4612 			ClearPageError(page);
4613 			err = __extent_read_full_page(tree, page,
4614 						      get_extent, &bio,
4615 						      mirror_num, &bio_flags);
4616 			if (err)
4617 				ret = err;
4618 		} else {
4619 			unlock_page(page);
4620 		}
4621 	}
4622 
4623 	if (bio) {
4624 		err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4625 		if (err)
4626 			return err;
4627 	}
4628 
4629 	if (ret || wait != WAIT_COMPLETE)
4630 		return ret;
4631 
4632 	for (i = start_i; i < num_pages; i++) {
4633 		page = extent_buffer_page(eb, i);
4634 		wait_on_page_locked(page);
4635 		if (!PageUptodate(page))
4636 			ret = -EIO;
4637 	}
4638 
4639 	return ret;
4640 
4641 unlock_exit:
4642 	i = start_i;
4643 	while (locked_pages > 0) {
4644 		page = extent_buffer_page(eb, i);
4645 		i++;
4646 		unlock_page(page);
4647 		locked_pages--;
4648 	}
4649 	return ret;
4650 }
4651 
4652 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4653 			unsigned long start,
4654 			unsigned long len)
4655 {
4656 	size_t cur;
4657 	size_t offset;
4658 	struct page *page;
4659 	char *kaddr;
4660 	char *dst = (char *)dstv;
4661 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4662 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4663 
4664 	WARN_ON(start > eb->len);
4665 	WARN_ON(start + len > eb->start + eb->len);
4666 
4667 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4668 
4669 	while (len > 0) {
4670 		page = extent_buffer_page(eb, i);
4671 
4672 		cur = min(len, (PAGE_CACHE_SIZE - offset));
4673 		kaddr = page_address(page);
4674 		memcpy(dst, kaddr + offset, cur);
4675 
4676 		dst += cur;
4677 		len -= cur;
4678 		offset = 0;
4679 		i++;
4680 	}
4681 }
4682 
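/*
 * Map part of the buffer directly to a kernel virtual address.  The
 * requested chunk must lie inside the buffer and must not cross a page
 * boundary: on success *map points at the data and *map_len says how
 * many bytes are usable, otherwise -EINVAL is returned.
 */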
4683 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4684 			       unsigned long min_len, char **map,
4685 			       unsigned long *map_start,
4686 			       unsigned long *map_len)
4687 {
4688 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
4689 	char *kaddr;
4690 	struct page *p;
4691 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4692 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4693 	unsigned long end_i = (start_offset + start + min_len - 1) >>
4694 		PAGE_CACHE_SHIFT;
4695 
4696 	if (i != end_i)
4697 		return -EINVAL;
4698 
4699 	if (i == 0) {
4700 		offset = start_offset;
4701 		*map_start = 0;
4702 	} else {
4703 		offset = 0;
4704 		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4705 	}
4706 
4707 	if (start + min_len > eb->len) {
4708 		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4709 		       "wanted %lu %lu\n", (unsigned long long)eb->start,
4710 		       eb->len, start, min_len);
4711 		return -EINVAL;
4712 	}
4713 
4714 	p = extent_buffer_page(eb, i);
4715 	kaddr = page_address(p);
4716 	*map = kaddr + offset;
4717 	*map_len = PAGE_CACHE_SIZE - offset;
4718 	return 0;
4719 }
4720 
4721 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4722 			  unsigned long start,
4723 			  unsigned long len)
4724 {
4725 	size_t cur;
4726 	size_t offset;
4727 	struct page *page;
4728 	char *kaddr;
4729 	char *ptr = (char *)ptrv;
4730 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4731 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4732 	int ret = 0;
4733 
4734 	WARN_ON(start > eb->len);
4735 	WARN_ON(start + len > eb->start + eb->len);
4736 
4737 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4738 
4739 	while (len > 0) {
4740 		page = extent_buffer_page(eb, i);
4741 
4742 		cur = min(len, (PAGE_CACHE_SIZE - offset));
4743 
4744 		kaddr = page_address(page);
4745 		ret = memcmp(ptr, kaddr + offset, cur);
4746 		if (ret)
4747 			break;
4748 
4749 		ptr += cur;
4750 		len -= cur;
4751 		offset = 0;
4752 		i++;
4753 	}
4754 	return ret;
4755 }
4756 
4757 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4758 			 unsigned long start, unsigned long len)
4759 {
4760 	size_t cur;
4761 	size_t offset;
4762 	struct page *page;
4763 	char *kaddr;
4764 	char *src = (char *)srcv;
4765 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4766 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4767 
4768 	WARN_ON(start > eb->len);
4769 	WARN_ON(start + len > eb->start + eb->len);
4770 
4771 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4772 
4773 	while (len > 0) {
4774 		page = extent_buffer_page(eb, i);
4775 		WARN_ON(!PageUptodate(page));
4776 
4777 		cur = min(len, PAGE_CACHE_SIZE - offset);
4778 		kaddr = page_address(page);
4779 		memcpy(kaddr + offset, src, cur);
4780 
4781 		src += cur;
4782 		len -= cur;
4783 		offset = 0;
4784 		i++;
4785 	}
4786 }
4787 
4788 void memset_extent_buffer(struct extent_buffer *eb, char c,
4789 			  unsigned long start, unsigned long len)
4790 {
4791 	size_t cur;
4792 	size_t offset;
4793 	struct page *page;
4794 	char *kaddr;
4795 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4796 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4797 
4798 	WARN_ON(start > eb->len);
4799 	WARN_ON(start + len > eb->start + eb->len);
4800 
4801 	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4802 
4803 	while (len > 0) {
4804 		page = extent_buffer_page(eb, i);
4805 		WARN_ON(!PageUptodate(page));
4806 
4807 		cur = min(len, PAGE_CACHE_SIZE - offset);
4808 		kaddr = page_address(page);
4809 		memset(kaddr + offset, c, cur);
4810 
4811 		len -= cur;
4812 		offset = 0;
4813 		i++;
4814 	}
4815 }
4816 
4817 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4818 			unsigned long dst_offset, unsigned long src_offset,
4819 			unsigned long len)
4820 {
4821 	u64 dst_len = dst->len;
4822 	size_t cur;
4823 	size_t offset;
4824 	struct page *page;
4825 	char *kaddr;
4826 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4827 	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4828 
4829 	WARN_ON(src->len != dst_len);
4830 
4831 	offset = (start_offset + dst_offset) &
4832 		((unsigned long)PAGE_CACHE_SIZE - 1);
4833 
4834 	while (len > 0) {
4835 		page = extent_buffer_page(dst, i);
4836 		WARN_ON(!PageUptodate(page));
4837 
4838 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4839 
4840 		kaddr = page_address(page);
4841 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
4842 
4843 		src_offset += cur;
4844 		len -= cur;
4845 		offset = 0;
4846 		i++;
4847 	}
4848 }
4849 
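/*
 * memmove-style helper for a single page pair.  When source and
 * destination pages differ the bytes are copied highest-first, which is
 * what memmove_extent_buffer() needs as it walks the range backwards.
 */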
4850 static void move_pages(struct page *dst_page, struct page *src_page,
4851 		       unsigned long dst_off, unsigned long src_off,
4852 		       unsigned long len)
4853 {
4854 	char *dst_kaddr = page_address(dst_page);
4855 	if (dst_page == src_page) {
4856 		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4857 	} else {
4858 		char *src_kaddr = page_address(src_page);
4859 		char *p = dst_kaddr + dst_off + len;
4860 		char *s = src_kaddr + src_off + len;
4861 
4862 		while (len--)
4863 			*--p = *--s;
4864 	}
4865 }
4866 
4867 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4868 {
4869 	unsigned long distance = (src > dst) ? src - dst : dst - src;
4870 	return distance < len;
4871 }
4872 
4873 static void copy_pages(struct page *dst_page, struct page *src_page,
4874 		       unsigned long dst_off, unsigned long src_off,
4875 		       unsigned long len)
4876 {
4877 	char *dst_kaddr = page_address(dst_page);
4878 	char *src_kaddr;
4879 	int must_memmove = 0;
4880 
4881 	if (dst_page != src_page) {
4882 		src_kaddr = page_address(src_page);
4883 	} else {
4884 		src_kaddr = dst_kaddr;
4885 		if (areas_overlap(src_off, dst_off, len))
4886 			must_memmove = 1;
4887 	}
4888 
4889 	if (must_memmove)
4890 		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4891 	else
4892 		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4893 }
4894 
4895 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4896 			   unsigned long src_offset, unsigned long len)
4897 {
4898 	size_t cur;
4899 	size_t dst_off_in_page;
4900 	size_t src_off_in_page;
4901 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4902 	unsigned long dst_i;
4903 	unsigned long src_i;
4904 
4905 	if (src_offset + len > dst->len) {
4906 		printk(KERN_ERR "btrfs memcpy bogus src_offset %lu move "
4907 		       "len %lu dst len %lu\n", src_offset, len, dst->len);
4908 		BUG_ON(1);
4909 	}
4910 	if (dst_offset + len > dst->len) {
4911 		printk(KERN_ERR "btrfs memcpy bogus dst_offset %lu move "
4912 		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
4913 		BUG_ON(1);
4914 	}
4915 
4916 	while (len > 0) {
4917 		dst_off_in_page = (start_offset + dst_offset) &
4918 			((unsigned long)PAGE_CACHE_SIZE - 1);
4919 		src_off_in_page = (start_offset + src_offset) &
4920 			((unsigned long)PAGE_CACHE_SIZE - 1);
4921 
4922 		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4923 		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4924 
4925 		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4926 					       src_off_in_page));
4927 		cur = min_t(unsigned long, cur,
4928 			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4929 
4930 		copy_pages(extent_buffer_page(dst, dst_i),
4931 			   extent_buffer_page(dst, src_i),
4932 			   dst_off_in_page, src_off_in_page, cur);
4933 
4934 		src_offset += cur;
4935 		dst_offset += cur;
4936 		len -= cur;
4937 	}
4938 }
4939 
4940 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4941 			   unsigned long src_offset, unsigned long len)
4942 {
4943 	size_t cur;
4944 	size_t dst_off_in_page;
4945 	size_t src_off_in_page;
4946 	unsigned long dst_end = dst_offset + len - 1;
4947 	unsigned long src_end = src_offset + len - 1;
4948 	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4949 	unsigned long dst_i;
4950 	unsigned long src_i;
4951 
4952 	if (src_offset + len > dst->len) {
4953 		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4954 		       "len %lu len %lu\n", src_offset, len, dst->len);
4955 		BUG_ON(1);
4956 	}
4957 	if (dst_offset + len > dst->len) {
4958 		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4959 		       "len %lu len %lu\n", dst_offset, len, dst->len);
4960 		BUG_ON(1);
4961 	}
4962 	if (dst_offset < src_offset) {
4963 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4964 		return;
4965 	}
4966 	while (len > 0) {
4967 		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4968 		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4969 
4970 		dst_off_in_page = (start_offset + dst_end) &
4971 			((unsigned long)PAGE_CACHE_SIZE - 1);
4972 		src_off_in_page = (start_offset + src_end) &
4973 			((unsigned long)PAGE_CACHE_SIZE - 1);
4974 
4975 		cur = min_t(unsigned long, len, src_off_in_page + 1);
4976 		cur = min(cur, dst_off_in_page + 1);
4977 		move_pages(extent_buffer_page(dst, dst_i),
4978 			   extent_buffer_page(dst, src_i),
4979 			   dst_off_in_page - cur + 1,
4980 			   src_off_in_page - cur + 1, cur);
4981 
4982 		dst_end -= cur;
4983 		src_end -= cur;
4984 		len -= cur;
4985 	}
4986 }
4987 
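/*
 * releasepage hook for metadata pages: drop the tree's reference on the
 * eb attached to @page when nothing else is using it.  Returns 1 if the
 * buffer was released and the page can be freed, 0 otherwise.
 */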
4988 int try_release_extent_buffer(struct page *page, gfp_t mask)
4989 {
4990 	struct extent_buffer *eb;
4991 
4992 	/*
4993 	 * We need to make sure nobody is attaching this page to an eb right
4994 	 * now.
4995 	 */
4996 	spin_lock(&page->mapping->private_lock);
4997 	if (!PagePrivate(page)) {
4998 		spin_unlock(&page->mapping->private_lock);
4999 		return 1;
5000 	}
5001 
5002 	eb = (struct extent_buffer *)page->private;
5003 	BUG_ON(!eb);
5004 
5005 	/*
5006 	 * This is a little awful, but should be OK: we need to make sure that
5007 	 * the eb doesn't disappear out from under us while we're looking at
5008 	 * this page.
5009 	 */
5010 	spin_lock(&eb->refs_lock);
5011 	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5012 		spin_unlock(&eb->refs_lock);
5013 		spin_unlock(&page->mapping->private_lock);
5014 		return 0;
5015 	}
5016 	spin_unlock(&page->mapping->private_lock);
5017 
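	/*
	 * As in try_release_extent_state() above: clamp a blocking mask
	 * to plain GFP_NOFS so we never recurse into the filesystem.
	 */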
5018 	if ((mask & GFP_NOFS) == GFP_NOFS)
5019 		mask = GFP_NOFS;
5020 
5021 	/*
5022 	 * If tree ref isn't set then we know the ref on this eb is a real ref,
5023 	 * so just return, this page will likely be freed soon anyway.
5024 	 */
5025 	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5026 		spin_unlock(&eb->refs_lock);
5027 		return 0;
5028 	}
5029 
5030 	return release_extent_buffer(eb, mask);
5031 }
5032