/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int rw;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
};

/*
 * These are used to set the lockdep class on the extent buffer locks.
 * The class is set by the readpage_end_io_hook after the buffer has
 * passed csum validation but before the pages are unlocked.
 *
 * The lockdep class is also set by btrfs_init_new_buffer on freshly
 * allocated blocks.
 *
 * The class is based on the level in the tree block, which allows lockdep
 * to know that lower nodes nest inside the locks of higher nodes.
 *
 * We also add a check to make sure the highest level of the tree is
 * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
 * code needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error "BTRFS_MAX_LEVEL changed; update the lockdep class names below"
# endif
static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
	/* leaf */
	"btrfs-extent-00",
	"btrfs-extent-01",
	"btrfs-extent-02",
	"btrfs-extent-03",
	"btrfs-extent-04",
	"btrfs-extent-05",
	"btrfs-extent-06",
	"btrfs-extent-07",
	/* highest possible level */
	"btrfs-extent-08",
};
#endif

/*
 * extents on the btree inode are pretty simple: there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t page_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
		u64 failed_len = em->len;

		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (em) {
			ret = 0;
		} else {
			em = lookup_extent_mapping(em_tree, failed_start,
						   failed_len);
			ret = -EIO;
		}
	} else if (ret) {
		free_extent_map(em);
		em = NULL;
	}
	write_unlock(&em_tree->lock);

	if (ret)
		em = ERR_PTR(ret);
out:
	return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	*(__le32 *)result = ~cpu_to_le32(crc);
}
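
/*
 * An illustrative sketch (not called anywhere in this file): checksumming a
 * buffer by hand follows the same pattern csum_tree_block uses below --
 * seed the crc with all ones, feed the data through btrfs_csum_data, and
 * store the inverted, little-endian result with btrfs_csum_final:
 *
 *	char result[BTRFS_CSUM_SIZE];
 *	u32 crc = ~(u32)0;
 *
 *	crc = btrfs_csum_data(root, data, crc, len);
 *	btrfs_csum_final(crc, result);
 */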

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *map_token = NULL;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&map_token, &kaddr,
					&map_start, &map_len, KM_USER0);
		if (err)
			return 1;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
		unmap_extent_buffer(buf, map_token, KM_USER0);
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return 1;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			if (printk_ratelimit()) {
				printk(KERN_INFO "btrfs: %s checksum verify "
				       "failed on %llu wanted %X found %X "
				       "level %d\n",
				       root->fs_info->sb->s_id,
				       (unsigned long long)buf->start, val, found,
				       btrfs_header_level(buf));
			}
			if (result != (char *)&inline_result)
				kfree(result);
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
{
	struct extent_state *cached_state = NULL;
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 0, &cached_state, GFP_NOFS);
	if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	if (printk_ratelimit()) {
		printk("parent transid verify failed on %llu wanted %llu "
		       "found %llu\n",
		       (unsigned long long)eb->start,
		       (unsigned long long)parent_transid,
		       (unsigned long long)btrfs_header_generation(eb));
	}
	ret = 1;
	clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;

	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
					       btree_get_extent, mirror_num);
		if (!ret &&
		    !verify_parent_transid(io_tree, eb, parent_transid))
			return ret;

		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
					      eb->start, eb->len);
		if (num_copies == 1)
			return ret;

		mirror_num++;
		if (mirror_num > num_copies)
			return ret;
	}
	return -EIO;
}
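
/*
 * Reading above starts with mirror_num == 0, which lets the lower layers
 * pick a copy; on a bad checksum or transid mismatch the loop walks the
 * remaining mirrors (1..num_copies) before giving up.  A rough sketch of a
 * caller (illustrative only -- read_tree_block below is the real one):
 *
 *	eb = btrfs_find_create_tree_block(root, bytenr, blocksize);
 *	if (eb && !btree_read_extent_buffer_pages(root, eb, 0, transid))
 *		;	// eb contents now verified against transid
 */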

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;
	WARN_ON(len == 0);

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
					     btrfs_header_generation(eb));
	BUG_ON(ret);
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		WARN_ON(1);
		goto err;
	}
	if (eb->first_page != page) {
		WARN_ON(1);
		goto err;
	}
	if (!PageUptodate(page)) {
		WARN_ON(1);
		goto err;
	}
	found_level = btrfs_header_level(eb);

	csum_tree_block(root, eb, 0);
err:
	free_extent_buffer(eb);
out:
	return 0;
}

static int check_tree_block_fsid(struct btrfs_root *root,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
			   BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}
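
/*
 * The loop above walks the ->seed chain, so a block is accepted if its fsid
 * matches this filesystem or any seed filesystem it was sprouted from.
 * Returns 0 on a match and 1 otherwise, which is why the caller below
 * treats a non-zero return as -EIO.
 */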

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
	lockdep_set_class_and_name(&eb->lock,
			   &btrfs_eb_class[level],
			   btrfs_eb_name[level]);
}
#endif

static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;

	len = page->private >> 2;
	WARN_ON(len == 0);

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "btrfs bad tree block start "
			       "%llu %llu\n",
			       (unsigned long long)found_start,
			       (unsigned long long)eb->start);
		}
		ret = -EIO;
		goto err;
	}
	if (eb->first_page != page) {
		printk(KERN_INFO "btrfs bad first page %lu %lu\n",
		       eb->first_page->index, page->index);
		WARN_ON(1);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(root, eb)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "btrfs bad fsid on block %llu\n",
			       (unsigned long long)eb->start);
		}
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);

	btrfs_set_buffer_lockdep_class(eb, found_level);

	ret = csum_tree_block(root, eb, 1);
	if (ret)
		ret = -EIO;

	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
	end = eb->start + end - 1;
err:
	free_extent_buffer(eb);
out:
	return ret;
}

static void end_workqueue_bio(struct bio *bio, int err)
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;

	if (bio->bi_rw & (1 << BIO_RW)) {
		if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_write_workers,
					   &end_io_wq->work);
	} else {
		if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_workers,
					   &end_io_wq->work);
	}
}

int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;
	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
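
/*
 * btrfs_bio_wq_end_io saves the caller's bi_end_io/bi_private pair and
 * redirects completion through end_workqueue_bio, so the real end_io runs
 * later in a worker thread (end_workqueue_fn restores the saved pair).
 * Illustrative use, as in btree_submit_bio_hook below:
 *
 *	ret = btrfs_bio_wq_end_io(fs_info, bio, 1);	// 1 == metadata
 */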

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->workers.max_workers,
				    info->fs_devices->open_devices);
	return 256 * limit;
}
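
/*
 * Worked example (illustrative numbers): with 8 worker threads and 4 open
 * devices the limit is 256 * min(8, 4) == 1024 in-flight async bios; the
 * congested check below reports congestion once nr_async_bios passes that.
 */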

int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
	return atomic_read(&info->nr_async_bios) >
		btrfs_async_submit_limit(info);
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;
	async->submit_bio_start(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags,
			       async->bio_offset);
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	atomic_dec(&fs_info->nr_async_submits);

	if (atomic_read(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	async->submit_bio_done(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags,
			       async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	async->work.func = run_one_async_start;
	async->work.ordered_func = run_one_async_done;
	async->work.ordered_free = run_one_async_free;

	async->work.flags = 0;
	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	atomic_inc(&fs_info->nr_async_submits);

	if (rw & (1 << BIO_RW_SYNCIO))
		btrfs_set_work_high_prio(&async->work);

	btrfs_queue_worker(&fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	      atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}
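
/*
 * The three work callbacks wired up above split each async submission in
 * two: work.func (run_one_async_start) does the expensive checksumming in
 * parallel, ordered_func (run_one_async_done) sends the bios down in the
 * order they were queued, and ordered_free releases the struct.  The
 * ordering guarantee comes from the async-thread code, not from anything
 * in this file.
 */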

static int btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	struct btrfs_root *root;

	WARN_ON(bio->bi_vcnt <= 0);
	while (bio_index < bio->bi_vcnt) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		csum_dirty_buffer(root, bvec->bv_page);
		bio_index++;
		bvec++;
	}
	return 0;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Checksum here; the actual submission
	 * into btrfs_map_bio happens later in __btree_submit_bio_done
	 */
	btree_csum_one_bio(bio);
	return 0;
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int ret;

	ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
					  bio, 1);
	BUG_ON(ret);

	if (!(rw & (1 << BIO_RW))) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 0);
	}

	/*
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	 */
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num, 0,
				   bio_offset,
				   __btree_submit_bio_start,
				   __btree_submit_bio_done);
}
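
/*
 * To summarize the two paths above: metadata reads are mapped immediately
 * and their checksums verified after completion in the endio workers
 * (courtesy of btrfs_bio_wq_end_io); metadata writes are checksummed first
 * in __btree_submit_bio_start and only then mapped and submitted in
 * __btree_submit_bio_done.
 */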

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct extent_buffer *eb;
	int was_dirty;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (!(current->flags & PF_MEMALLOC)) {
		return extent_write_full_page(tree, page,
					      btree_get_extent, wbc);
	}

	redirty_page_for_writepage(wbc, page);
	eb = btrfs_find_tree_block(root, page_offset(page),
				      PAGE_CACHE_SIZE);
	WARN_ON(!eb);

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
	if (!was_dirty) {
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	free_extent_buffer(eb);

	unlock_page(page);
	return 0;
}

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		struct btrfs_root *root = BTRFS_I(mapping->host)->root;
		u64 num_dirty;
		unsigned long thresh = 32 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

		/* this is a bit racy, but that's ok */
		num_dirty = root->fs_info->dirty_metadata_bytes;
		if (num_dirty < thresh)
			return 0;
	}
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	if (PageWriteback(page) || PageDirty(page))
		return 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;

	ret = try_release_extent_state(map, tree, page, gfp_flags);
	if (!ret)
		return 0;

	ret = try_release_extent_buffer(tree, page);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}

	return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk(KERN_WARNING "btrfs warning page private not zero "
		       "on page %llu\n", (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
	.sync_page	= block_sync_page,
};
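
/*
 * btree_aops is installed on the btree inode's mapping in open_ctree, so
 * every metadata page goes through the extent_io hooks above rather than
 * the generic buffer-head paths.
 */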

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, 0, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize, GFP_NOFS);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						 u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize, NULL, GFP_NOFS);
	return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
					buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	return filemap_fdatawait_range(buf->first_page->mapping,
				       buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree;
	int ret;

	io_tree = &BTRFS_I(btree_inode)->io_tree;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

	if (ret == 0)
		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
	return buf;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			spin_lock(&root->fs_info->delalloc_lock);
			if (root->fs_info->dirty_metadata_bytes >= buf->len)
				root->fs_info->dirty_metadata_bytes -= buf->len;
			else
				WARN_ON(1);
			spin_unlock(&root->fs_info->delalloc_lock);
		}

		/* ugh, clear_extent_buffer_dirty needs to lock the page */
		btrfs_set_lock_blocking(buf);
		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					  buf);
	}
	return 0;
}
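
/*
 * Note the accounting pairing: btree_writepage adds PAGE_CACHE_SIZE to
 * dirty_metadata_bytes when it redirties a buffer under memory pressure,
 * and clean_tree_block subtracts buf->len when the buffer is cleaned, both
 * under delalloc_lock.  btree_writepages uses the same counter for its
 * 32MB write-out threshold.
 */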

static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			u32 stripesize, struct btrfs_root *root,
			struct btrfs_fs_info *fs_info,
			u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;
	root->in_radix = 0;
	root->orphan_item_inserted = 0;
	root->orphan_cleanup_state = 0;

	root->fs_info = fs_info;
	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->name = NULL;
	root->in_sysfs = 0;
	root->inode_tree = RB_ROOT;
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->orphan_list);
	INIT_LIST_HEAD(&root->root_list);
	spin_lock_init(&root->node_lock);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->accounting_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	root->log_batch = 0;
	root->log_transid = 0;
	root->last_log_commit = 0;
	extent_io_tree_init(&root->dirty_log_pages,
			     fs_info->btree_inode->i_mapping, GFP_NOFS);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->root_key.objectid = objectid;
	root->anon_super.s_root = NULL;
	root->anon_super.s_dev = 0;
	INIT_LIST_HEAD(&root->anon_super.s_list);
	INIT_LIST_HEAD(&root->anon_super.s_instances);
	init_rwsem(&root->anon_super.s_umount);

	return 0;
}

static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;
	u32 blocksize;
	u64 generation;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	if (ret > 0)
		return -ENOENT;
	BUG_ON(ret);

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	BUG_ON(!root->node);
	root->commit_root = btrfs_root_node(root);
	return 0;
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct extent_buffer *leaf;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
	/*
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */
	root->ref_cows = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
				      BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer(root->node, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(root->node),
			    BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, root->fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	inode_item->generation = cpu_to_le64(1);
	inode_item->size = cpu_to_le64(3);
	inode_item->nlink = cpu_to_le32(1);
	inode_item->nbytes = cpu_to_le64(root->leafsize);
	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->last_log_commit = 0;
	return 0;
}

struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 generation;
	u32 blocksize;
	int ret = 0;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto out;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret == 0) {
		l = path->nodes[0];
		read_extent_buffer(l, &root->root_item,
				btrfs_item_ptr_offset(l, path->slots[0]),
				sizeof(root->root_item));
		memcpy(&root->root_key, location, sizeof(*location));
	}
	btrfs_free_path(path);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ERR_PTR(ret);
	}

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	root->commit_root = btrfs_root_node(root);
	BUG_ON(!root->node);
out:
	if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
		root->ref_cows = 1;

	return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_root *root;

	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_objectid);
	return root;
}

struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
again:
	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	set_anon_super(&root->anon_super, NULL);

	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		root->orphan_item_inserted = 1;

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		goto fail;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		root->in_radix = 1;

	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}

	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid);
	WARN_ON(ret);
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}
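
/*
 * The radix-tree dance above is the usual preload pattern: preload under
 * GFP_NOFS, take fs_roots_radix_lock, insert, and on -EEXIST free our copy
 * and retry the lookup, since another thread won the race and its root is
 * now in the tree.
 */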

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location,
				      const char *name, int namelen)
{
	return btrfs_read_fs_root_no_name(fs_info, location);
#if 0
	struct btrfs_root *root;
	int ret;

	root = btrfs_read_fs_root_no_name(fs_info, location);
	if (!root)
		return NULL;

	if (root->in_sysfs)
		return root;

	ret = btrfs_set_root_name(root, name, namelen);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}

	ret = btrfs_sysfs_add_root(root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root->name);
		kfree(root);
		return ERR_PTR(ret);
	}
	root->in_sysfs = 1;
	return root;
#endif
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * this unplugs every device on the box, and it is only used when page
 * is null
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct btrfs_device *device;
	struct btrfs_fs_info *info;

	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
	list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;

		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi->unplug_io_fn)
			bdi->unplug_io_fn(bdi, page);
	}
}

static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct inode *inode;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct address_space *mapping;
	u64 offset;

	/* the generic O_DIRECT read code does this */
	if (1 || !page) {
		__unplug_io_fn(bdi, page);
		return;
	}

	/*
	 * page->mapping may change at any time.  Get a consistent copy
	 * and use that for everything below
	 */
	smp_mb();
	mapping = page->mapping;
	if (!mapping)
		return;

	inode = mapping->host;

	/*
	 * don't do the expensive searching for a small number of
	 * devices
	 */
	if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
		__unplug_io_fn(bdi, page);
		return;
	}

	offset = page_offset(page);

	em_tree = &BTRFS_I(inode)->extent_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em) {
		__unplug_io_fn(bdi, page);
		return;
	}

	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		free_extent_map(em);
		__unplug_io_fn(bdi, page);
		return;
	}
	offset = offset - em->start;
	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
			  em->block_start + offset, page);
	free_extent_map(em);
}

/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	int err;

	bdi->capabilities = BDI_CAP_MAP_COPY;
	err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
	if (err)
		return err;

	bdi->ra_pages	= default_backing_dev_info.ra_pages;
	bdi->unplug_io_fn	= btrfs_unplug_io_fn;
	bdi->unplug_io_data	= info;
	bdi->congested_fn	= btrfs_congested_fn;
	bdi->congested_data	= info;
	return 0;
}

static int bio_ready_for_csum(struct bio *bio)
{
	u64 length = 0;
	u64 buf_len = 0;
	u64 start = 0;
	struct page *page;
	struct extent_io_tree *io_tree = NULL;
	struct btrfs_fs_info *info = NULL;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page->private == EXTENT_PAGE_PRIVATE) {
			length += bvec->bv_len;
			continue;
		}
		if (!page->private) {
			length += bvec->bv_len;
			continue;
		}
		length = bvec->bv_len;
		buf_len = page->private >> 2;
		start = page_offset(page) + bvec->bv_offset;
		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
		info = BTRFS_I(page->mapping->host)->root->fs_info;
	}
	/* are we fully contained in this bio? */
	if (buf_len <= length)
		return 1;

	ret = extent_range_uptodate(io_tree, start + length,
				    start + buf_len - 1);
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	/* metadata bio reads are special because the whole tree block must
	 * be checksummed at once.  This makes sure the entire block is in
	 * ram and up to date before trying to verify things.  For
	 * blocksize <= pagesize, it is basically a noop
	 */
	if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
	    !bio_ready_for_csum(bio)) {
		btrfs_queue_worker(&fs_info->endio_meta_workers,
				   &end_io_wq->work);
		return;
	}
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
	bio_endio(bio, error);
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;

	do {
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);

		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
		    mutex_trylock(&root->fs_info->cleaner_mutex)) {
			btrfs_run_delayed_iputs(root);
			btrfs_clean_old_snapshots(root);
			mutex_unlock(&root->fs_info->cleaner_mutex);
		}

		if (freezing(current)) {
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	int ret;

	do {
		delay = HZ * 30;
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		spin_lock(&root->fs_info->new_trans_lock);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&root->fs_info->new_trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (!cur->blocked &&
		    (now < cur->start_time || now - cur->start_time < 30)) {
			spin_unlock(&root->fs_info->new_trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&root->fs_info->new_trans_lock);

		trans = btrfs_join_transaction(root, 1);
		if (transid == trans->transid) {
			ret = btrfs_commit_transaction(trans, root);
			BUG_ON(ret);
		} else {
			btrfs_end_transaction(trans, root);
		}
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (freezing(current)) {
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    !btrfs_transaction_blocked(root->fs_info))
				schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
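
/*
 * transaction_kthread wakes up every 30 seconds (delay = HZ * 30) and
 * commits the running transaction once it is blocked or at least 30
 * seconds old; otherwise it re-checks in 5 seconds.  Each pass also kicks
 * cleaner_kthread, which handles delayed iputs and old snapshots.
 */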

struct btrfs_root *open_ctree(struct super_block *sb,
			      struct btrfs_fs_devices *fs_devices,
			      char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 blocksize;
	u32 stripesize;
	u64 generation;
	u64 features;
	struct btrfs_key location;
	struct buffer_head *bh;
	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
					       GFP_NOFS);
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
						GFP_NOFS);
	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
						GFP_NOFS);
	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
					      GFP_NOFS);
	struct btrfs_root *log_tree_root;

	int ret;
	int err = -EINVAL;

	struct btrfs_super_block *disk_super;

	if (!extent_root || !tree_root || !fs_info ||
	    !chunk_root || !dev_root || !csum_root) {
		err = -ENOMEM;
		goto fail;
	}

	ret = init_srcu_struct(&fs_info->subvol_srcu);
	if (ret) {
		err = ret;
		goto fail;
	}

	ret = setup_bdi(fs_info, &fs_info->bdi);
	if (ret) {
		err = ret;
		goto fail_srcu;
	}

	fs_info->btree_inode = new_inode(sb);
	if (!fs_info->btree_inode) {
		err = -ENOMEM;
		goto fail_bdi;
	}

	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->delayed_iputs);
	INIT_LIST_HEAD(&fs_info->hashers);
	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
	INIT_LIST_HEAD(&fs_info->ordered_operations);
	INIT_LIST_HEAD(&fs_info->caching_block_groups);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->new_trans_lock);
	spin_lock_init(&fs_info->ref_cache_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->delayed_iput_lock);

	init_completion(&fs_info->kobj_unregister);
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->csum_root = csum_root;
	fs_info->chunk_root = chunk_root;
	fs_info->dev_root = dev_root;
	fs_info->fs_devices = fs_devices;
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
	btrfs_init_block_rsv(&fs_info->global_block_rsv);
	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
	btrfs_init_block_rsv(&fs_info->trans_block_rsv);
	btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
	btrfs_init_block_rsv(&fs_info->empty_block_rsv);
	INIT_LIST_HEAD(&fs_info->durable_block_rsv_list);
	mutex_init(&fs_info->durable_block_rsv_mutex);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->async_submit_draining, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	fs_info->sb = sb;
	fs_info->max_inline = 8192 * 1024;
	fs_info->metadata_ratio = 0;

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);
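	/*
	 * Illustrative sizing: on a 4-cpu box the pool gets 4 + 2 == 6
	 * threads; anything with 6 or more cpus is capped at 8.
	 */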

	INIT_LIST_HEAD(&fs_info->ordered_extents);
	spin_lock_init(&fs_info->ordered_extent_lock);

	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);
	sb->s_bdi = &fs_info->bdi;

	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	fs_info->btree_inode->i_nlink = 1;
	/*
	 * we set the i_size on the btree inode to the max possible offset
	 * (OFFSET_MAX).  the real end of the address space is determined
	 * by all of the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			     fs_info->btree_inode->i_mapping,
			     GFP_NOFS);
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
			     GFP_NOFS);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
	insert_inode_hash(fs_info->btree_inode);

	spin_lock_init(&fs_info->block_group_cache_lock);
	fs_info->block_group_cache_tree = RB_ROOT;

	extent_io_tree_init(&fs_info->freed_extents[0],
			     fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->freed_extents[1],
			     fs_info->btree_inode->i_mapping, GFP_NOFS);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	fs_info->do_barriers = 1;

	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->ordered_operations_mutex);
	mutex_init(&fs_info->tree_log_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	init_rwsem(&fs_info->extent_commit_sem);
	init_rwsem(&fs_info->cleanup_work_sem);
	init_rwsem(&fs_info->subvol_sem);

	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);

	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
	if (!bh)
		goto fail_iput;

	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
	memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
	       sizeof(fs_info->super_for_commit));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

	disk_super = &fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_iput;

	ret = btrfs_parse_options(tree_root, options);
	if (ret) {
		err = ret;
		goto fail_iput;
	}

	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		printk(KERN_ERR "BTRFS: couldn't mount because of "
		       "unsupported optional features (%Lx).\n",
		       (unsigned long long)features);
		err = -EINVAL;
		goto fail_iput;
	}

	features = btrfs_super_incompat_flags(disk_super);
	if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) {
		features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
		btrfs_set_super_incompat_flags(disk_super, features);
	}

	features = btrfs_super_compat_ro_flags(disk_super) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP;
	if (!(sb->s_flags & MS_RDONLY) && features) {
		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
		       "unsupported option features (%Lx).\n",
		       (unsigned long long)features);
		err = -EINVAL;
		goto fail_iput;
	}

	btrfs_init_workers(&fs_info->generic_worker,
			   "genwork", 1, NULL);

	btrfs_init_workers(&fs_info->workers, "worker",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->submit_workers, "submit",
			   min_t(u64, fs_devices->num_devices,
			   fs_info->thread_pool_size),
			   &fs_info->generic_worker);
	/* a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers.idle_thresh = 64;

	fs_info->workers.idle_thresh = 16;
	fs_info->workers.ordered = 1;

	fs_info->delalloc_workers.idle_thresh = 2;
	fs_info->delalloc_workers.ordered = 1;

	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_write_workers,
			   "endio-meta-write", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_meta_workers.idle_thresh = 4;

	fs_info->endio_write_workers.idle_thresh = 2;
	fs_info->endio_meta_write_workers.idle_thresh = 2;

	btrfs_start_workers(&fs_info->workers, 1);
	btrfs_start_workers(&fs_info->generic_worker, 1);
	btrfs_start_workers(&fs_info->submit_workers, 1);
	btrfs_start_workers(&fs_info->delalloc_workers, 1);
	btrfs_start_workers(&fs_info->fixup_workers, 1);
	btrfs_start_workers(&fs_info->endio_workers, 1);
	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
	btrfs_start_workers(&fs_info->endio_write_workers, 1);

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
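	/*
	 * Illustrative numbers: readahead scales with the device count but
	 * never drops below 4MB; with 4KB pages that floor is 1024 pages.
	 */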
1802 
1803 	nodesize = btrfs_super_nodesize(disk_super);
1804 	leafsize = btrfs_super_leafsize(disk_super);
1805 	sectorsize = btrfs_super_sectorsize(disk_super);
1806 	stripesize = btrfs_super_stripesize(disk_super);
1807 	tree_root->nodesize = nodesize;
1808 	tree_root->leafsize = leafsize;
1809 	tree_root->sectorsize = sectorsize;
1810 	tree_root->stripesize = stripesize;
1811 
1812 	sb->s_blocksize = sectorsize;
1813 	sb->s_blocksize_bits = blksize_bits(sectorsize);
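	/* e.g. the default 4K sectorsize gives s_blocksize_bits == 12 */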
1814 
1815 	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1816 		    sizeof(disk_super->magic))) {
1817 		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1818 		goto fail_sb_buffer;
1819 	}
1820 
1821 	mutex_lock(&fs_info->chunk_mutex);
1822 	ret = btrfs_read_sys_array(tree_root);
1823 	mutex_unlock(&fs_info->chunk_mutex);
1824 	if (ret) {
1825 		printk(KERN_WARNING "btrfs: failed to read the system "
1826 		       "array on %s\n", sb->s_id);
1827 		goto fail_sb_buffer;
1828 	}
1829 
1830 	blocksize = btrfs_level_size(tree_root,
1831 				     btrfs_super_chunk_root_level(disk_super));
1832 	generation = btrfs_super_chunk_root_generation(disk_super);
1833 
1834 	__setup_root(nodesize, leafsize, sectorsize, stripesize,
1835 		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1836 
1837 	chunk_root->node = read_tree_block(chunk_root,
1838 					   btrfs_super_chunk_root(disk_super),
1839 					   blocksize, generation);
1840 	if (!chunk_root->node ||
1841 	    !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
1842 		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
1843 		       sb->s_id);
1844 		goto fail_chunk_root;
1845 	}
1846 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
1847 	chunk_root->commit_root = btrfs_root_node(chunk_root);
1848 
1849 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1850 	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1851 	   BTRFS_UUID_SIZE);
1852 
1853 	mutex_lock(&fs_info->chunk_mutex);
1854 	ret = btrfs_read_chunk_tree(chunk_root);
1855 	mutex_unlock(&fs_info->chunk_mutex);
1856 	if (ret) {
1857 		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1858 		       sb->s_id);
1859 		goto fail_chunk_root;
1860 	}
1861 
1862 	btrfs_close_extra_devices(fs_devices);
1863 
1864 	blocksize = btrfs_level_size(tree_root,
1865 				     btrfs_super_root_level(disk_super));
1866 	generation = btrfs_super_generation(disk_super);
1867 
1868 	tree_root->node = read_tree_block(tree_root,
1869 					  btrfs_super_root(disk_super),
1870 					  blocksize, generation);
1871 	if (!tree_root->node)
1872 		goto fail_chunk_root;
1873 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
1874 		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
1875 		       sb->s_id);
1876 		goto fail_tree_root;
1877 	}
1878 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
1879 	tree_root->commit_root = btrfs_root_node(tree_root);
1880 
1881 	ret = find_and_setup_root(tree_root, fs_info,
1882 				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1883 	if (ret)
1884 		goto fail_tree_root;
1885 	extent_root->track_dirty = 1;
1886 
1887 	ret = find_and_setup_root(tree_root, fs_info,
1888 				  BTRFS_DEV_TREE_OBJECTID, dev_root);
1889 	if (ret)
1890 		goto fail_extent_root;
1891 	dev_root->track_dirty = 1;
1892 
1893 	ret = find_and_setup_root(tree_root, fs_info,
1894 				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
1895 	if (ret)
1896 		goto fail_dev_root;
1897 
1898 	csum_root->track_dirty = 1;
1899 
1900 	fs_info->generation = generation;
1901 	fs_info->last_trans_committed = generation;
1902 	fs_info->data_alloc_profile = (u64)-1;
1903 	fs_info->metadata_alloc_profile = (u64)-1;
1904 	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1905 
1906 	ret = btrfs_read_block_groups(extent_root);
1907 	if (ret) {
1908 		printk(KERN_ERR "btrfs: failed to read block groups: %d\n", ret);
1909 		goto fail_block_groups;
1910 	}
1911 
1912 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1913 					       "btrfs-cleaner");
1914 	if (IS_ERR(fs_info->cleaner_kthread))
1915 		goto fail_block_groups;
1916 
1917 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
1918 						   tree_root,
1919 						   "btrfs-transaction");
1920 	if (IS_ERR(fs_info->transaction_kthread))
1921 		goto fail_cleaner;
1922 
1923 	if (!btrfs_test_opt(tree_root, SSD) &&
1924 	    !btrfs_test_opt(tree_root, NOSSD) &&
1925 	    !fs_info->fs_devices->rotating) {
1926 		printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
1927 		       "mode\n");
1928 		btrfs_set_opt(fs_info->mount_opt, SSD);
1929 	}
1930 
1931 	if (btrfs_super_log_root(disk_super) != 0) {
1932 		u64 bytenr = btrfs_super_log_root(disk_super);
1933 
1934 		if (fs_devices->rw_devices == 0) {
1935 			printk(KERN_WARNING "Btrfs log replay required "
1936 			       "on RO media\n");
1937 			err = -EIO;
1938 			goto fail_trans_kthread;
1939 		}
1940 		blocksize =
1941 		     btrfs_level_size(tree_root,
1942 				      btrfs_super_log_root_level(disk_super));
1943 
1944 		log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
1945 		if (!log_tree_root) {
1946 			err = -ENOMEM;
1947 			goto fail_trans_kthread;
1948 		}
1949 
1950 		__setup_root(nodesize, leafsize, sectorsize, stripesize,
1951 			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1952 
1953 		log_tree_root->node = read_tree_block(tree_root, bytenr,
1954 						      blocksize,
1955 						      generation + 1);
1956 		ret = btrfs_recover_log_trees(log_tree_root);
1957 		BUG_ON(ret);
1958 
1959 		if (sb->s_flags & MS_RDONLY) {
1960 			ret =  btrfs_commit_super(tree_root);
1961 			BUG_ON(ret);
1962 		}
1963 	}
1964 
1965 	ret = btrfs_find_orphan_roots(tree_root);
1966 	BUG_ON(ret);
1967 
1968 	if (!(sb->s_flags & MS_RDONLY)) {
1969 		ret = btrfs_cleanup_fs_roots(fs_info);
1970 		BUG_ON(ret);
1971 
1972 		ret = btrfs_recover_relocation(tree_root);
1973 		if (ret < 0) {
1974 			printk(KERN_WARNING
1975 			       "btrfs: failed to recover relocation\n");
1976 			err = -EINVAL;
1977 			goto fail_trans_kthread;
1978 		}
1979 	}
1980 
1981 	location.objectid = BTRFS_FS_TREE_OBJECTID;
1982 	location.type = BTRFS_ROOT_ITEM_KEY;
1983 	location.offset = (u64)-1;
1984 
1985 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
1986 	if (!fs_info->fs_root)
1987 		goto fail_trans_kthread;
1988 	if (IS_ERR(fs_info->fs_root)) {
1989 		err = PTR_ERR(fs_info->fs_root);
1990 		goto fail_trans_kthread;
1991 	}
1992 
1993 	if (!(sb->s_flags & MS_RDONLY)) {
1994 		down_read(&fs_info->cleanup_work_sem);
1995 		btrfs_orphan_cleanup(fs_info->fs_root);
1996 		up_read(&fs_info->cleanup_work_sem);
1997 	}
1998 
1999 	return tree_root;
2000 
2001 fail_trans_kthread:
2002 	kthread_stop(fs_info->transaction_kthread);
2003 fail_cleaner:
2004 	kthread_stop(fs_info->cleaner_kthread);
2005 
2006 	/*
2007 	 * make sure we're done with the btree inode before we stop our
2008 	 * kthreads
2009 	 */
2010 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2011 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2012 
2013 fail_block_groups:
2014 	btrfs_free_block_groups(fs_info);
2015 	free_extent_buffer(csum_root->node);
2016 	free_extent_buffer(csum_root->commit_root);
2017 fail_dev_root:
2018 	free_extent_buffer(dev_root->node);
2019 	free_extent_buffer(dev_root->commit_root);
2020 fail_extent_root:
2021 	free_extent_buffer(extent_root->node);
2022 	free_extent_buffer(extent_root->commit_root);
2023 fail_tree_root:
2024 	free_extent_buffer(tree_root->node);
2025 	free_extent_buffer(tree_root->commit_root);
2026 fail_chunk_root:
2027 	free_extent_buffer(chunk_root->node);
2028 	free_extent_buffer(chunk_root->commit_root);
2029 fail_sb_buffer:
2030 	btrfs_stop_workers(&fs_info->generic_worker);
2031 	btrfs_stop_workers(&fs_info->fixup_workers);
2032 	btrfs_stop_workers(&fs_info->delalloc_workers);
2033 	btrfs_stop_workers(&fs_info->workers);
2034 	btrfs_stop_workers(&fs_info->endio_workers);
2035 	btrfs_stop_workers(&fs_info->endio_meta_workers);
2036 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2037 	btrfs_stop_workers(&fs_info->endio_write_workers);
2038 	btrfs_stop_workers(&fs_info->submit_workers);
2039 fail_iput:
2040 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2041 	iput(fs_info->btree_inode);
2042 
2043 	btrfs_close_devices(fs_info->fs_devices);
2044 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
2045 fail_bdi:
2046 	bdi_destroy(&fs_info->bdi);
2047 fail_srcu:
2048 	cleanup_srcu_struct(&fs_info->subvol_srcu);
2049 fail:
2050 	kfree(extent_root);
2051 	kfree(tree_root);
2052 	kfree(fs_info);
2053 	kfree(chunk_root);
2054 	kfree(dev_root);
2055 	kfree(csum_root);
2056 	return ERR_PTR(err);
2057 }
2058 
2059 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2060 {
2061 	char b[BDEVNAME_SIZE];
2062 
2063 	if (uptodate) {
2064 		set_buffer_uptodate(bh);
2065 	} else {
2066 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
2067 			printk(KERN_WARNING "lost page write due to "
2068 					"I/O error on %s\n",
2069 				       bdevname(bh->b_bdev, b));
2070 		}
2071 		/* note, we don't set_buffer_write_io_error because we have
2072 		 * our own ways of dealing with the IO errors
2073 		 */
2074 		clear_buffer_uptodate(bh);
2075 	}
2076 	unlock_buffer(bh);
2077 	put_bh(bh);
2078 }
2079 
2080 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2081 {
2082 	struct buffer_head *bh;
2083 	struct buffer_head *latest = NULL;
2084 	struct btrfs_super_block *super;
2085 	int i;
2086 	u64 transid = 0;
2087 	u64 bytenr;
2088 
2089 	/* we would like to check all the supers, but that would make
2090 	 * a btrfs mount succeed after a mkfs from a different FS.
2091 	 * So for now we check only the first super.  Scanning all
2092 	 * BTRFS_SUPER_MIRROR_MAX copies would need a special mount option.
2093 	 */
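	/*
	 * for reference: btrfs_sb_offset(i) returns the byte offset of
	 * super block copy i on the device (64K for the primary, then
	 * 64M and 256G for the mirrors in the usual layout).
	 */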
2094 	for (i = 0; i < 1; i++) {
2095 		bytenr = btrfs_sb_offset(i);
2096 		if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2097 			break;
2098 		bh = __bread(bdev, bytenr / 4096, 4096);
2099 		if (!bh)
2100 			continue;
2101 
2102 		super = (struct btrfs_super_block *)bh->b_data;
2103 		if (btrfs_super_bytenr(super) != bytenr ||
2104 		    strncmp((char *)(&super->magic), BTRFS_MAGIC,
2105 			    sizeof(super->magic))) {
2106 			brelse(bh);
2107 			continue;
2108 		}
2109 
2110 		if (!latest || btrfs_super_generation(super) > transid) {
2111 			brelse(latest);
2112 			latest = bh;
2113 			transid = btrfs_super_generation(super);
2114 		} else {
2115 			brelse(bh);
2116 		}
2117 	}
2118 	return latest;
2119 }
2120 
2121 /*
2122  * this should be called twice, once with wait == 0 and
2123  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2124  * we write are pinned.
2125  *
2126  * They are released when wait == 1 is done.
2127  * max_mirrors must be the same for both runs, and it indicates how
2128  * many supers on this one device should be written.
2129  *
2130  * max_mirrors == 0 means to write them all.
2131  */
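/*
 * a minimal sketch of the intended call pattern (illustrative only; the
 * real caller is write_all_supers() below):
 *
 *	ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
 *	... submit for every device first ...
 *	ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
 */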
2132 static int write_dev_supers(struct btrfs_device *device,
2133 			    struct btrfs_super_block *sb,
2134 			    int do_barriers, int wait, int max_mirrors)
2135 {
2136 	struct buffer_head *bh;
2137 	int i;
2138 	int ret;
2139 	int errors = 0;
2140 	u32 crc;
2141 	u64 bytenr;
2142 	int last_barrier = 0;
2143 
2144 	if (max_mirrors == 0)
2145 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2146 
2147 	/* make sure only the last submit_bh does a barrier */
2148 	if (do_barriers) {
2149 		for (i = 0; i < max_mirrors; i++) {
2150 			bytenr = btrfs_sb_offset(i);
2151 			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2152 			    device->total_bytes)
2153 				break;
2154 			last_barrier = i;
2155 		}
2156 	}
2157 
2158 	for (i = 0; i < max_mirrors; i++) {
2159 		bytenr = btrfs_sb_offset(i);
2160 		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2161 			break;
2162 
2163 		if (wait) {
2164 			bh = __find_get_block(device->bdev, bytenr / 4096,
2165 					      BTRFS_SUPER_INFO_SIZE);
2166 			BUG_ON(!bh);
2167 			wait_on_buffer(bh);
2168 			if (!buffer_uptodate(bh))
2169 				errors++;
2170 
2171 			/* drop our reference */
2172 			brelse(bh);
2173 
2174 			/* drop the reference from the wait == 0 run */
2175 			brelse(bh);
2176 			continue;
2177 		} else {
2178 			btrfs_set_super_bytenr(sb, bytenr);
2179 
2180 			crc = ~(u32)0;
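			/*
			 * the csum covers everything after the csum field
			 * itself: BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE
			 * bytes starting at sb + BTRFS_CSUM_SIZE
			 */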
2181 			crc = btrfs_csum_data(NULL, (char *)sb +
2182 					      BTRFS_CSUM_SIZE, crc,
2183 					      BTRFS_SUPER_INFO_SIZE -
2184 					      BTRFS_CSUM_SIZE);
2185 			btrfs_csum_final(crc, sb->csum);
2186 
2187 			/*
2188 			 * one reference for us from __getblk; the wait == 1
2189 			 * pass drops it with its second brelse
2190 			 */
2191 			bh = __getblk(device->bdev, bytenr / 4096,
2192 				      BTRFS_SUPER_INFO_SIZE);
2193 			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2194 
2195 			/* one reference for submit_bh */
2196 			get_bh(bh);
2197 
2198 			set_buffer_uptodate(bh);
2199 			lock_buffer(bh);
2200 			bh->b_end_io = btrfs_end_buffer_write_sync;
2201 		}
2202 
2203 		if (i == last_barrier && do_barriers && device->barriers) {
2204 			ret = submit_bh(WRITE_BARRIER, bh);
2205 			if (ret == -EOPNOTSUPP) {
2206 				printk(KERN_WARNING "btrfs: disabling barriers on dev %s\n",
2207 				       device->name);
2208 				set_buffer_uptodate(bh);
2209 				device->barriers = 0;
2210 				/* one reference for submit_bh */
2211 				get_bh(bh);
2212 				lock_buffer(bh);
2213 				ret = submit_bh(WRITE_SYNC, bh);
2214 			}
2215 		} else {
2216 			ret = submit_bh(WRITE_SYNC, bh);
2217 		}
2218 
2219 		if (ret)
2220 			errors++;
2221 	}
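	/* succeed if at least one of the i supers attempted was written */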
2222 	return errors < i ? 0 : -1;
2223 }
2224 
2225 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2226 {
2227 	struct list_head *head;
2228 	struct btrfs_device *dev;
2229 	struct btrfs_super_block *sb;
2230 	struct btrfs_dev_item *dev_item;
2231 	int ret;
2232 	int do_barriers;
2233 	int max_errors;
2234 	int total_errors = 0;
2235 	u64 flags;
2236 
2237 	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
2238 	do_barriers = !btrfs_test_opt(root, NOBARRIER);
2239 
2240 	sb = &root->fs_info->super_for_commit;
2241 	dev_item = &sb->dev_item;
2242 
2243 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2244 	head = &root->fs_info->fs_devices->devices;
2245 	list_for_each_entry(dev, head, dev_list) {
2246 		if (!dev->bdev) {
2247 			total_errors++;
2248 			continue;
2249 		}
2250 		if (!dev->in_fs_metadata || !dev->writeable)
2251 			continue;
2252 
2253 		btrfs_set_stack_device_generation(dev_item, 0);
2254 		btrfs_set_stack_device_type(dev_item, dev->type);
2255 		btrfs_set_stack_device_id(dev_item, dev->devid);
2256 		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2257 		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2258 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2259 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2260 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2261 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2262 		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2263 
2264 		flags = btrfs_super_flags(sb);
2265 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2266 
2267 		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2268 		if (ret)
2269 			total_errors++;
2270 	}
2271 	if (total_errors > max_errors) {
2272 		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2273 		       total_errors);
2274 		BUG();
2275 	}
2276 
2277 	total_errors = 0;
2278 	list_for_each_entry(dev, head, dev_list) {
2279 		if (!dev->bdev)
2280 			continue;
2281 		if (!dev->in_fs_metadata || !dev->writeable)
2282 			continue;
2283 
2284 		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2285 		if (ret)
2286 			total_errors++;
2287 	}
2288 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2289 	if (total_errors > max_errors) {
2290 		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2291 		       total_errors);
2292 		BUG();
2293 	}
2294 	return 0;
2295 }
2296 
2297 int write_ctree_super(struct btrfs_trans_handle *trans,
2298 		      struct btrfs_root *root, int max_mirrors)
2299 {
2300 	int ret;
2301 
2302 	ret = write_all_supers(root, max_mirrors);
2303 	return ret;
2304 }
2305 
2306 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2307 {
2308 	spin_lock(&fs_info->fs_roots_radix_lock);
2309 	radix_tree_delete(&fs_info->fs_roots_radix,
2310 			  (unsigned long)root->root_key.objectid);
2311 	spin_unlock(&fs_info->fs_roots_radix_lock);
2312 
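	/*
	 * if nothing references this root any more, wait for in-flight
	 * SRCU readers of the subvolume tree to finish before freeing it
	 */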
2313 	if (btrfs_root_refs(&root->root_item) == 0)
2314 		synchronize_srcu(&fs_info->subvol_srcu);
2315 
2316 	free_fs_root(root);
2317 	return 0;
2318 }
2319 
2320 static void free_fs_root(struct btrfs_root *root)
2321 {
2322 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2323 	if (root->anon_super.s_dev) {
2324 		down_write(&root->anon_super.s_umount);
2325 		kill_anon_super(&root->anon_super);
2326 	}
2327 	free_extent_buffer(root->node);
2328 	free_extent_buffer(root->commit_root);
2329 	kfree(root->name);
2330 	kfree(root);
2331 }
2332 
2333 static int del_fs_roots(struct btrfs_fs_info *fs_info)
2334 {
2335 	int ret;
2336 	struct btrfs_root *gang[8];
2337 	int i;
2338 
2339 	while (!list_empty(&fs_info->dead_roots)) {
2340 		gang[0] = list_entry(fs_info->dead_roots.next,
2341 				     struct btrfs_root, root_list);
2342 		list_del(&gang[0]->root_list);
2343 
2344 		if (gang[0]->in_radix) {
2345 			btrfs_free_fs_root(fs_info, gang[0]);
2346 		} else {
2347 			free_extent_buffer(gang[0]->node);
2348 			free_extent_buffer(gang[0]->commit_root);
2349 			kfree(gang[0]);
2350 		}
2351 	}
2352 
2353 	while (1) {
2354 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2355 					     (void **)gang, 0,
2356 					     ARRAY_SIZE(gang));
2357 		if (!ret)
2358 			break;
2359 		for (i = 0; i < ret; i++)
2360 			btrfs_free_fs_root(fs_info, gang[i]);
2361 	}
2362 	return 0;
2363 }
2364 
2365 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2366 {
2367 	u64 root_objectid = 0;
2368 	struct btrfs_root *gang[8];
2369 	int i;
2370 	int ret;
2371 
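	/*
	 * walk every fs root in the radix tree, up to 8 at a time,
	 * resuming each lookup just past the last objectid processed
	 */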
2372 	while (1) {
2373 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2374 					     (void **)gang, root_objectid,
2375 					     ARRAY_SIZE(gang));
2376 		if (!ret)
2377 			break;
2378 
2379 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
2380 		for (i = 0; i < ret; i++) {
2381 			root_objectid = gang[i]->root_key.objectid;
2382 			btrfs_orphan_cleanup(gang[i]);
2383 		}
2384 		root_objectid++;
2385 	}
2386 	return 0;
2387 }
2388 
2389 int btrfs_commit_super(struct btrfs_root *root)
2390 {
2391 	struct btrfs_trans_handle *trans;
2392 	int ret;
2393 
2394 	mutex_lock(&root->fs_info->cleaner_mutex);
2395 	btrfs_run_delayed_iputs(root);
2396 	btrfs_clean_old_snapshots(root);
2397 	mutex_unlock(&root->fs_info->cleaner_mutex);
2398 
2399 	/* wait until any ongoing cleanup work is done */
2400 	down_write(&root->fs_info->cleanup_work_sem);
2401 	up_write(&root->fs_info->cleanup_work_sem);
2402 
2403 	trans = btrfs_join_transaction(root, 1);
2404 	ret = btrfs_commit_transaction(trans, root);
2405 	BUG_ON(ret);
2406 	/* run commit again to drop the original snapshot */
2407 	trans = btrfs_join_transaction(root, 1);
2408 	btrfs_commit_transaction(trans, root);
2409 	ret = btrfs_write_and_wait_transaction(NULL, root);
2410 	BUG_ON(ret);
2411 
2412 	ret = write_ctree_super(NULL, root, 0);
2413 	return ret;
2414 }
2415 
2416 int close_ctree(struct btrfs_root *root)
2417 {
2418 	struct btrfs_fs_info *fs_info = root->fs_info;
2419 	int ret;
2420 
2421 	fs_info->closing = 1;
2422 	smp_mb();
2423 
2424 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2425 		ret =  btrfs_commit_super(root);
2426 		if (ret)
2427 			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2428 	}
2429 
2430 	kthread_stop(root->fs_info->transaction_kthread);
2431 	kthread_stop(root->fs_info->cleaner_kthread);
2432 
2433 	fs_info->closing = 2;
2434 	smp_mb();
2435 
2436 	if (fs_info->delalloc_bytes) {
2437 		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2438 		       (unsigned long long)fs_info->delalloc_bytes);
2439 	}
2440 	if (fs_info->total_ref_cache_size) {
2441 		printk(KERN_INFO "btrfs: at unmount reference cache size %llu\n",
2442 		       (unsigned long long)fs_info->total_ref_cache_size);
2443 	}
2444 
2445 	free_extent_buffer(fs_info->extent_root->node);
2446 	free_extent_buffer(fs_info->extent_root->commit_root);
2447 	free_extent_buffer(fs_info->tree_root->node);
2448 	free_extent_buffer(fs_info->tree_root->commit_root);
2449 	free_extent_buffer(root->fs_info->chunk_root->node);
2450 	free_extent_buffer(root->fs_info->chunk_root->commit_root);
2451 	free_extent_buffer(root->fs_info->dev_root->node);
2452 	free_extent_buffer(root->fs_info->dev_root->commit_root);
2453 	free_extent_buffer(root->fs_info->csum_root->node);
2454 	free_extent_buffer(root->fs_info->csum_root->commit_root);
2455 
2456 	btrfs_free_block_groups(root->fs_info);
2457 
2458 	del_fs_roots(fs_info);
2459 
2460 	iput(fs_info->btree_inode);
2461 
2462 	btrfs_stop_workers(&fs_info->generic_worker);
2463 	btrfs_stop_workers(&fs_info->fixup_workers);
2464 	btrfs_stop_workers(&fs_info->delalloc_workers);
2465 	btrfs_stop_workers(&fs_info->workers);
2466 	btrfs_stop_workers(&fs_info->endio_workers);
2467 	btrfs_stop_workers(&fs_info->endio_meta_workers);
2468 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2469 	btrfs_stop_workers(&fs_info->endio_write_workers);
2470 	btrfs_stop_workers(&fs_info->submit_workers);
2471 
2472 	btrfs_close_devices(fs_info->fs_devices);
2473 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
2474 
2475 	bdi_destroy(&fs_info->bdi);
2476 	cleanup_srcu_struct(&fs_info->subvol_srcu);
2477 
2478 	kfree(fs_info->extent_root);
2479 	kfree(fs_info->tree_root);
2480 	kfree(fs_info->chunk_root);
2481 	kfree(fs_info->dev_root);
2482 	kfree(fs_info->csum_root);
2483 	return 0;
2484 }
2485 
2486 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2487 {
2488 	int ret;
2489 	struct inode *btree_inode = buf->first_page->mapping->host;
2490 
2491 	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
2492 				     NULL);
2493 	if (!ret)
2494 		return ret;
2495 
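	/*
	 * verify_parent_transid returns 0 on a match, so invert it to
	 * answer "is this buffer uptodate?"
	 */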
2496 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2497 				    parent_transid);
2498 	return !ret;
2499 }
2500 
2501 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2502 {
2503 	struct inode *btree_inode = buf->first_page->mapping->host;
2504 	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2505 					  buf);
2506 }
2507 
2508 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2509 {
2510 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2511 	u64 transid = btrfs_header_generation(buf);
2512 	struct inode *btree_inode = root->fs_info->btree_inode;
2513 	int was_dirty;
2514 
2515 	btrfs_assert_tree_locked(buf);
2516 	if (transid != root->fs_info->generation) {
2517 		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2518 		       "found %llu running %llu\n",
2519 			(unsigned long long)buf->start,
2520 			(unsigned long long)transid,
2521 			(unsigned long long)root->fs_info->generation);
2522 		WARN_ON(1);
2523 	}
2524 	was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
2525 					    buf);
2526 	if (!was_dirty) {
2527 		spin_lock(&root->fs_info->delalloc_lock);
2528 		root->fs_info->dirty_metadata_bytes += buf->len;
2529 		spin_unlock(&root->fs_info->delalloc_lock);
2530 	}
2531 }
2532 
2533 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2534 {
2535 	/*
2536 	 * looks as though older kernels can get into trouble with
2537 	 * this code; they end up stuck in balance_dirty_pages forever
2538 	 */
2539 	u64 num_dirty;
2540 	unsigned long thresh = 32 * 1024 * 1024;
2541 
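	/* never block a task that is itself reclaiming memory */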
2542 	if (current->flags & PF_MEMALLOC)
2543 		return;
2544 
2545 	num_dirty = root->fs_info->dirty_metadata_bytes;
2546 
2547 	if (num_dirty > thresh) {
2548 		balance_dirty_pages_ratelimited_nr(
2549 				   root->fs_info->btree_inode->i_mapping, 1);
2550 	}
2551 	return;
2552 }
2553 
2554 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2555 {
2556 	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2557 	int ret;
2558 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2559 	if (ret == 0)
2560 		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2561 	return ret;
2562 }
2563 
2564 int btree_lock_page_hook(struct page *page)
2565 {
2566 	struct inode *inode = page->mapping->host;
2567 	struct btrfs_root *root = BTRFS_I(inode)->root;
2568 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2569 	struct extent_buffer *eb;
2570 	unsigned long len;
2571 	u64 bytenr = page_offset(page);
2572 
2573 	if (page->private == EXTENT_PAGE_PRIVATE)
2574 		goto out;
2575 
2576 	len = page->private >> 2;
2577 	eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
2578 	if (!eb)
2579 		goto out;
2580 
2581 	btrfs_tree_lock(eb);
2582 	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2583 
2584 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
2585 		spin_lock(&root->fs_info->delalloc_lock);
2586 		if (root->fs_info->dirty_metadata_bytes >= eb->len)
2587 			root->fs_info->dirty_metadata_bytes -= eb->len;
2588 		else
2589 			WARN_ON(1);
2590 		spin_unlock(&root->fs_info->delalloc_lock);
2591 	}
2592 
2593 	btrfs_tree_unlock(eb);
2594 	free_extent_buffer(eb);
2595 out:
2596 	lock_page(page);
2597 	return 0;
2598 }
2599 
2600 static struct extent_io_ops btree_extent_io_ops = {
2601 	.write_cache_pages_lock_hook = btree_lock_page_hook,
2602 	.readpage_end_io_hook = btree_readpage_end_io_hook,
2603 	.submit_bio_hook = btree_submit_bio_hook,
2604 	/* note we're sharing with inode.c for the merge bio hook */
2605 	.merge_bio_hook = btrfs_merge_bio_hook,
2606 };
2607