xref: /linux/fs/btrfs/scrub.c (revision 9e8ba5f3ec35cba4fd8a8bebda548c4db2651e40)
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - In case of a read error on files with nodatasum, map the file and read
 *    the extent to trigger a writeback of the good copy
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

43 
44 struct scrub_bio;
45 struct scrub_page;
46 struct scrub_dev;
47 static void scrub_bio_end_io(struct bio *bio, int err);
48 static void scrub_checksum(struct btrfs_work *work);
49 static int scrub_checksum_data(struct scrub_dev *sdev,
50 			       struct scrub_page *spag, void *buffer);
51 static int scrub_checksum_tree_block(struct scrub_dev *sdev,
52 				     struct scrub_page *spag, u64 logical,
53 				     void *buffer);
54 static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
55 static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
56 static void scrub_fixup_end_io(struct bio *bio, int err);
57 static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
58 			  struct page *page);
59 static void scrub_fixup(struct scrub_bio *sbio, int ix);
60 
61 #define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
62 #define SCRUB_BIOS_PER_DEV	16	/* 1 MB per device in flight */
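
/*
 * A quick sanity check of the numbers in the two defines above, assuming the
 * common 4 KB PAGE_SIZE (scrub requires sectorsize == PAGE_SIZE, see the
 * check in btrfs_scrub_dev below):
 *
 *	SCRUB_PAGES_PER_BIO * PAGE_SIZE = 16 * 4 KB  = 64 KB per bio
 *	SCRUB_BIOS_PER_DEV  * 64 KB     = 16 * 64 KB = 1 MB
 *
 * in flight per scrubbed device, which is what the comments on the defines
 * claim.
 */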

struct scrub_page {
	u64			flags;  /* extent flags */
	u64			generation;
	int			mirror_num;
	int			have_csum;
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_dev	*sdev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
	struct scrub_page	spag[SCRUB_PAGES_PER_BIO];
	u64			count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_dev {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
	struct btrfs_device	*dev;
	int			first_free;
	int			curr;
	atomic_t		in_flight;
	atomic_t		fixup_cnt;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};
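
/*
 * The bios[] array above doubles as an index-based free list: first_free
 * holds the index of the first unused scrub_bio, and each scrub_bio chains
 * to the next one through next_free (-1 terminates the list). A minimal
 * sketch of the protocol as used by scrub_page() and scrub_checksum() below
 * (pop/push are illustrative names only):
 *
 *	spin_lock(&sdev->list_lock);
 *	idx = sdev->first_free;				// pop a free bio
 *	sdev->first_free = sdev->bios[idx]->next_free;
 *	spin_unlock(&sdev->list_lock);
 *	...
 *	spin_lock(&sdev->list_lock);
 *	sbio->next_free = sdev->first_free;		// push it back
 *	sdev->first_free = sbio->index;
 *	spin_unlock(&sdev->list_lock);
 */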

struct scrub_fixup_nodatasum {
	struct scrub_dev	*sdev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};

static void scrub_free_csums(struct scrub_dev *sdev)
{
	while (!list_empty(&sdev->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static void scrub_free_bio(struct bio *bio)
{
	int i;
	struct page *last_page = NULL;

	if (!bio)
		return;

	for (i = 0; i < bio->bi_vcnt; ++i) {
		if (bio->bi_io_vec[i].bv_page == last_page)
			continue;
		last_page = bio->bi_io_vec[i].bv_page;
		__free_page(last_page);
	}
	bio_put(bio);
}

static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
	int i;

	if (!sdev)
		return;

	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio = sdev->bios[i];

		if (!sbio)
			break;

		scrub_free_bio(sbio->bio);
		kfree(sbio);
	}

	scrub_free_csums(sdev);
	kfree(sdev);
}

static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
	struct scrub_dev *sdev;
	int		i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;

	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
	if (!sdev)
		goto nomem;
	sdev->dev = dev;
	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sdev->bios[i] = sbio;

		sbio->index = i;
		sbio->sdev = sdev;
		sbio->count = 0;
		sbio->work.func = scrub_checksum;

		if (i != SCRUB_BIOS_PER_DEV-1)
			sdev->bios[i]->next_free = i + 1;
		else
			sdev->bios[i]->next_free = -1;
	}
	sdev->first_free = 0;
	sdev->curr = -1;
	atomic_set(&sdev->in_flight, 0);
	atomic_set(&sdev->fixup_cnt, 0);
	atomic_set(&sdev->cancel_req, 0);
	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sdev->csum_list);

	spin_lock_init(&sdev->list_lock);
	spin_lock_init(&sdev->stat_lock);
	init_waitqueue_head(&sdev->list_wait);
	return sdev;

nomem:
	scrub_free_dev(sdev);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, swarn->dev->name,
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, swarn->dev->name,
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
				int ix)
{
	struct btrfs_device *dev = sbio->sdev->dev;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	u32 item_size;
	int ret;
	u64 ref_root;
	u8 ref_level;
	unsigned long ptr = 0;
	const int bufsize = 4096;
	u64 extent_offset;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
	swarn.logical = sbio->logical + ix * PAGE_SIZE;
	swarn.errstr = errstr;
	swarn.dev = dev;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
	if (ret < 0)
		goto out;

	extent_offset = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
							&ref_root, &ref_level);
			printk(KERN_WARNING "%s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical, dev->name,
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
	} else {
		swarn.path = path;
		iterate_extent_inodes(fs_info, path, found_key.objectid,
					extent_offset,
					scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
	if (IS_ERR(local_root))
		return PTR_ERR(local_root);

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		struct btrfs_mapping_tree *map_tree;
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the time being, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_dev *sdev;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sdev = fixup->sdev;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.malloc_errors;
		spin_unlock(&sdev->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
						path, scrub_fixup_readpage,
						fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sdev->stat_lock);
	++sdev->stat.corrected_errors;
	spin_unlock(&sdev->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.uncorrectable_errors;
		spin_unlock(&sdev->stat_lock);
		printk_ratelimited(KERN_ERR "btrfs: unable to fixup "
					"(nodatasum) error at logical %llu\n",
					fixup->logical);
	}

	btrfs_free_path(path);
	kfree(fixup);

	/* see the caller (scrub_fixup) for why we're pretending to be paused
	 * in the scrub counters */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sdev->fixup_cnt);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sdev->list_wait);
}

/*
 * scrub_recheck_error gets called when either verification of the page
 * failed or the bio failed to read, e.g. with EIO. In the latter case,
 * recheck_error gets called for every page in the bio, even though only
 * one may be bad
 */
static int scrub_recheck_error(struct scrub_bio *sbio, int ix)
{
	struct scrub_dev *sdev = sbio->sdev;
	u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (sbio->err) {
		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector,
				   sbio->bio->bi_io_vec[ix].bv_page) == 0) {
			if (scrub_fixup_check(sbio, ix) == 0)
				return 0;
		}
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sbio, ix);
	} else {
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sbio, ix);
	}

	spin_lock(&sdev->stat_lock);
	++sdev->stat.read_errors;
	spin_unlock(&sdev->stat_lock);

	scrub_fixup(sbio, ix);
	return 1;
}

static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
{
	int ret = 1;
	struct page *page;
	void *buffer;
	u64 flags = sbio->spag[ix].flags;

	page = sbio->bio->bi_io_vec[ix].bv_page;
	buffer = kmap_atomic(page, KM_USER0);
	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		ret = scrub_checksum_data(sbio->sdev,
					  sbio->spag + ix, buffer);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = scrub_checksum_tree_block(sbio->sdev,
						sbio->spag + ix,
						sbio->logical + ix * PAGE_SIZE,
						buffer);
	} else {
		WARN_ON(1);
	}
	kunmap_atomic(buffer, KM_USER0);

	return ret;
}

static void scrub_fixup_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

static void scrub_fixup(struct scrub_bio *sbio, int ix)
{
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct btrfs_bio *bbio = NULL;
	struct scrub_fixup_nodatasum *fixup;
	u64 logical = sbio->logical + ix * PAGE_SIZE;
	u64 length;
	int i;
	int ret;
	DECLARE_COMPLETION_ONSTACK(complete);

	if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (sbio->spag[ix].have_csum == 0)) {
		fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
		if (!fixup)
			goto uncorrectable;
		fixup->sdev = sdev;
		fixup->logical = logical;
		fixup->root = fs_info->extent_root;
		fixup->mirror_num = sbio->spag[ix].mirror_num;
		/*
		 * increment scrubs_running to prevent cancel requests from
		 * completing as long as a fixup worker is running. we must also
		 * increment scrubs_paused to prevent deadlocking on pause
		 * requests used for transaction commits (as the worker uses a
		 * transaction context). it is safe to regard the fixup worker
		 * as paused for all practical matters. effectively, we only
		 * avoid cancellation requests from completing.
		 */
		mutex_lock(&fs_info->scrub_lock);
		atomic_inc(&fs_info->scrubs_running);
		atomic_inc(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		atomic_inc(&sdev->fixup_cnt);
		fixup->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work);
		return;
	}

	length = PAGE_SIZE;
	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
			      &bbio, 0);
	if (ret || !bbio || length < PAGE_SIZE) {
		printk(KERN_ERR
		       "scrub_fixup: btrfs_map_block failed us for %llu\n",
		       (unsigned long long)logical);
		WARN_ON(1);
		kfree(bbio);
		return;
	}

	if (bbio->num_stripes == 1)
		/* there aren't any replicas */
		goto uncorrectable;

	/*
	 * first find a good copy
	 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		if (i + 1 == sbio->spag[ix].mirror_num)
			continue;

		if (scrub_fixup_io(READ, bbio->stripes[i].dev->bdev,
				   bbio->stripes[i].physical >> 9,
				   sbio->bio->bi_io_vec[ix].bv_page)) {
			/* I/O-error, this is not a good copy */
			continue;
		}

		if (scrub_fixup_check(sbio, ix) == 0)
			break;
	}
	if (i == bbio->num_stripes)
		goto uncorrectable;

	if (!sdev->readonly) {
		/*
		 * bi_io_vec[ix].bv_page now contains good data, write it back
		 */
		if (scrub_fixup_io(WRITE, sdev->dev->bdev,
				   (sbio->physical + ix * PAGE_SIZE) >> 9,
				   sbio->bio->bi_io_vec[ix].bv_page)) {
			/* I/O-error, writeback failed, give up */
			goto uncorrectable;
		}
	}

	kfree(bbio);
	spin_lock(&sdev->stat_lock);
	++sdev->stat.corrected_errors;
	spin_unlock(&sdev->stat_lock);

	printk_ratelimited(KERN_ERR "btrfs: fixed up error at logical %llu\n",
			       (unsigned long long)logical);
	return;

uncorrectable:
	kfree(bbio);
	spin_lock(&sdev->stat_lock);
	++sdev->stat.uncorrectable_errors;
	spin_unlock(&sdev->stat_lock);

	printk_ratelimited(KERN_ERR "btrfs: unable to fixup (regular) error at "
				"logical %llu\n", (unsigned long long)logical);
}

static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
			 struct page *page)
{
	struct bio *bio = NULL;
	int ret;
	DECLARE_COMPLETION_ONSTACK(complete);

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = scrub_fixup_end_io;
	bio->bi_private = &complete;
	submit_bio(rw, bio);

	/* this will also unplug the queue */
	wait_for_completion(&complete);

	ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

static void scrub_checksum(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_dev *sdev = sbio->sdev;
	struct page *page;
	void *buffer;
	int i;
	u64 flags;
	u64 logical;
	int ret;

	if (sbio->err) {
		ret = 0;
		for (i = 0; i < sbio->count; ++i)
			ret |= scrub_recheck_error(sbio, i);
		if (!ret) {
			spin_lock(&sdev->stat_lock);
			++sdev->stat.unverified_errors;
			spin_unlock(&sdev->stat_lock);
		}

		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bio->bi_phys_segments = 0;
		sbio->bio->bi_idx = 0;

		for (i = 0; i < sbio->count; i++) {
			struct bio_vec *bi;
			bi = &sbio->bio->bi_io_vec[i];
			bi->bv_offset = 0;
			bi->bv_len = PAGE_SIZE;
		}
		goto out;
	}
	for (i = 0; i < sbio->count; ++i) {
		page = sbio->bio->bi_io_vec[i].bv_page;
		buffer = kmap_atomic(page, KM_USER0);
		flags = sbio->spag[i].flags;
		logical = sbio->logical + i * PAGE_SIZE;
		ret = 0;
		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
		} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
							logical, buffer);
		} else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
			BUG_ON(i);
			(void)scrub_checksum_super(sbio, buffer);
		} else {
			WARN_ON(1);
		}
		kunmap_atomic(buffer, KM_USER0);
		if (ret) {
			ret = scrub_recheck_error(sbio, i);
			if (!ret) {
				spin_lock(&sdev->stat_lock);
				++sdev->stat.unverified_errors;
				spin_unlock(&sdev->stat_lock);
			}
		}
	}

out:
	scrub_free_bio(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sdev->list_lock);
	sbio->next_free = sdev->first_free;
	sdev->first_free = sbio->index;
	spin_unlock(&sdev->list_lock);
	atomic_dec(&sdev->in_flight);
	wake_up(&sdev->list_wait);
}

static int scrub_checksum_data(struct scrub_dev *sdev,
			       struct scrub_page *spag, void *buffer)
{
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;
	struct btrfs_root *root = sdev->dev->dev_root;

	if (!spag->have_csum)
		return 0;

	crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, spag->csum, sdev->csum_size))
		fail = 1;

	spin_lock(&sdev->stat_lock);
	++sdev->stat.data_extents_scrubbed;
	sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
	if (fail)
		++sdev->stat.csum_errors;
	spin_unlock(&sdev->stat_lock);

	return fail;
}
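
/*
 * For reference, the checksum convention used above: the crc is seeded with
 * ~0, fed through btrfs_csum_data (crc32c at the time of writing) and then
 * finalized by btrfs_csum_final, which inverts the bits and stores the result
 * in little-endian byte order. A sketch of the same computation, assuming the
 * kernel's crc32c() helper:
 *
 *	u32 crc = ~(u32)0;
 *	crc = crc32c(crc, buffer, PAGE_SIZE);
 *	put_unaligned_le32(~crc, csum);
 *
 * Only the first csum_size bytes (4 for crc32c) are compared against the
 * stored checksum.
 */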

static int scrub_checksum_tree_block(struct scrub_dev *sdev,
				     struct scrub_page *spag, u64 logical,
				     void *buffer)
{
	struct btrfs_header *h;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	h = (struct btrfs_header *)buffer;

	if (logical != le64_to_cpu(h->bytenr))
		++fail;

	if (spag->generation != le64_to_cpu(h->generation))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
			      PAGE_SIZE - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, h->csum, sdev->csum_size))
		++crc_fail;

	spin_lock(&sdev->stat_lock);
	++sdev->stat.tree_extents_scrubbed;
	sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
	if (crc_fail)
		++sdev->stat.csum_errors;
	if (fail)
		++sdev->stat.verify_errors;
	spin_unlock(&sdev->stat_lock);

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
{
	struct btrfs_super_block *s;
	u64 logical;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	int fail = 0;

	s = (struct btrfs_super_block *)buffer;
	logical = sbio->logical;

	if (logical != le64_to_cpu(s->bytenr))
		++fail;

	if (sbio->spag[0].generation != le64_to_cpu(s->generation))
		++fail;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
			      PAGE_SIZE - BTRFS_CSUM_SIZE);
	btrfs_csum_final(crc, csum);
	if (memcmp(csum, s->csum, sbio->sdev->csum_size))
		++fail;

	if (fail) {
		/*
		 * if we find an error in a super block, we just report it.
		 * it will get rewritten with the next transaction commit
		 * anyway
		 */
		spin_lock(&sdev->stat_lock);
		++sdev->stat.super_errors;
		spin_unlock(&sdev->stat_lock);
	}

	return fail;
}

static int scrub_submit(struct scrub_dev *sdev)
{
	struct scrub_bio *sbio;

	if (sdev->curr == -1)
		return 0;

	sbio = sdev->bios[sdev->curr];
	sbio->err = 0;
	sdev->curr = -1;
	atomic_inc(&sdev->in_flight);

	submit_bio(READ, sbio->bio);

	return 0;
}

static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
		      u64 physical, u64 flags, u64 gen, int mirror_num,
		      u8 *csum, int force)
{
	struct scrub_bio *sbio;
	struct page *page;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sdev->curr == -1) {
		spin_lock(&sdev->list_lock);
		sdev->curr = sdev->first_free;
		if (sdev->curr != -1) {
			sdev->first_free = sdev->bios[sdev->curr]->next_free;
			sdev->bios[sdev->curr]->next_free = -1;
			sdev->bios[sdev->curr]->count = 0;
			spin_unlock(&sdev->list_lock);
		} else {
			spin_unlock(&sdev->list_lock);
			wait_event(sdev->list_wait, sdev->first_free != -1);
		}
	}
	sbio = sdev->bios[sdev->curr];
	if (sbio->count == 0) {
		struct bio *bio;

		sbio->physical = physical;
		sbio->logical = logical;
		bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
		if (!bio)
			return -ENOMEM;

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sdev->dev->bdev;
		bio->bi_sector = sbio->physical >> 9;
		sbio->err = 0;
		sbio->bio = bio;
	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
		   sbio->logical + sbio->count * PAGE_SIZE != logical) {
		ret = scrub_submit(sdev);
		if (ret)
			return ret;
		goto again;
	}
	sbio->spag[sbio->count].flags = flags;
	sbio->spag[sbio->count].generation = gen;
	sbio->spag[sbio->count].have_csum = 0;
	sbio->spag[sbio->count].mirror_num = mirror_num;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return -ENOMEM;

	ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
	if (!ret) {
		__free_page(page);
		ret = scrub_submit(sdev);
		if (ret)
			return ret;
		goto again;
	}

	if (csum) {
		sbio->spag[sbio->count].have_csum = 1;
		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
	}
	++sbio->count;
	if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
		int ret;

		ret = scrub_submit(sdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	int ret = 0;
	unsigned long i;
	unsigned long num_sectors;
	u32 sectorsize = sdev->dev->dev_root->sectorsize;

	while (!list_empty(&sdev->csum_list)) {
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sdev->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	num_sectors = sum->len / sectorsize;
	for (i = 0; i < num_sectors; ++i) {
		if (sum->sums[i].bytenr == logical) {
			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
			ret = 1;
			break;
		}
	}
	if (ret && i == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return ret;
}

/* scrub_extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
			u64 physical, u64 flags, u64 gen, int mirror_num)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];

	while (len) {
		u64 l = min_t(u64, len, PAGE_SIZE);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sdev, logical, l, csum);
			if (have_csum == 0)
				++sdev->stat.no_csum;
		}
		ret = scrub_page(sdev, logical, l, physical, flags, gen,
				 mirror_num, have_csum ? csum : NULL, 0);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
	struct map_lookup *map, int num, u64 base, u64 length)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	int i;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;

	u64 increment = map->stripe_len;
	u64 offset;

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}
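
	/*
	 * A worked example of the geometry above (illustrative numbers, not
	 * from the source): scrubbing stripe num=3 of a RAID10 chunk with
	 * num_stripes=4 and sub_stripes=2 gives factor = 4 / 2 = 2, so
	 *
	 *	offset     = stripe_len * (3 / 2) = stripe_len * 1
	 *	increment  = stripe_len * 2
	 *	mirror_num = 3 % 2 + 1 = 2
	 *
	 * i.e. this device holds the second copy of every other
	 * stripe_len-sized slice of the chunk. For RAID0 with num_stripes=2
	 * and num=1, offset starts one stripe in, increment covers both
	 * devices' stripes (stripe_len * 2), and there is only one copy
	 * (mirror_num=1).
	 */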

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and the csum tree and
	 * wait for completion. During readahead, the scrub is officially
	 * paused to not hold off transaction commits
	 */
	logical = base + offset;

	wait_event(sdev->list_wait,
		   atomic_read(&sdev->in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_EXTENT_ITEM_KEY;
	key_end.offset = (u64)0;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
		   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

	/*
	 * collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1 MB
	 * (4 bytes of crc32 per 4 KB sector works out to roughly 1 MB of
	 * csums per 1 GB of data)
	 */
	blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	ret = 0;
	for (i = 0; i < nstripes; ++i) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sdev->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			scrub_submit(sdev);
			wait_event(sdev->list_wait,
				   atomic_read(&sdev->in_flight) == 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		ret = btrfs_lookup_csums_range(csum_root, logical,
					       logical + map->stripe_len - 1,
					       &sdev->csum_list, 1);
		if (ret)
			goto out;

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* there's no smaller item, so stick with the
				 * larger one */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid + key.offset <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len)
				break;

			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
				goto next;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}

			/*
			 * trim extent to this stripe
			 */
			if (key.objectid < logical) {
				key.offset -= logical - key.objectid;
				key.objectid = logical;
			}
			if (key.objectid + key.offset >
			    logical + map->stripe_len) {
				key.offset = logical + map->stripe_len -
					     key.objectid;
			}

			ret = scrub_extent(sdev, key.objectid, key.offset,
					   key.objectid - logical + physical,
					   flags, generation, mirror_num);
			if (ret)
				goto out;

next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sdev->stat_lock);
		sdev->stat.last_physical = physical;
		spin_unlock(&sdev->stat_lock);
	}
	/* push queued extents */
	scrub_submit(sdev);

out:
	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
	u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length)
{
	struct btrfs_mapping_tree *map_tree =
		&sdev->dev->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = -EINVAL;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev == sdev->dev) {
			ret = scrub_stripe(sdev, map, i, chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = sdev->dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != sdev->dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
				  chunk_offset, length);
		btrfs_put_block_group(cache);
		if (ret)
			break;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from search_slot or next_leaf,
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
{
	int	i;
	u64	bytenr;
	u64	gen;
	int	ret;
	struct btrfs_device *device = sdev->dev;
	struct btrfs_root *root = device->dev_root;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
			break;

		ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
				 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
		if (ret)
			return ret;
	}
	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start worker if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
			   fs_info->thread_pool_size, &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		btrfs_start_workers(&fs_info->scrub_workers, 1);
	}
	++fs_info->scrub_workers_refcnt;
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0)
		btrfs_stop_workers(&fs_info->scrub_workers);
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}

int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
		    struct btrfs_scrub_progress *progress, int readonly)
{
	struct scrub_dev *sdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(root->fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (root->sectorsize != PAGE_SIZE ||
	    root->sectorsize != root->leafsize ||
	    root->sectorsize != root->nodesize) {
		printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
		return -EINVAL;
	}

	ret = scrub_workers_get(root);
	if (ret)
		return ret;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev || dev->missing) {
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}

	if (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -EINPROGRESS;
	}
	sdev = scrub_setup_dev(dev);
	if (IS_ERR(sdev)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return PTR_ERR(sdev);
	}
	sdev->readonly = readonly;
	dev->scrub_device = sdev;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	down_read(&fs_info->scrub_super_lock);
	ret = scrub_supers(sdev);
	up_read(&fs_info->scrub_super_lock);

	if (!ret)
		ret = scrub_enumerate_chunks(sdev, start, end);

	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);

	if (progress)
		memcpy(progress, &sdev->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_dev(sdev);
	scrub_workers_put(root);

	return ret;
}

int btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
	return 0;
}

int btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
	return 0;
}

int btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
	return 0;
}

int btrfs_scrub_cancel(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct scrub_dev *sdev;

	mutex_lock(&fs_info->scrub_lock);
	sdev = dev->scrub_device;
	if (!sdev) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sdev->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_device *dev;
	int ret;

	/*
	 * we have to hold the device_list_mutex here so the device
	 * does not go away in cancel_dev. FIXME: find a better solution
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}
	ret = btrfs_scrub_cancel_dev(root, dev);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return ret;
}

int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_dev *sdev = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (dev)
		sdev = dev->scrub_device;
	if (sdev)
		memcpy(progress, &sdev->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
}
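
/*
 * For context, a sketch of how these entry points are typically reached (the
 * exact call chain is an assumption based on the btrfs ioctl interface, not
 * part of this file): the "btrfs scrub start" command issues the scrub ioctl
 * on a mounted filesystem, and the handler in fs/btrfs/ioctl.c ends up
 * calling something like
 *
 *	ret = btrfs_scrub_dev(root, args->devid, args->start, args->end,
 *			      &args->progress,
 *			      args->flags & BTRFS_SCRUB_READONLY);
 *
 * with progress polled via btrfs_scrub_progress() and cancellation requested
 * through btrfs_scrub_cancel() or btrfs_scrub_cancel_devid().
 */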