xref: /linux/fs/btrfs/scrub.c (revision fea88a0c02822fbb91a0b8301bf9af04377876a3)
1 /*
2  * Copyright (C) 2011 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include "ctree.h"
22 #include "volumes.h"
23 #include "disk-io.h"
24 #include "ordered-data.h"
25 #include "transaction.h"
26 #include "backref.h"
27 #include "extent_io.h"
28 #include "check-integrity.h"
29 
30 /*
31  * This is only the first step towards a full-featured scrub. It reads all
32  * extents and super blocks and verifies the checksums. In case a bad checksum
33  * is found or the extent cannot be read, good data will be written back if
34  * any can be found.
35  *
36  * Future enhancements:
37  *  - In case an unrepairable extent is encountered, track which files are
38  *    affected and report them
39  *  - track and record media errors, throw out bad devices
40  *  - add a mode to also read unallocated space
41  */
42 
43 struct scrub_block;
44 struct scrub_dev;
45 
46 #define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
47 #define SCRUB_BIOS_PER_DEV	16	/* 1 MB per device in flight */
48 #define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
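/*
 * note: the per-bio and per-block sizes above assume 4k pages
 * (16 pages * 4k = 64k per bio, 16 bios * 64k = 1 MB in flight per device)
 */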
49 
50 struct scrub_page {
51 	struct scrub_block	*sblock;
52 	struct page		*page;
53 	struct block_device	*bdev;
54 	u64			flags;  /* extent flags */
55 	u64			generation;
56 	u64			logical;
57 	u64			physical;
58 	struct {
59 		unsigned int	mirror_num:8;
60 		unsigned int	have_csum:1;
61 		unsigned int	io_error:1;
62 	};
63 	u8			csum[BTRFS_CSUM_SIZE];
64 };
65 
66 struct scrub_bio {
67 	int			index;
68 	struct scrub_dev	*sdev;
69 	struct bio		*bio;
70 	int			err;
71 	u64			logical;
72 	u64			physical;
73 	struct scrub_page	*pagev[SCRUB_PAGES_PER_BIO];
74 	int			page_count;
75 	int			next_free;
76 	struct btrfs_work	work;
77 };
78 
79 struct scrub_block {
80 	struct scrub_page	pagev[SCRUB_MAX_PAGES_PER_BLOCK];
81 	int			page_count;
82 	atomic_t		outstanding_pages;
83 	atomic_t		ref_count; /* free mem on transition to zero */
84 	struct scrub_dev	*sdev;
85 	struct {
86 		unsigned int	header_error:1;
87 		unsigned int	checksum_error:1;
88 		unsigned int	no_io_error_seen:1;
89 	};
90 };
91 
92 struct scrub_dev {
93 	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
94 	struct btrfs_device	*dev;
95 	int			first_free;
96 	int			curr;
97 	atomic_t		in_flight;
98 	atomic_t		fixup_cnt;
99 	spinlock_t		list_lock;
100 	wait_queue_head_t	list_wait;
101 	u16			csum_size;
102 	struct list_head	csum_list;
103 	atomic_t		cancel_req;
104 	int			readonly;
105 	int			pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
106 	u32			sectorsize;
107 	u32			nodesize;
108 	u32			leafsize;
109 	/*
110 	 * statistics
111 	 */
112 	struct btrfs_scrub_progress stat;
113 	spinlock_t		stat_lock;
114 };
115 
116 struct scrub_fixup_nodatasum {
117 	struct scrub_dev	*sdev;
118 	u64			logical;
119 	struct btrfs_root	*root;
120 	struct btrfs_work	work;
121 	int			mirror_num;
122 };
123 
124 struct scrub_warning {
125 	struct btrfs_path	*path;
126 	u64			extent_item_size;
127 	char			*scratch_buf;
128 	char			*msg_buf;
129 	const char		*errstr;
130 	sector_t		sector;
131 	u64			logical;
132 	struct btrfs_device	*dev;
133 	int			msg_bufsize;
134 	int			scratch_bufsize;
135 };
136 
137 
138 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
139 static int scrub_setup_recheck_block(struct scrub_dev *sdev,
140 				     struct btrfs_mapping_tree *map_tree,
141 				     u64 length, u64 logical,
142 				     struct scrub_block *sblock);
143 static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
144 			       struct scrub_block *sblock, int is_metadata,
145 			       int have_csum, u8 *csum, u64 generation,
146 			       u16 csum_size);
147 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
148 					 struct scrub_block *sblock,
149 					 int is_metadata, int have_csum,
150 					 const u8 *csum, u64 generation,
151 					 u16 csum_size);
152 static void scrub_complete_bio_end_io(struct bio *bio, int err);
153 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
154 					     struct scrub_block *sblock_good,
155 					     int force_write);
156 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
157 					    struct scrub_block *sblock_good,
158 					    int page_num, int force_write);
159 static int scrub_checksum_data(struct scrub_block *sblock);
160 static int scrub_checksum_tree_block(struct scrub_block *sblock);
161 static int scrub_checksum_super(struct scrub_block *sblock);
162 static void scrub_block_get(struct scrub_block *sblock);
163 static void scrub_block_put(struct scrub_block *sblock);
164 static int scrub_add_page_to_bio(struct scrub_dev *sdev,
165 				 struct scrub_page *spage);
166 static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
167 		       u64 physical, u64 flags, u64 gen, int mirror_num,
168 		       u8 *csum, int force);
169 static void scrub_bio_end_io(struct bio *bio, int err);
170 static void scrub_bio_end_io_worker(struct btrfs_work *work);
171 static void scrub_block_complete(struct scrub_block *sblock);
172 
173 
174 static void scrub_free_csums(struct scrub_dev *sdev)
175 {
176 	while (!list_empty(&sdev->csum_list)) {
177 		struct btrfs_ordered_sum *sum;
178 		sum = list_first_entry(&sdev->csum_list,
179 				       struct btrfs_ordered_sum, list);
180 		list_del(&sum->list);
181 		kfree(sum);
182 	}
183 }
184 
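/*
 * free a per-device scrub context: drop the block references still held by a
 * partially filled bio (this happens when scrub is cancelled), free all
 * scrub_bios, the collected checksums and the context itself
 */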
185 static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
186 {
187 	int i;
188 
189 	if (!sdev)
190 		return;
191 
192 	/* this can happen when scrub is cancelled */
193 	if (sdev->curr != -1) {
194 		struct scrub_bio *sbio = sdev->bios[sdev->curr];
195 
196 		for (i = 0; i < sbio->page_count; i++) {
197 			BUG_ON(!sbio->pagev[i]);
198 			BUG_ON(!sbio->pagev[i]->page);
199 			scrub_block_put(sbio->pagev[i]->sblock);
200 		}
201 		bio_put(sbio->bio);
202 	}
203 
204 	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
205 		struct scrub_bio *sbio = sdev->bios[i];
206 
207 		if (!sbio)
208 			break;
209 		kfree(sbio);
210 	}
211 
212 	scrub_free_csums(sdev);
213 	kfree(sdev);
214 }
215 
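/*
 * allocate and initialize a per-device scrub context: SCRUB_BIOS_PER_DEV
 * scrub_bios chained into a free list, the fs geometry (node/leaf/sector
 * size, csum size) and an empty checksum list
 */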
216 static noinline_for_stack
217 struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
218 {
219 	struct scrub_dev *sdev;
220 	int		i;
221 	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
222 	int pages_per_bio;
223 
224 	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
225 			      bio_get_nr_vecs(dev->bdev));
226 	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
227 	if (!sdev)
228 		goto nomem;
229 	sdev->dev = dev;
230 	sdev->pages_per_bio = pages_per_bio;
231 	sdev->curr = -1;
232 	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
233 		struct scrub_bio *sbio;
234 
235 		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
236 		if (!sbio)
237 			goto nomem;
238 		sdev->bios[i] = sbio;
239 
240 		sbio->index = i;
241 		sbio->sdev = sdev;
242 		sbio->page_count = 0;
243 		sbio->work.func = scrub_bio_end_io_worker;
244 
245 		if (i != SCRUB_BIOS_PER_DEV-1)
246 			sdev->bios[i]->next_free = i + 1;
247 		else
248 			sdev->bios[i]->next_free = -1;
249 	}
250 	sdev->first_free = 0;
251 	sdev->nodesize = dev->dev_root->nodesize;
252 	sdev->leafsize = dev->dev_root->leafsize;
253 	sdev->sectorsize = dev->dev_root->sectorsize;
254 	atomic_set(&sdev->in_flight, 0);
255 	atomic_set(&sdev->fixup_cnt, 0);
256 	atomic_set(&sdev->cancel_req, 0);
257 	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
258 	INIT_LIST_HEAD(&sdev->csum_list);
259 
260 	spin_lock_init(&sdev->list_lock);
261 	spin_lock_init(&sdev->stat_lock);
262 	init_waitqueue_head(&sdev->list_wait);
263 	return sdev;
264 
265 nomem:
266 	scrub_free_dev(sdev);
267 	return ERR_PTR(-ENOMEM);
268 }
269 
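/*
 * callback for iterate_extent_inodes: resolve all paths of one inode that
 * references the damaged extent and print one warning line per path
 */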
270 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
271 {
272 	u64 isize;
273 	u32 nlink;
274 	int ret;
275 	int i;
276 	struct extent_buffer *eb;
277 	struct btrfs_inode_item *inode_item;
278 	struct scrub_warning *swarn = ctx;
279 	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
280 	struct inode_fs_paths *ipath = NULL;
281 	struct btrfs_root *local_root;
282 	struct btrfs_key root_key;
283 
284 	root_key.objectid = root;
285 	root_key.type = BTRFS_ROOT_ITEM_KEY;
286 	root_key.offset = (u64)-1;
287 	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
288 	if (IS_ERR(local_root)) {
289 		ret = PTR_ERR(local_root);
290 		goto err;
291 	}
292 
293 	ret = inode_item_info(inum, 0, local_root, swarn->path);
294 	if (ret) {
295 		btrfs_release_path(swarn->path);
296 		goto err;
297 	}
298 
299 	eb = swarn->path->nodes[0];
300 	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
301 					struct btrfs_inode_item);
302 	isize = btrfs_inode_size(eb, inode_item);
303 	nlink = btrfs_inode_nlink(eb, inode_item);
304 	btrfs_release_path(swarn->path);
305 
306 	ipath = init_ipath(4096, local_root, swarn->path);
307 	if (IS_ERR(ipath)) {
308 		ret = PTR_ERR(ipath);
309 		ipath = NULL;
310 		goto err;
311 	}
312 	ret = paths_from_inode(inum, ipath);
313 
314 	if (ret < 0)
315 		goto err;
316 
317 	/*
318 	 * we deliberately ignore the fact that ipath might have been too small
319 	 * to hold all of the paths here
320 	 */
321 	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
322 		printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
323 			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
324 			"length %llu, links %u (path: %s)\n", swarn->errstr,
325 			swarn->logical, swarn->dev->name,
326 			(unsigned long long)swarn->sector, root, inum, offset,
327 			min(isize - offset, (u64)PAGE_SIZE), nlink,
328 			(char *)(unsigned long)ipath->fspath->val[i]);
329 
330 	free_ipath(ipath);
331 	return 0;
332 
333 err:
334 	printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
335 		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
336 		"resolving failed with ret=%d\n", swarn->errstr,
337 		swarn->logical, swarn->dev->name,
338 		(unsigned long long)swarn->sector, root, inum, offset, ret);
339 
340 	free_ipath(ipath);
341 	return 0;
342 }
343 
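/*
 * print a warning describing where a corruption was found: for metadata, the
 * owning tree and level are resolved from the backrefs; for data, all
 * referencing inodes and their paths are printed
 */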
344 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
345 {
346 	struct btrfs_device *dev = sblock->sdev->dev;
347 	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
348 	struct btrfs_path *path;
349 	struct btrfs_key found_key;
350 	struct extent_buffer *eb;
351 	struct btrfs_extent_item *ei;
352 	struct scrub_warning swarn;
353 	u32 item_size;
354 	int ret;
355 	u64 ref_root;
356 	u8 ref_level;
357 	unsigned long ptr = 0;
358 	const int bufsize = 4096;
359 	u64 extent_item_pos;
360 
361 	path = btrfs_alloc_path();
362 
363 	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
364 	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
365 	BUG_ON(sblock->page_count < 1);
366 	swarn.sector = (sblock->pagev[0].physical) >> 9;
367 	swarn.logical = sblock->pagev[0].logical;
368 	swarn.errstr = errstr;
369 	swarn.dev = dev;
370 	swarn.msg_bufsize = bufsize;
371 	swarn.scratch_bufsize = bufsize;
372 
373 	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
374 		goto out;
375 
376 	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
377 	if (ret < 0)
378 		goto out;
379 
380 	extent_item_pos = swarn.logical - found_key.objectid;
381 	swarn.extent_item_size = found_key.offset;
382 
383 	eb = path->nodes[0];
384 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
385 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
386 	btrfs_release_path(path);
387 
388 	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
389 		do {
390 			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
391 							&ref_root, &ref_level);
392 			printk(KERN_WARNING
393 				"btrfs: %s at logical %llu on dev %s, "
394 				"sector %llu: metadata %s (level %d) in tree "
395 				"%llu\n", errstr, swarn.logical, dev->name,
396 				(unsigned long long)swarn.sector,
397 				ref_level ? "node" : "leaf",
398 				ret < 0 ? -1 : ref_level,
399 				ret < 0 ? -1 : ref_root);
400 		} while (ret != 1);
401 	} else {
402 		swarn.path = path;
403 		iterate_extent_inodes(fs_info, found_key.objectid,
404 					extent_item_pos, 1,
405 					scrub_print_warning_inode, &swarn);
406 	}
407 
408 out:
409 	btrfs_free_path(path);
410 	kfree(swarn.scratch_buf);
411 	kfree(swarn.msg_buf);
412 }
413 
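/*
 * callback for iterate_inodes_from_logical: try to repair one page of a
 * nodatasum extent. If a clean copy is already in the page cache, it is
 * written back via repair_io_failure; otherwise the page is read with the
 * failed mirror number so that the generic read path repairs it.
 * Returns 1 on success so that the inode iteration stops.
 */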
414 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
415 {
416 	struct page *page = NULL;
417 	unsigned long index;
418 	struct scrub_fixup_nodatasum *fixup = ctx;
419 	int ret;
420 	int corrected = 0;
421 	struct btrfs_key key;
422 	struct inode *inode = NULL;
423 	u64 end = offset + PAGE_SIZE - 1;
424 	struct btrfs_root *local_root;
425 
426 	key.objectid = root;
427 	key.type = BTRFS_ROOT_ITEM_KEY;
428 	key.offset = (u64)-1;
429 	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
430 	if (IS_ERR(local_root))
431 		return PTR_ERR(local_root);
432 
433 	key.type = BTRFS_INODE_ITEM_KEY;
434 	key.objectid = inum;
435 	key.offset = 0;
436 	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
437 	if (IS_ERR(inode))
438 		return PTR_ERR(inode);
439 
440 	index = offset >> PAGE_CACHE_SHIFT;
441 
442 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
443 	if (!page) {
444 		ret = -ENOMEM;
445 		goto out;
446 	}
447 
448 	if (PageUptodate(page)) {
449 		struct btrfs_mapping_tree *map_tree;
450 		if (PageDirty(page)) {
451 			/*
452 			 * we need to write the data to the defective sector. the
453 			 * data that was in that sector is not in memory,
454 			 * because the page was modified. we must not write the
455 			 * modified page to that sector.
456 			 *
457 			 * TODO: what could be done here: wait for the delalloc
458 			 *       runner to write out that page (might involve
459 			 *       COW) and see whether the sector is still
460 			 *       referenced afterwards.
461 			 *
462 			 * For the time being, we'll treat this error as
463 			 * uncorrectable, although there is a chance that a
464 			 * later scrub will find the bad sector again when
465 			 * there is no dirty page in memory.
466 			 */
467 			ret = -EIO;
468 			goto out;
469 		}
470 		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
471 		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
472 					fixup->logical, page,
473 					fixup->mirror_num);
474 		unlock_page(page);
475 		corrected = !ret;
476 	} else {
477 		/*
478 		 * we need to get good data first. the general readpage path
479 		 * will call repair_io_failure for us; we just have to make
480 		 * sure we read the bad mirror.
481 		 */
482 		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
483 					EXTENT_DAMAGED, GFP_NOFS);
484 		if (ret) {
485 			/* set_extent_bits should give proper error */
486 			WARN_ON(ret > 0);
487 			if (ret > 0)
488 				ret = -EFAULT;
489 			goto out;
490 		}
491 
492 		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
493 						btrfs_get_extent,
494 						fixup->mirror_num);
495 		wait_on_page_locked(page);
496 
497 		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
498 						end, EXTENT_DAMAGED, 0, NULL);
499 		if (!corrected)
500 			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
501 						EXTENT_DAMAGED, GFP_NOFS);
502 	}
503 
504 out:
505 	if (page)
506 		put_page(page);
507 	if (inode)
508 		iput(inode);
509 
510 	if (ret < 0)
511 		return ret;
512 
513 	if (ret == 0 && corrected) {
514 		/*
515 		 * we only need to call readpage for one of the inodes belonging
516 		 * to this extent. so make iterate_extent_inodes stop
517 		 */
518 		return 1;
519 	}
520 
521 	return -EIO;
522 }
523 
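/*
 * worker for repairing data extents that carry no checksum: iterate over all
 * inodes that reference the bad logical address and trigger the repair via
 * scrub_fixup_readpage, then update the corrected/uncorrectable statistics
 */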
524 static void scrub_fixup_nodatasum(struct btrfs_work *work)
525 {
526 	int ret;
527 	struct scrub_fixup_nodatasum *fixup;
528 	struct scrub_dev *sdev;
529 	struct btrfs_trans_handle *trans = NULL;
530 	struct btrfs_fs_info *fs_info;
531 	struct btrfs_path *path;
532 	int uncorrectable = 0;
533 
534 	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
535 	sdev = fixup->sdev;
536 	fs_info = fixup->root->fs_info;
537 
538 	path = btrfs_alloc_path();
539 	if (!path) {
540 		spin_lock(&sdev->stat_lock);
541 		++sdev->stat.malloc_errors;
542 		spin_unlock(&sdev->stat_lock);
543 		uncorrectable = 1;
544 		goto out;
545 	}
546 
547 	trans = btrfs_join_transaction(fixup->root);
548 	if (IS_ERR(trans)) {
549 		uncorrectable = 1;
550 		goto out;
551 	}
552 
553 	/*
554 	 * the idea is to trigger a regular read through the standard path. we
555 	 * read a page from the (failed) logical address by specifying the
556 	 * corresponding copy number of the failed sector. thus, that readpage is
557 	 * expected to fail.
558 	 * that is the point where on-the-fly error correction will kick in
559 	 * (once the read has finished) and rewrite the failed sector if a good copy
560 	 * can be found.
561 	 */
562 	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
563 						path, scrub_fixup_readpage,
564 						fixup);
565 	if (ret < 0) {
566 		uncorrectable = 1;
567 		goto out;
568 	}
569 	WARN_ON(ret != 1);
570 
571 	spin_lock(&sdev->stat_lock);
572 	++sdev->stat.corrected_errors;
573 	spin_unlock(&sdev->stat_lock);
574 
575 out:
576 	if (trans && !IS_ERR(trans))
577 		btrfs_end_transaction(trans, fixup->root);
578 	if (uncorrectable) {
579 		spin_lock(&sdev->stat_lock);
580 		++sdev->stat.uncorrectable_errors;
581 		spin_unlock(&sdev->stat_lock);
582 		printk_ratelimited(KERN_ERR
583 			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
584 			(unsigned long long)fixup->logical, sdev->dev->name);
585 	}
586 
587 	btrfs_free_path(path);
588 	kfree(fixup);
589 
590 	/* see the caller for why we pretend to be paused in the scrub counters */
591 	mutex_lock(&fs_info->scrub_lock);
592 	atomic_dec(&fs_info->scrubs_running);
593 	atomic_dec(&fs_info->scrubs_paused);
594 	mutex_unlock(&fs_info->scrub_lock);
595 	atomic_dec(&sdev->fixup_cnt);
596 	wake_up(&fs_info->scrub_pause_wait);
597 	wake_up(&sdev->list_wait);
598 }
599 
600 /*
601  * scrub_handle_errored_block gets called when either verification of the
602  * pages failed or the bio failed to read, e.g. with EIO. In the latter
603  * case, this function handles all pages in the bio, even though only one
604  * may be bad.
605  * The goal of this function is to repair the errored block by using the
606  * contents of one of the mirrors.
607  */
608 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
609 {
610 	struct scrub_dev *sdev = sblock_to_check->sdev;
611 	struct btrfs_fs_info *fs_info;
612 	u64 length;
613 	u64 logical;
614 	u64 generation;
615 	unsigned int failed_mirror_index;
616 	unsigned int is_metadata;
617 	unsigned int have_csum;
618 	u8 *csum;
619 	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
620 	struct scrub_block *sblock_bad;
621 	int ret;
622 	int mirror_index;
623 	int page_num;
624 	int success;
625 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
626 				      DEFAULT_RATELIMIT_BURST);
627 
628 	BUG_ON(sblock_to_check->page_count < 1);
629 	fs_info = sdev->dev->dev_root->fs_info;
630 	length = sblock_to_check->page_count * PAGE_SIZE;
631 	logical = sblock_to_check->pagev[0].logical;
632 	generation = sblock_to_check->pagev[0].generation;
633 	BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
634 	failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
635 	is_metadata = !(sblock_to_check->pagev[0].flags &
636 			BTRFS_EXTENT_FLAG_DATA);
637 	have_csum = sblock_to_check->pagev[0].have_csum;
638 	csum = sblock_to_check->pagev[0].csum;
639 
640 	/*
641 	 * read all mirrors one after the other. This includes re-reading
642 	 * the extent or metadata block that failed (which is the reason
643 	 * this fixup code is called), this time page by page in order to
644 	 * know which pages caused I/O errors and which ones are good
645 	 * (for all mirrors).
646 	 * The goal is to handle the situation where more than one
647 	 * mirror contains I/O errors, but the errors do not
648 	 * overlap, i.e. the data can be repaired by selecting the
649 	 * pages from those mirrors without I/O error on the
650 	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
651 	 * would be that mirror #1 has an I/O error on the first page,
652 	 * the second page is good, and mirror #2 has an I/O error on
653 	 * the second page, but the first page is good.
654 	 * Then the first page of the first mirror can be repaired by
655 	 * taking the first page of the second mirror, and the
656 	 * second page of the second mirror can be repaired by
657 	 * copying the contents of the 2nd page of the 1st mirror.
658 	 * One more note: if the pages of one mirror contain I/O
659 	 * errors, the checksum cannot be verified. In order to get
660 	 * the best data for repairing, the first attempt is to find
661 	 * a mirror without I/O errors and with a validated checksum.
662 	 * Only if this is not possible are the pages picked from
663 	 * mirrors with I/O errors without considering the checksum.
664 	 * If the latter is the case, at the end, the checksum of the
665 	 * repaired area is verified in order to correctly maintain
666 	 * the statistics.
667 	 */
668 
669 	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
670 				     sizeof(*sblocks_for_recheck),
671 				     GFP_NOFS);
672 	if (!sblocks_for_recheck) {
673 		spin_lock(&sdev->stat_lock);
674 		sdev->stat.malloc_errors++;
675 		sdev->stat.read_errors++;
676 		sdev->stat.uncorrectable_errors++;
677 		spin_unlock(&sdev->stat_lock);
678 		goto out;
679 	}
680 
681 	/* setup the context, map the logical blocks and alloc the pages */
682 	ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
683 					logical, sblocks_for_recheck);
684 	if (ret) {
685 		spin_lock(&sdev->stat_lock);
686 		sdev->stat.read_errors++;
687 		sdev->stat.uncorrectable_errors++;
688 		spin_unlock(&sdev->stat_lock);
689 		goto out;
690 	}
691 	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
692 	sblock_bad = sblocks_for_recheck + failed_mirror_index;
693 
694 	/* build and submit the bios for the failed mirror, check checksums */
695 	ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
696 				  csum, generation, sdev->csum_size);
697 	if (ret) {
698 		spin_lock(&sdev->stat_lock);
699 		sdev->stat.read_errors++;
700 		sdev->stat.uncorrectable_errors++;
701 		spin_unlock(&sdev->stat_lock);
702 		goto out;
703 	}
704 
705 	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
706 	    sblock_bad->no_io_error_seen) {
707 		/*
708 		 * the error disappeared after reading page by page, or
709 		 * the area was part of a huge bio and other parts of the
710 		 * bio caused I/O errors, or the block layer merged several
711 		 * read requests into one and the error is caused by a
712 		 * different bio (usually one of the two latter cases is
713 		 * the cause)
714 		 */
715 		spin_lock(&sdev->stat_lock);
716 		sdev->stat.unverified_errors++;
717 		spin_unlock(&sdev->stat_lock);
718 
719 		goto out;
720 	}
721 
722 	if (!sblock_bad->no_io_error_seen) {
723 		spin_lock(&sdev->stat_lock);
724 		sdev->stat.read_errors++;
725 		spin_unlock(&sdev->stat_lock);
726 		if (__ratelimit(&_rs))
727 			scrub_print_warning("i/o error", sblock_to_check);
728 	} else if (sblock_bad->checksum_error) {
729 		spin_lock(&sdev->stat_lock);
730 		sdev->stat.csum_errors++;
731 		spin_unlock(&sdev->stat_lock);
732 		if (__ratelimit(&_rs))
733 			scrub_print_warning("checksum error", sblock_to_check);
734 	} else if (sblock_bad->header_error) {
735 		spin_lock(&sdev->stat_lock);
736 		sdev->stat.verify_errors++;
737 		spin_unlock(&sdev->stat_lock);
738 		if (__ratelimit(&_rs))
739 			scrub_print_warning("checksum/header error",
740 					    sblock_to_check);
741 	}
742 
743 	if (sdev->readonly)
744 		goto did_not_correct_error;
745 
746 	if (!is_metadata && !have_csum) {
747 		struct scrub_fixup_nodatasum *fixup_nodatasum;
748 
749 		/*
750 		 * !is_metadata and !have_csum: this means that the data
751 		 * might not be COW'ed and might be modified
752 		 * concurrently. The general strategy of working on the
753 		 * commit root does not help in the case where COW is not
754 		 * used.
755 		 */
756 		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
757 		if (!fixup_nodatasum)
758 			goto did_not_correct_error;
759 		fixup_nodatasum->sdev = sdev;
760 		fixup_nodatasum->logical = logical;
761 		fixup_nodatasum->root = fs_info->extent_root;
762 		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
763 		/*
764 		 * increment scrubs_running to prevent cancel requests from
765 		 * completing as long as a fixup worker is running. we must also
766 		 * increment scrubs_paused to prevent deadlocking on pause
767 		 * requests used for transaction commits (as the worker uses a
768 		 * transaction context). it is safe to regard the fixup worker
769 		 * as paused for all practical matters. effectively, we only
770 		 * prevent cancellation requests from completing.
771 		 */
772 		mutex_lock(&fs_info->scrub_lock);
773 		atomic_inc(&fs_info->scrubs_running);
774 		atomic_inc(&fs_info->scrubs_paused);
775 		mutex_unlock(&fs_info->scrub_lock);
776 		atomic_inc(&sdev->fixup_cnt);
777 		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
778 		btrfs_queue_worker(&fs_info->scrub_workers,
779 				   &fixup_nodatasum->work);
780 		goto out;
781 	}
782 
783 	/*
784 	 * now build and submit the bios for the other mirrors, check
785 	 * checksums
786 	 */
787 	for (mirror_index = 0;
788 	     mirror_index < BTRFS_MAX_MIRRORS &&
789 	     sblocks_for_recheck[mirror_index].page_count > 0;
790 	     mirror_index++) {
791 		if (mirror_index == failed_mirror_index)
792 			continue;
793 
794 		/* build and submit the bios, check checksums */
795 		ret = scrub_recheck_block(fs_info,
796 					  sblocks_for_recheck + mirror_index,
797 					  is_metadata, have_csum, csum,
798 					  generation, sdev->csum_size);
799 		if (ret)
800 			goto did_not_correct_error;
801 	}
802 
803 	/*
804 	 * first try to pick the mirror which is completely without I/O
805 	 * errors and also does not have a checksum error.
806 	 * If one is found, and if a checksum is present, the full block
807 	 * that is known to contain an error is rewritten. Afterwards
808 	 * the block is known to be corrected.
809 	 * If a mirror is found which is completely correct, and no
810 	 * checksum is present, only those pages are rewritten that had
811 	 * an I/O error in the block to be repaired, since it cannot be
812 	 * determined which copy of the other pages is better (and it
813 	 * could happen otherwise that a correct page would be
814 	 * overwritten by a bad one).
815 	 */
816 	for (mirror_index = 0;
817 	     mirror_index < BTRFS_MAX_MIRRORS &&
818 	     sblocks_for_recheck[mirror_index].page_count > 0;
819 	     mirror_index++) {
820 		struct scrub_block *sblock_other = sblocks_for_recheck +
821 						   mirror_index;
822 
823 		if (!sblock_other->header_error &&
824 		    !sblock_other->checksum_error &&
825 		    sblock_other->no_io_error_seen) {
826 			int force_write = is_metadata || have_csum;
827 
828 			ret = scrub_repair_block_from_good_copy(sblock_bad,
829 								sblock_other,
830 								force_write);
831 			if (0 == ret)
832 				goto corrected_error;
833 		}
834 	}
835 
836 	/*
837 	 * in case of I/O errors in the area that is supposed to be
838 	 * repaired, continue by picking good copies of those pages.
839 	 * Select the good pages from mirrors to rewrite bad pages from
840 	 * the area to fix. Afterwards verify the checksum of the block
841 	 * that is supposed to be repaired. This verification step is
842 	 * only done for the purpose of statistics counting and for the
843 	 * final scrub report, to show whether errors remain.
844 	 * A perfect algorithm could make use of the checksum and try
845 	 * all possible combinations of pages from the different mirrors
846 	 * until the checksum verification succeeds. For example, when
847 	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
848 	 * of mirror #2 is readable but the final checksum test fails,
849 	 * then the 2nd page of mirror #3 could be tried to see whether
850 	 * the final checksum then succeeds. But this would be a rare
851 	 * exception and is therefore not implemented. At least this
852 	 * avoids overwriting the good copy.
853 	 * A more useful improvement would be to pick the sectors
854 	 * without I/O error based on sector sizes (512 bytes on legacy
855 	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
856 	 * mirror could be repaired by taking 512 bytes of a different
857 	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
858 	 * area are unreadable.
859 	 */
860 
861 	/* can only fix I/O errors from here on */
862 	if (sblock_bad->no_io_error_seen)
863 		goto did_not_correct_error;
864 
865 	success = 1;
866 	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
867 		struct scrub_page *page_bad = sblock_bad->pagev + page_num;
868 
869 		if (!page_bad->io_error)
870 			continue;
871 
872 		for (mirror_index = 0;
873 		     mirror_index < BTRFS_MAX_MIRRORS &&
874 		     sblocks_for_recheck[mirror_index].page_count > 0;
875 		     mirror_index++) {
876 			struct scrub_block *sblock_other = sblocks_for_recheck +
877 							   mirror_index;
878 			struct scrub_page *page_other = sblock_other->pagev +
879 							page_num;
880 
881 			if (!page_other->io_error) {
882 				ret = scrub_repair_page_from_good_copy(
883 					sblock_bad, sblock_other, page_num, 0);
884 				if (0 == ret) {
885 					page_bad->io_error = 0;
886 					break; /* succeeded for this page */
887 				}
888 			}
889 		}
890 
891 		if (page_bad->io_error) {
892 			/* did not find a mirror to copy the page from */
893 			success = 0;
894 		}
895 	}
896 
897 	if (success) {
898 		if (is_metadata || have_csum) {
899 			/*
900 			 * need to verify the checksum now that all
901 			 * sectors on disk are repaired (the write
902 			 * request for data to be repaired is on its way).
903 			 * Just be lazy and use scrub_recheck_block()
904 			 * which re-reads the data before the checksum
905 			 * is verified, but most likely the data comes out
906 			 * of the page cache.
907 			 */
908 			ret = scrub_recheck_block(fs_info, sblock_bad,
909 						  is_metadata, have_csum, csum,
910 						  generation, sdev->csum_size);
911 			if (!ret && !sblock_bad->header_error &&
912 			    !sblock_bad->checksum_error &&
913 			    sblock_bad->no_io_error_seen)
914 				goto corrected_error;
915 			else
916 				goto did_not_correct_error;
917 		} else {
918 corrected_error:
919 			spin_lock(&sdev->stat_lock);
920 			sdev->stat.corrected_errors++;
921 			spin_unlock(&sdev->stat_lock);
922 			printk_ratelimited(KERN_ERR
923 				"btrfs: fixed up error at logical %llu on dev %s\n",
924 				(unsigned long long)logical, sdev->dev->name);
925 		}
926 	} else {
927 did_not_correct_error:
928 		spin_lock(&sdev->stat_lock);
929 		sdev->stat.uncorrectable_errors++;
930 		spin_unlock(&sdev->stat_lock);
931 		printk_ratelimited(KERN_ERR
932 			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
933 			(unsigned long long)logical, sdev->dev->name);
934 	}
935 
936 out:
937 	if (sblocks_for_recheck) {
938 		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
939 		     mirror_index++) {
940 			struct scrub_block *sblock = sblocks_for_recheck +
941 						     mirror_index;
942 			int page_index;
943 
944 			for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
945 			     page_index++)
946 				if (sblock->pagev[page_index].page)
947 					__free_page(
948 						sblock->pagev[page_index].page);
949 		}
950 		kfree(sblocks_for_recheck);
951 	}
952 
953 	return 0;
954 }
955 
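/*
 * map the logical range of the bad block to all of its mirrors and allocate
 * one page per PAGE_SIZE chunk in the corresponding sblocks_for_recheck
 * entry, so that every mirror can be re-read page by page afterwards
 */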
956 static int scrub_setup_recheck_block(struct scrub_dev *sdev,
957 				     struct btrfs_mapping_tree *map_tree,
958 				     u64 length, u64 logical,
959 				     struct scrub_block *sblocks_for_recheck)
960 {
961 	int page_index;
962 	int mirror_index;
963 	int ret;
964 
965 	/*
966 	 * note: the three members sdev, ref_count and outstanding_pages
967 	 * are not used (and not set) in the blocks that are used for
968 	 * the recheck procedure
969 	 */
970 
971 	page_index = 0;
972 	while (length > 0) {
973 		u64 sublen = min_t(u64, length, PAGE_SIZE);
974 		u64 mapped_length = sublen;
975 		struct btrfs_bio *bbio = NULL;
976 
977 		/*
978 		 * with a length of PAGE_SIZE, each returned stripe
979 		 * represents one mirror
980 		 */
981 		ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
982 				      &bbio, 0);
983 		if (ret || !bbio || mapped_length < sublen) {
984 			kfree(bbio);
985 			return -EIO;
986 		}
987 
988 		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
989 		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
990 		     mirror_index++) {
991 			struct scrub_block *sblock;
992 			struct scrub_page *page;
993 
994 			if (mirror_index >= BTRFS_MAX_MIRRORS)
995 				continue;
996 
997 			sblock = sblocks_for_recheck + mirror_index;
998 			page = sblock->pagev + page_index;
999 			page->logical = logical;
1000 			page->physical = bbio->stripes[mirror_index].physical;
1001 			page->bdev = bbio->stripes[mirror_index].dev->bdev;
1002 			page->mirror_num = mirror_index + 1;
1003 			page->page = alloc_page(GFP_NOFS);
1004 			if (!page->page) {
1005 				spin_lock(&sdev->stat_lock);
1006 				sdev->stat.malloc_errors++;
1007 				spin_unlock(&sdev->stat_lock);
1008 				return -ENOMEM;
1009 			}
1010 			sblock->page_count++;
1011 		}
1012 		kfree(bbio);
1013 		length -= sublen;
1014 		logical += sublen;
1015 		page_index++;
1016 	}
1017 
1018 	return 0;
1019 }
1020 
1021 /*
1022  * this function will check the on disk data for checksum errors, header
1023  * errors and read I/O errors. If any I/O errors happen, the exact pages
1024  * which are errored are marked as being bad. The goal is to enable scrub
1025  * to take those pages that are not errored from all the mirrors so that
1026  * the pages that are errored in the just handled mirror can be repaired.
1027  */
1028 static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
1029 			       struct scrub_block *sblock, int is_metadata,
1030 			       int have_csum, u8 *csum, u64 generation,
1031 			       u16 csum_size)
1032 {
1033 	int page_num;
1034 
1035 	sblock->no_io_error_seen = 1;
1036 	sblock->header_error = 0;
1037 	sblock->checksum_error = 0;
1038 
1039 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1040 		struct bio *bio;
1041 		int ret;
1042 		struct scrub_page *page = sblock->pagev + page_num;
1043 		DECLARE_COMPLETION_ONSTACK(complete);
1044 
1045 		BUG_ON(!page->page);
1046 		bio = bio_alloc(GFP_NOFS, 1);
1047 		bio->bi_bdev = page->bdev;
1048 		bio->bi_sector = page->physical >> 9;
1049 		bio->bi_end_io = scrub_complete_bio_end_io;
1050 		bio->bi_private = &complete;
1051 
1052 		ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
1053 		if (PAGE_SIZE != ret) {
1054 			bio_put(bio);
1055 			return -EIO;
1056 		}
1057 		btrfsic_submit_bio(READ, bio);
1058 
1059 		/* this will also unplug the queue */
1060 		wait_for_completion(&complete);
1061 
1062 		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
1063 		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1064 			sblock->no_io_error_seen = 0;
1065 		bio_put(bio);
1066 	}
1067 
1068 	if (sblock->no_io_error_seen)
1069 		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1070 					     have_csum, csum, generation,
1071 					     csum_size);
1072 
1073 	return 0;
1074 }
1075 
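/*
 * recompute the checksum over all pages of the block (if one is available)
 * and, for metadata, verify bytenr, generation, fsid and chunk tree uuid in
 * the header; header_error and checksum_error are set accordingly
 */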
1076 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1077 					 struct scrub_block *sblock,
1078 					 int is_metadata, int have_csum,
1079 					 const u8 *csum, u64 generation,
1080 					 u16 csum_size)
1081 {
1082 	int page_num;
1083 	u8 calculated_csum[BTRFS_CSUM_SIZE];
1084 	u32 crc = ~(u32)0;
1085 	struct btrfs_root *root = fs_info->extent_root;
1086 	void *mapped_buffer;
1087 
1088 	BUG_ON(!sblock->pagev[0].page);
1089 	if (is_metadata) {
1090 		struct btrfs_header *h;
1091 
1092 		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
1093 		h = (struct btrfs_header *)mapped_buffer;
1094 
1095 		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
1096 		    generation != le64_to_cpu(h->generation) ||
1097 		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
1098 		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1099 			   BTRFS_UUID_SIZE))
1100 			sblock->header_error = 1;
1101 		csum = h->csum;
1102 	} else {
1103 		if (!have_csum)
1104 			return;
1105 
1106 		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
1107 	}
1108 
1109 	for (page_num = 0;;) {
1110 		if (page_num == 0 && is_metadata)
1111 			crc = btrfs_csum_data(root,
1112 				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1113 				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1114 		else
1115 			crc = btrfs_csum_data(root, mapped_buffer, crc,
1116 					      PAGE_SIZE);
1117 
1118 		kunmap_atomic(mapped_buffer);
1119 		page_num++;
1120 		if (page_num >= sblock->page_count)
1121 			break;
1122 		BUG_ON(!sblock->pagev[page_num].page);
1123 
1124 		mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
1125 	}
1126 
1127 	btrfs_csum_final(crc, calculated_csum);
1128 	if (memcmp(calculated_csum, csum, csum_size))
1129 		sblock->checksum_error = 1;
1130 }
1131 
1132 static void scrub_complete_bio_end_io(struct bio *bio, int err)
1133 {
1134 	complete((struct completion *)bio->bi_private);
1135 }
1136 
1137 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1138 					     struct scrub_block *sblock_good,
1139 					     int force_write)
1140 {
1141 	int page_num;
1142 	int ret = 0;
1143 
1144 	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1145 		int ret_sub;
1146 
1147 		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1148 							   sblock_good,
1149 							   page_num,
1150 							   force_write);
1151 		if (ret_sub)
1152 			ret = ret_sub;
1153 	}
1154 
1155 	return ret;
1156 }
1157 
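/*
 * synchronously write the page of the good mirror over the corresponding
 * page of the bad mirror, either unconditionally (force_write) or only when
 * the block or page is known to be bad
 */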
1158 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1159 					    struct scrub_block *sblock_good,
1160 					    int page_num, int force_write)
1161 {
1162 	struct scrub_page *page_bad = sblock_bad->pagev + page_num;
1163 	struct scrub_page *page_good = sblock_good->pagev + page_num;
1164 
1165 	BUG_ON(sblock_bad->pagev[page_num].page == NULL);
1166 	BUG_ON(sblock_good->pagev[page_num].page == NULL);
1167 	if (force_write || sblock_bad->header_error ||
1168 	    sblock_bad->checksum_error || page_bad->io_error) {
1169 		struct bio *bio;
1170 		int ret;
1171 		DECLARE_COMPLETION_ONSTACK(complete);
1172 
1173 		bio = bio_alloc(GFP_NOFS, 1);
1174 		bio->bi_bdev = page_bad->bdev;
1175 		bio->bi_sector = page_bad->physical >> 9;
1176 		bio->bi_end_io = scrub_complete_bio_end_io;
1177 		bio->bi_private = &complete;
1178 
1179 		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1180 		if (PAGE_SIZE != ret) {
1181 			bio_put(bio);
1182 			return -EIO;
1183 		}
1184 		btrfsic_submit_bio(WRITE, bio);
1185 
1186 		/* this will also unplug the queue */
1187 		wait_for_completion(&complete);
1188 		bio_put(bio);
1189 	}
1190 
1191 	return 0;
1192 }
1193 
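/*
 * verify the checksum of a fully read block, dispatching on the extent flags
 * to the data, tree block or super block routine; on mismatch the block is
 * handed to scrub_handle_errored_block (super block errors are only counted)
 */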
1194 static void scrub_checksum(struct scrub_block *sblock)
1195 {
1196 	u64 flags;
1197 	int ret;
1198 
1199 	BUG_ON(sblock->page_count < 1);
1200 	flags = sblock->pagev[0].flags;
1201 	ret = 0;
1202 	if (flags & BTRFS_EXTENT_FLAG_DATA)
1203 		ret = scrub_checksum_data(sblock);
1204 	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1205 		ret = scrub_checksum_tree_block(sblock);
1206 	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1207 		(void)scrub_checksum_super(sblock);
1208 	else
1209 		WARN_ON(1);
1210 	if (ret)
1211 		scrub_handle_errored_block(sblock);
1212 }
1213 
1214 static int scrub_checksum_data(struct scrub_block *sblock)
1215 {
1216 	struct scrub_dev *sdev = sblock->sdev;
1217 	u8 csum[BTRFS_CSUM_SIZE];
1218 	u8 *on_disk_csum;
1219 	struct page *page;
1220 	void *buffer;
1221 	u32 crc = ~(u32)0;
1222 	int fail = 0;
1223 	struct btrfs_root *root = sdev->dev->dev_root;
1224 	u64 len;
1225 	int index;
1226 
1227 	BUG_ON(sblock->page_count < 1);
1228 	if (!sblock->pagev[0].have_csum)
1229 		return 0;
1230 
1231 	on_disk_csum = sblock->pagev[0].csum;
1232 	page = sblock->pagev[0].page;
1233 	buffer = kmap_atomic(page);
1234 
1235 	len = sdev->sectorsize;
1236 	index = 0;
1237 	for (;;) {
1238 		u64 l = min_t(u64, len, PAGE_SIZE);
1239 
1240 		crc = btrfs_csum_data(root, buffer, crc, l);
1241 		kunmap_atomic(buffer);
1242 		len -= l;
1243 		if (len == 0)
1244 			break;
1245 		index++;
1246 		BUG_ON(index >= sblock->page_count);
1247 		BUG_ON(!sblock->pagev[index].page);
1248 		page = sblock->pagev[index].page;
1249 		buffer = kmap_atomic(page);
1250 	}
1251 
1252 	btrfs_csum_final(crc, csum);
1253 	if (memcmp(csum, on_disk_csum, sdev->csum_size))
1254 		fail = 1;
1255 
1256 	if (fail) {
1257 		spin_lock(&sdev->stat_lock);
1258 		++sdev->stat.csum_errors;
1259 		spin_unlock(&sdev->stat_lock);
1260 	}
1261 
1262 	return fail;
1263 }
1264 
1265 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1266 {
1267 	struct scrub_dev *sdev = sblock->sdev;
1268 	struct btrfs_header *h;
1269 	struct btrfs_root *root = sdev->dev->dev_root;
1270 	struct btrfs_fs_info *fs_info = root->fs_info;
1271 	u8 calculated_csum[BTRFS_CSUM_SIZE];
1272 	u8 on_disk_csum[BTRFS_CSUM_SIZE];
1273 	struct page *page;
1274 	void *mapped_buffer;
1275 	u64 mapped_size;
1276 	void *p;
1277 	u32 crc = ~(u32)0;
1278 	int fail = 0;
1279 	int crc_fail = 0;
1280 	u64 len;
1281 	int index;
1282 
1283 	BUG_ON(sblock->page_count < 1);
1284 	page = sblock->pagev[0].page;
1285 	mapped_buffer = kmap_atomic(page);
1286 	h = (struct btrfs_header *)mapped_buffer;
1287 	memcpy(on_disk_csum, h->csum, sdev->csum_size);
1288 
1289 	/*
1290 	 * we don't use the getter functions here, as we
1291 	 * a) don't have an extent buffer and
1292 	 * b) the page is already kmapped
1293 	 */
1294 
1295 	if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
1296 		++fail;
1297 
1298 	if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
1299 		++fail;
1300 
1301 	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1302 		++fail;
1303 
1304 	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1305 		   BTRFS_UUID_SIZE))
1306 		++fail;
1307 
1308 	BUG_ON(sdev->nodesize != sdev->leafsize);
1309 	len = sdev->nodesize - BTRFS_CSUM_SIZE;
1310 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1311 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1312 	index = 0;
1313 	for (;;) {
1314 		u64 l = min_t(u64, len, mapped_size);
1315 
1316 		crc = btrfs_csum_data(root, p, crc, l);
1317 		kunmap_atomic(mapped_buffer);
1318 		len -= l;
1319 		if (len == 0)
1320 			break;
1321 		index++;
1322 		BUG_ON(index >= sblock->page_count);
1323 		BUG_ON(!sblock->pagev[index].page);
1324 		page = sblock->pagev[index].page;
1325 		mapped_buffer = kmap_atomic(page);
1326 		mapped_size = PAGE_SIZE;
1327 		p = mapped_buffer;
1328 	}
1329 
1330 	btrfs_csum_final(crc, calculated_csum);
1331 	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1332 		++crc_fail;
1333 
1334 	if (crc_fail || fail) {
1335 		spin_lock(&sdev->stat_lock);
1336 		if (crc_fail)
1337 			++sdev->stat.csum_errors;
1338 		if (fail)
1339 			++sdev->stat.verify_errors;
1340 		spin_unlock(&sdev->stat_lock);
1341 	}
1342 
1343 	return fail || crc_fail;
1344 }
1345 
1346 static int scrub_checksum_super(struct scrub_block *sblock)
1347 {
1348 	struct btrfs_super_block *s;
1349 	struct scrub_dev *sdev = sblock->sdev;
1350 	struct btrfs_root *root = sdev->dev->dev_root;
1351 	struct btrfs_fs_info *fs_info = root->fs_info;
1352 	u8 calculated_csum[BTRFS_CSUM_SIZE];
1353 	u8 on_disk_csum[BTRFS_CSUM_SIZE];
1354 	struct page *page;
1355 	void *mapped_buffer;
1356 	u64 mapped_size;
1357 	void *p;
1358 	u32 crc = ~(u32)0;
1359 	int fail = 0;
1360 	u64 len;
1361 	int index;
1362 
1363 	BUG_ON(sblock->page_count < 1);
1364 	page = sblock->pagev[0].page;
1365 	mapped_buffer = kmap_atomic(page);
1366 	s = (struct btrfs_super_block *)mapped_buffer;
1367 	memcpy(on_disk_csum, s->csum, sdev->csum_size);
1368 
1369 	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
1370 		++fail;
1371 
1372 	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
1373 		++fail;
1374 
1375 	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1376 		++fail;
1377 
1378 	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1379 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1380 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1381 	index = 0;
1382 	for (;;) {
1383 		u64 l = min_t(u64, len, mapped_size);
1384 
1385 		crc = btrfs_csum_data(root, p, crc, l);
1386 		kunmap_atomic(mapped_buffer);
1387 		len -= l;
1388 		if (len == 0)
1389 			break;
1390 		index++;
1391 		BUG_ON(index >= sblock->page_count);
1392 		BUG_ON(!sblock->pagev[index].page);
1393 		page = sblock->pagev[index].page;
1394 		mapped_buffer = kmap_atomic(page);
1395 		mapped_size = PAGE_SIZE;
1396 		p = mapped_buffer;
1397 	}
1398 
1399 	btrfs_csum_final(crc, calculated_csum);
1400 	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1401 		++fail;
1402 
1403 	if (fail) {
1404 		/*
1405 		 * if we find an error in a super block, we just report it.
1406 		 * The super blocks will get rewritten with the next transaction
1407 		 * commit anyway.
1408 		 */
1409 		spin_lock(&sdev->stat_lock);
1410 		++sdev->stat.super_errors;
1411 		spin_unlock(&sdev->stat_lock);
1412 	}
1413 
1414 	return fail;
1415 }
1416 
1417 static void scrub_block_get(struct scrub_block *sblock)
1418 {
1419 	atomic_inc(&sblock->ref_count);
1420 }
1421 
1422 static void scrub_block_put(struct scrub_block *sblock)
1423 {
1424 	if (atomic_dec_and_test(&sblock->ref_count)) {
1425 		int i;
1426 
1427 		for (i = 0; i < sblock->page_count; i++)
1428 			if (sblock->pagev[i].page)
1429 				__free_page(sblock->pagev[i].page);
1430 		kfree(sblock);
1431 	}
1432 }
1433 
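/*
 * submit the bio that is currently being filled (if any) for reading and
 * account it in in_flight until the end_io worker has processed it
 */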
1434 static void scrub_submit(struct scrub_dev *sdev)
1435 {
1436 	struct scrub_bio *sbio;
1437 
1438 	if (sdev->curr == -1)
1439 		return;
1440 
1441 	sbio = sdev->bios[sdev->curr];
1442 	sdev->curr = -1;
1443 	atomic_inc(&sdev->in_flight);
1444 
1445 	btrfsic_submit_bio(READ, sbio->bio);
1446 }
1447 
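/*
 * add one page to the bio currently being assembled for the device; a fresh
 * bio is taken from the free list when needed, and the bio is submitted
 * early when the page is not contiguous with it or when it is full
 */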
1448 static int scrub_add_page_to_bio(struct scrub_dev *sdev,
1449 				 struct scrub_page *spage)
1450 {
1451 	struct scrub_block *sblock = spage->sblock;
1452 	struct scrub_bio *sbio;
1453 	int ret;
1454 
1455 again:
1456 	/*
1457 	 * grab a fresh bio or wait for one to become available
1458 	 */
1459 	while (sdev->curr == -1) {
1460 		spin_lock(&sdev->list_lock);
1461 		sdev->curr = sdev->first_free;
1462 		if (sdev->curr != -1) {
1463 			sdev->first_free = sdev->bios[sdev->curr]->next_free;
1464 			sdev->bios[sdev->curr]->next_free = -1;
1465 			sdev->bios[sdev->curr]->page_count = 0;
1466 			spin_unlock(&sdev->list_lock);
1467 		} else {
1468 			spin_unlock(&sdev->list_lock);
1469 			wait_event(sdev->list_wait, sdev->first_free != -1);
1470 		}
1471 	}
1472 	sbio = sdev->bios[sdev->curr];
1473 	if (sbio->page_count == 0) {
1474 		struct bio *bio;
1475 
1476 		sbio->physical = spage->physical;
1477 		sbio->logical = spage->logical;
1478 		bio = sbio->bio;
1479 		if (!bio) {
1480 			bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
1481 			if (!bio)
1482 				return -ENOMEM;
1483 			sbio->bio = bio;
1484 		}
1485 
1486 		bio->bi_private = sbio;
1487 		bio->bi_end_io = scrub_bio_end_io;
1488 		bio->bi_bdev = sdev->dev->bdev;
1489 		bio->bi_sector = spage->physical >> 9;
1490 		sbio->err = 0;
1491 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1492 		   spage->physical ||
1493 		   sbio->logical + sbio->page_count * PAGE_SIZE !=
1494 		   spage->logical) {
1495 		scrub_submit(sdev);
1496 		goto again;
1497 	}
1498 
1499 	sbio->pagev[sbio->page_count] = spage;
1500 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1501 	if (ret != PAGE_SIZE) {
1502 		if (sbio->page_count < 1) {
1503 			bio_put(sbio->bio);
1504 			sbio->bio = NULL;
1505 			return -EIO;
1506 		}
1507 		scrub_submit(sdev);
1508 		goto again;
1509 	}
1510 
1511 	scrub_block_get(sblock); /* one for the added page */
1512 	atomic_inc(&sblock->outstanding_pages);
1513 	sbio->page_count++;
1514 	if (sbio->page_count == sdev->pages_per_bio)
1515 		scrub_submit(sdev);
1516 
1517 	return 0;
1518 }
1519 
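/*
 * split the range [logical, logical + len) into one scrub_block of up to
 * SCRUB_MAX_PAGES_PER_BLOCK pages, attach the checksum if one is known and
 * queue all pages for reading; with force set, the bio is submitted right away
 */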
1520 static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
1521 		       u64 physical, u64 flags, u64 gen, int mirror_num,
1522 		       u8 *csum, int force)
1523 {
1524 	struct scrub_block *sblock;
1525 	int index;
1526 
1527 	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
1528 	if (!sblock) {
1529 		spin_lock(&sdev->stat_lock);
1530 		sdev->stat.malloc_errors++;
1531 		spin_unlock(&sdev->stat_lock);
1532 		return -ENOMEM;
1533 	}
1534 
1535 	/* one ref inside this function, plus one for each page later on */
1536 	atomic_set(&sblock->ref_count, 1);
1537 	sblock->sdev = sdev;
1538 	sblock->no_io_error_seen = 1;
1539 
1540 	for (index = 0; len > 0; index++) {
1541 		struct scrub_page *spage = sblock->pagev + index;
1542 		u64 l = min_t(u64, len, PAGE_SIZE);
1543 
1544 		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
1545 		spage->page = alloc_page(GFP_NOFS);
1546 		if (!spage->page) {
1547 			spin_lock(&sdev->stat_lock);
1548 			sdev->stat.malloc_errors++;
1549 			spin_unlock(&sdev->stat_lock);
1550 			while (index > 0) {
1551 				index--;
1552 				__free_page(sblock->pagev[index].page);
1553 			}
1554 			kfree(sblock);
1555 			return -ENOMEM;
1556 		}
1557 		spage->sblock = sblock;
1558 		spage->bdev = sdev->dev->bdev;
1559 		spage->flags = flags;
1560 		spage->generation = gen;
1561 		spage->logical = logical;
1562 		spage->physical = physical;
1563 		spage->mirror_num = mirror_num;
1564 		if (csum) {
1565 			spage->have_csum = 1;
1566 			memcpy(spage->csum, csum, sdev->csum_size);
1567 		} else {
1568 			spage->have_csum = 0;
1569 		}
1570 		sblock->page_count++;
1571 		len -= l;
1572 		logical += l;
1573 		physical += l;
1574 	}
1575 
1576 	BUG_ON(sblock->page_count == 0);
1577 	for (index = 0; index < sblock->page_count; index++) {
1578 		struct scrub_page *spage = sblock->pagev + index;
1579 		int ret;
1580 
1581 		ret = scrub_add_page_to_bio(sdev, spage);
1582 		if (ret) {
1583 			scrub_block_put(sblock);
1584 			return ret;
1585 		}
1586 	}
1587 
1588 	if (force)
1589 		scrub_submit(sdev);
1590 
1591 	/* last one frees, either here or in bio completion for last page */
1592 	scrub_block_put(sblock);
1593 	return 0;
1594 }
1595 
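/*
 * read completion handler: only record the error and defer all processing to
 * scrub_bio_end_io_worker in the scrub_workers thread pool
 */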
1596 static void scrub_bio_end_io(struct bio *bio, int err)
1597 {
1598 	struct scrub_bio *sbio = bio->bi_private;
1599 	struct scrub_dev *sdev = sbio->sdev;
1600 	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1601 
1602 	sbio->err = err;
1603 	sbio->bio = bio;
1604 
1605 	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
1606 }
1607 
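/*
 * worker context for a completed read bio: propagate I/O errors to the
 * affected pages and blocks, complete the blocks whose last page has
 * arrived, recycle the bio slot and wake up waiters
 */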
1608 static void scrub_bio_end_io_worker(struct btrfs_work *work)
1609 {
1610 	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1611 	struct scrub_dev *sdev = sbio->sdev;
1612 	int i;
1613 
1614 	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
1615 	if (sbio->err) {
1616 		for (i = 0; i < sbio->page_count; i++) {
1617 			struct scrub_page *spage = sbio->pagev[i];
1618 
1619 			spage->io_error = 1;
1620 			spage->sblock->no_io_error_seen = 0;
1621 		}
1622 	}
1623 
1624 	/* now complete the scrub_block items that have all pages completed */
1625 	for (i = 0; i < sbio->page_count; i++) {
1626 		struct scrub_page *spage = sbio->pagev[i];
1627 		struct scrub_block *sblock = spage->sblock;
1628 
1629 		if (atomic_dec_and_test(&sblock->outstanding_pages))
1630 			scrub_block_complete(sblock);
1631 		scrub_block_put(sblock);
1632 	}
1633 
1634 	if (sbio->err) {
1635 		/* what is this good for??? */
1636 		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1637 		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
1638 		sbio->bio->bi_phys_segments = 0;
1639 		sbio->bio->bi_idx = 0;
1640 
1641 		for (i = 0; i < sbio->page_count; i++) {
1642 			struct bio_vec *bi;
1643 			bi = &sbio->bio->bi_io_vec[i];
1644 			bi->bv_offset = 0;
1645 			bi->bv_len = PAGE_SIZE;
1646 		}
1647 	}
1648 
1649 	bio_put(sbio->bio);
1650 	sbio->bio = NULL;
1651 	spin_lock(&sdev->list_lock);
1652 	sbio->next_free = sdev->first_free;
1653 	sdev->first_free = sbio->index;
1654 	spin_unlock(&sdev->list_lock);
1655 	atomic_dec(&sdev->in_flight);
1656 	wake_up(&sdev->list_wait);
1657 }
1658 
1659 static void scrub_block_complete(struct scrub_block *sblock)
1660 {
1661 	if (!sblock->no_io_error_seen)
1662 		scrub_handle_errored_block(sblock);
1663 	else
1664 		scrub_checksum(sblock);
1665 }
1666 
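/*
 * look up the data checksum for a logical address in the list that was
 * pre-filled from the csum tree; entries lying entirely before the address
 * are dropped. Returns 1 if a checksum was copied into *csum.
 */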
1667 static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
1668 			   u8 *csum)
1669 {
1670 	struct btrfs_ordered_sum *sum = NULL;
1671 	int ret = 0;
1672 	unsigned long i;
1673 	unsigned long num_sectors;
1674 
1675 	while (!list_empty(&sdev->csum_list)) {
1676 		sum = list_first_entry(&sdev->csum_list,
1677 				       struct btrfs_ordered_sum, list);
1678 		if (sum->bytenr > logical)
1679 			return 0;
1680 		if (sum->bytenr + sum->len > logical)
1681 			break;
1682 
1683 		++sdev->stat.csum_discards;
1684 		list_del(&sum->list);
1685 		kfree(sum);
1686 		sum = NULL;
1687 	}
1688 	if (!sum)
1689 		return 0;
1690 
1691 	num_sectors = sum->len / sdev->sectorsize;
1692 	for (i = 0; i < num_sectors; ++i) {
1693 		if (sum->sums[i].bytenr == logical) {
1694 			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
1695 			ret = 1;
1696 			break;
1697 		}
1698 	}
1699 	if (ret && i == num_sectors - 1) {
1700 		list_del(&sum->list);
1701 		kfree(sum);
1702 	}
1703 	return ret;
1704 }
1705 
1706 /* scrub_extent tries to collect up to 64 kB for each bio */
1707 static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
1708 			u64 physical, u64 flags, u64 gen, int mirror_num)
1709 {
1710 	int ret;
1711 	u8 csum[BTRFS_CSUM_SIZE];
1712 	u32 blocksize;
1713 
1714 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
1715 		blocksize = sdev->sectorsize;
1716 		spin_lock(&sdev->stat_lock);
1717 		sdev->stat.data_extents_scrubbed++;
1718 		sdev->stat.data_bytes_scrubbed += len;
1719 		spin_unlock(&sdev->stat_lock);
1720 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1721 		BUG_ON(sdev->nodesize != sdev->leafsize);
1722 		blocksize = sdev->nodesize;
1723 		spin_lock(&sdev->stat_lock);
1724 		sdev->stat.tree_extents_scrubbed++;
1725 		sdev->stat.tree_bytes_scrubbed += len;
1726 		spin_unlock(&sdev->stat_lock);
1727 	} else {
1728 		blocksize = sdev->sectorsize;
1729 		BUG_ON(1);
1730 	}
1731 
1732 	while (len) {
1733 		u64 l = min_t(u64, len, blocksize);
1734 		int have_csum = 0;
1735 
1736 		if (flags & BTRFS_EXTENT_FLAG_DATA) {
1737 			/* push csums to sbio */
1738 			have_csum = scrub_find_csum(sdev, logical, l, csum);
1739 			if (have_csum == 0)
1740 				++sdev->stat.no_csum;
1741 		}
1742 		ret = scrub_pages(sdev, logical, l, physical, flags, gen,
1743 				  mirror_num, have_csum ? csum : NULL, 0);
1744 		if (ret)
1745 			return ret;
1746 		len -= l;
1747 		logical += l;
1748 		physical += l;
1749 	}
1750 	return 0;
1751 }
1752 
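/*
 * scrub one stripe of a chunk: walk the extent tree over the stripe's
 * logical range on the commit root, collect the data checksums and queue
 * the read bios, honoring pause and cancel requests along the way
 */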
1753 static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
1754 	struct map_lookup *map, int num, u64 base, u64 length)
1755 {
1756 	struct btrfs_path *path;
1757 	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1758 	struct btrfs_root *root = fs_info->extent_root;
1759 	struct btrfs_root *csum_root = fs_info->csum_root;
1760 	struct btrfs_extent_item *extent;
1761 	struct blk_plug plug;
1762 	u64 flags;
1763 	int ret;
1764 	int slot;
1765 	int i;
1766 	u64 nstripes;
1767 	struct extent_buffer *l;
1768 	struct btrfs_key key;
1769 	u64 physical;
1770 	u64 logical;
1771 	u64 generation;
1772 	int mirror_num;
1773 	struct reada_control *reada1;
1774 	struct reada_control *reada2;
1775 	struct btrfs_key key_start;
1776 	struct btrfs_key key_end;
1777 
1778 	u64 increment = map->stripe_len;
1779 	u64 offset;
1780 
1781 	nstripes = length;
1782 	offset = 0;
1783 	do_div(nstripes, map->stripe_len);
1784 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
1785 		offset = map->stripe_len * num;
1786 		increment = map->stripe_len * map->num_stripes;
1787 		mirror_num = 1;
1788 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1789 		int factor = map->num_stripes / map->sub_stripes;
1790 		offset = map->stripe_len * (num / map->sub_stripes);
1791 		increment = map->stripe_len * factor;
1792 		mirror_num = num % map->sub_stripes + 1;
1793 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1794 		increment = map->stripe_len;
1795 		mirror_num = num % map->num_stripes + 1;
1796 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1797 		increment = map->stripe_len;
1798 		mirror_num = num % map->num_stripes + 1;
1799 	} else {
1800 		increment = map->stripe_len;
1801 		mirror_num = 1;
1802 	}
1803 
1804 	path = btrfs_alloc_path();
1805 	if (!path)
1806 		return -ENOMEM;
1807 
1808 	/*
1809 	 * work on commit root. The related disk blocks are static as
1810 	 * long as COW is applied. This means it is safe to rewrite
1811 	 * them to repair disk errors without any race conditions
1812 	 */
1813 	path->search_commit_root = 1;
1814 	path->skip_locking = 1;
1815 
1816 	/*
1817 	 * trigger readahead for the extent tree and csum tree and wait for
1818 	 * completion. During readahead, the scrub is officially paused
1819 	 * to not hold off transaction commits
1820 	 */
1821 	logical = base + offset;
1822 
1823 	wait_event(sdev->list_wait,
1824 		   atomic_read(&sdev->in_flight) == 0);
1825 	atomic_inc(&fs_info->scrubs_paused);
1826 	wake_up(&fs_info->scrub_pause_wait);
1827 
1828 	/* FIXME it might be better to start readahead at commit root */
1829 	key_start.objectid = logical;
1830 	key_start.type = BTRFS_EXTENT_ITEM_KEY;
1831 	key_start.offset = (u64)0;
1832 	key_end.objectid = base + offset + nstripes * increment;
1833 	key_end.type = BTRFS_EXTENT_ITEM_KEY;
1834 	key_end.offset = (u64)0;
1835 	reada1 = btrfs_reada_add(root, &key_start, &key_end);
1836 
1837 	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1838 	key_start.type = BTRFS_EXTENT_CSUM_KEY;
1839 	key_start.offset = logical;
1840 	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1841 	key_end.type = BTRFS_EXTENT_CSUM_KEY;
1842 	key_end.offset = base + offset + nstripes * increment;
1843 	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
1844 
1845 	if (!IS_ERR(reada1))
1846 		btrfs_reada_wait(reada1);
1847 	if (!IS_ERR(reada2))
1848 		btrfs_reada_wait(reada2);
1849 
1850 	mutex_lock(&fs_info->scrub_lock);
1851 	while (atomic_read(&fs_info->scrub_pause_req)) {
1852 		mutex_unlock(&fs_info->scrub_lock);
1853 		wait_event(fs_info->scrub_pause_wait,
1854 		   atomic_read(&fs_info->scrub_pause_req) == 0);
1855 		mutex_lock(&fs_info->scrub_lock);
1856 	}
1857 	atomic_dec(&fs_info->scrubs_paused);
1858 	mutex_unlock(&fs_info->scrub_lock);
1859 	wake_up(&fs_info->scrub_pause_wait);
1860 
1861 	/*
1862 	 * collect all data csums for the stripe to avoid seeking during
1863 	 * the scrub. This might currently (crc32) end up being about 1MB
1864 	 */
1865 	blk_start_plug(&plug);
1866 
1867 	/*
1868 	 * now find all extents for each stripe and scrub them
1869 	 */
1870 	logical = base + offset;
1871 	physical = map->stripes[num].physical;
1872 	ret = 0;
1873 	for (i = 0; i < nstripes; ++i) {
1874 		/*
1875 		 * canceled?
1876 		 */
1877 		if (atomic_read(&fs_info->scrub_cancel_req) ||
1878 		    atomic_read(&sdev->cancel_req)) {
1879 			ret = -ECANCELED;
1880 			goto out;
1881 		}
1882 		/*
1883 		 * check to see if we have to pause
1884 		 */
1885 		if (atomic_read(&fs_info->scrub_pause_req)) {
1886 			/* push queued extents */
1887 			scrub_submit(sdev);
1888 			wait_event(sdev->list_wait,
1889 				   atomic_read(&sdev->in_flight) == 0);
1890 			atomic_inc(&fs_info->scrubs_paused);
1891 			wake_up(&fs_info->scrub_pause_wait);
1892 			mutex_lock(&fs_info->scrub_lock);
1893 			while (atomic_read(&fs_info->scrub_pause_req)) {
1894 				mutex_unlock(&fs_info->scrub_lock);
1895 				wait_event(fs_info->scrub_pause_wait,
1896 				   atomic_read(&fs_info->scrub_pause_req) == 0);
1897 				mutex_lock(&fs_info->scrub_lock);
1898 			}
1899 			atomic_dec(&fs_info->scrubs_paused);
1900 			mutex_unlock(&fs_info->scrub_lock);
1901 			wake_up(&fs_info->scrub_pause_wait);
1902 		}
1903 
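		/*
		 * Queue up all data csums that fall into this stripe so
		 * scrub_find_csum() can hand them out without any extra
		 * tree searches.
		 */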
1904 		ret = btrfs_lookup_csums_range(csum_root, logical,
1905 					       logical + map->stripe_len - 1,
1906 					       &sdev->csum_list, 1);
1907 		if (ret)
1908 			goto out;
1909 
1910 		key.objectid = logical;
1911 		key.type = BTRFS_EXTENT_ITEM_KEY;
1912 		key.offset = (u64)0;
1913 
1914 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1915 		if (ret < 0)
1916 			goto out;
1917 		if (ret > 0) {
1918 			ret = btrfs_previous_item(root, path, 0,
1919 						  BTRFS_EXTENT_ITEM_KEY);
1920 			if (ret < 0)
1921 				goto out;
1922 			if (ret > 0) {
1923 				/* there's no smaller item, so stick with the
1924 				 * larger one */
1925 				btrfs_release_path(path);
1926 				ret = btrfs_search_slot(NULL, root, &key,
1927 							path, 0, 0);
1928 				if (ret < 0)
1929 					goto out;
1930 			}
1931 		}
1932 
1933 		while (1) {
1934 			l = path->nodes[0];
1935 			slot = path->slots[0];
1936 			if (slot >= btrfs_header_nritems(l)) {
1937 				ret = btrfs_next_leaf(root, path);
1938 				if (ret == 0)
1939 					continue;
1940 				if (ret < 0)
1941 					goto out;
1942 
1943 				break;
1944 			}
1945 			btrfs_item_key_to_cpu(l, &key, slot);
1946 
1947 			if (key.objectid + key.offset <= logical)
1948 				goto next;
1949 
1950 			if (key.objectid >= logical + map->stripe_len)
1951 				break;
1952 
1953 			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
1954 				goto next;
1955 
1956 			extent = btrfs_item_ptr(l, slot,
1957 						struct btrfs_extent_item);
1958 			flags = btrfs_extent_flags(l, extent);
1959 			generation = btrfs_extent_generation(l, extent);
1960 
1961 			if (key.objectid < logical &&
1962 			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
1963 				printk(KERN_ERR
1964 				       "btrfs scrub: tree block %llu spanning "
1965 				       "stripes, ignored. logical=%llu\n",
1966 				       (unsigned long long)key.objectid,
1967 				       (unsigned long long)logical);
1968 				goto next;
1969 			}
1970 
1971 			/*
1972 			 * trim extent to this stripe
1973 			 */
1974 			if (key.objectid < logical) {
1975 				key.offset -= logical - key.objectid;
1976 				key.objectid = logical;
1977 			}
1978 			if (key.objectid + key.offset >
1979 			    logical + map->stripe_len) {
1980 				key.offset = logical + map->stripe_len -
1981 					     key.objectid;
1982 			}
1983 
1984 			ret = scrub_extent(sdev, key.objectid, key.offset,
1985 					   key.objectid - logical + physical,
1986 					   flags, generation, mirror_num);
1987 			if (ret)
1988 				goto out;
1989 
1990 next:
1991 			path->slots[0]++;
1992 		}
1993 		btrfs_release_path(path);
1994 		logical += increment;
1995 		physical += map->stripe_len;
1996 		spin_lock(&sdev->stat_lock);
1997 		sdev->stat.last_physical = physical;
1998 		spin_unlock(&sdev->stat_lock);
1999 	}
2000 	/* push queued extents */
2001 	scrub_submit(sdev);
2002 
2003 out:
2004 	blk_finish_plug(&plug);
2005 	btrfs_free_path(path);
2006 	return ret < 0 ? ret : 0;
2007 }
2008 
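/*
 * Look up the chunk mapping for @chunk_offset and scrub every stripe of it
 * that is located on sdev->dev at @dev_offset.
 */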
2009 static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
2010 	u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
2011 	u64 dev_offset)
2012 {
2013 	struct btrfs_mapping_tree *map_tree =
2014 		&sdev->dev->dev_root->fs_info->mapping_tree;
2015 	struct map_lookup *map;
2016 	struct extent_map *em;
2017 	int i;
2018 	int ret = -EINVAL;
2019 
2020 	read_lock(&map_tree->map_tree.lock);
2021 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2022 	read_unlock(&map_tree->map_tree.lock);
2023 
2024 	if (!em)
2025 		return -EINVAL;
2026 
2027 	map = (struct map_lookup *)em->bdev;
2028 	if (em->start != chunk_offset)
2029 		goto out;
2030 
2031 	if (em->len < length)
2032 		goto out;
2033 
2034 	for (i = 0; i < map->num_stripes; ++i) {
2035 		if (map->stripes[i].dev == sdev->dev &&
2036 		    map->stripes[i].physical == dev_offset) {
2037 			ret = scrub_stripe(sdev, map, i, chunk_offset, length);
2038 			if (ret)
2039 				goto out;
2040 		}
2041 	}
2042 out:
2043 	free_extent_map(em);
2044 
2045 	return ret;
2046 }
2047 
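/*
 * Walk the dev extent tree of the device under scrub and call scrub_chunk()
 * for every dev extent that overlaps the range [start, end) on the device.
 */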
2048 static noinline_for_stack
2049 int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
2050 {
2051 	struct btrfs_dev_extent *dev_extent = NULL;
2052 	struct btrfs_path *path;
2053 	struct btrfs_root *root = sdev->dev->dev_root;
2054 	struct btrfs_fs_info *fs_info = root->fs_info;
2055 	u64 length;
2056 	u64 chunk_tree;
2057 	u64 chunk_objectid;
2058 	u64 chunk_offset;
2059 	int ret;
2060 	int slot;
2061 	struct extent_buffer *l;
2062 	struct btrfs_key key;
2063 	struct btrfs_key found_key;
2064 	struct btrfs_block_group_cache *cache;
2065 
2066 	path = btrfs_alloc_path();
2067 	if (!path)
2068 		return -ENOMEM;
2069 
2070 	path->reada = 2;
2071 	path->search_commit_root = 1;
2072 	path->skip_locking = 1;
2073 
2074 	key.objectid = sdev->dev->devid;
2075 	key.offset = 0ull;
2076 	key.type = BTRFS_DEV_EXTENT_KEY;
2077 
2078 
2079 	while (1) {
2080 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2081 		if (ret < 0)
2082 			break;
2083 		if (ret > 0) {
2084 			if (path->slots[0] >=
2085 			    btrfs_header_nritems(path->nodes[0])) {
2086 				ret = btrfs_next_leaf(root, path);
2087 				if (ret)
2088 					break;
2089 			}
2090 		}
2091 
2092 		l = path->nodes[0];
2093 		slot = path->slots[0];
2094 
2095 		btrfs_item_key_to_cpu(l, &found_key, slot);
2096 
2097 		if (found_key.objectid != sdev->dev->devid)
2098 			break;
2099 
2100 		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
2101 			break;
2102 
2103 		if (found_key.offset >= end)
2104 			break;
2105 
2106 		if (found_key.offset < key.offset)
2107 			break;
2108 
2109 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2110 		length = btrfs_dev_extent_length(l, dev_extent);
2111 
2112 		if (found_key.offset + length <= start) {
2113 			key.offset = found_key.offset + length;
2114 			btrfs_release_path(path);
2115 			continue;
2116 		}
2117 
2118 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2119 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2120 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2121 
2122 		/*
2123 		 * get a reference on the corresponding block group to prevent
2124 		 * the chunk from going away while we scrub it
2125 		 */
2126 		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2127 		if (!cache) {
2128 			ret = -ENOENT;
2129 			break;
2130 		}
2131 		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
2132 				  chunk_offset, length, found_key.offset);
2133 		btrfs_put_block_group(cache);
2134 		if (ret)
2135 			break;
2136 
2137 		key.offset = found_key.offset + length;
2138 		btrfs_release_path(path);
2139 	}
2140 
2141 	btrfs_free_path(path);
2142 
2143 	/*
2144 	 * ret can still be 1 from search_slot or next_leaf,
2145 	 * that's not an error
2146 	 */
2147 	return ret < 0 ? ret : 0;
2148 }
2149 
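/*
 * Read and verify all superblock copies of the device. Copies that would
 * lie beyond the end of the device are skipped.
 */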
2150 static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
2151 {
2152 	int	i;
2153 	u64	bytenr;
2154 	u64	gen;
2155 	int	ret;
2156 	struct btrfs_device *device = sdev->dev;
2157 	struct btrfs_root *root = device->dev_root;
2158 
2159 	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2160 		return -EIO;
2161 
2162 	gen = root->fs_info->last_trans_committed;
2163 
2164 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2165 		bytenr = btrfs_sb_offset(i);
2166 		if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
2167 			break;
2168 
2169 		ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2170 				     BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
2171 		if (ret)
2172 			return ret;
2173 	}
2174 	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2175 
2176 	return 0;
2177 }
2178 
2179 /*
2180  * get a reference count on fs_info->scrub_workers. start workers if necessary
2181  */
2182 static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
2183 {
2184 	struct btrfs_fs_info *fs_info = root->fs_info;
2185 	int ret = 0;
2186 
2187 	mutex_lock(&fs_info->scrub_lock);
2188 	if (fs_info->scrub_workers_refcnt == 0) {
2189 		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
2190 			   fs_info->thread_pool_size, &fs_info->generic_worker);
2191 		fs_info->scrub_workers.idle_thresh = 4;
2192 		ret = btrfs_start_workers(&fs_info->scrub_workers);
2193 		if (ret)
2194 			goto out;
2195 	}
2196 	++fs_info->scrub_workers_refcnt;
2197 out:
2198 	mutex_unlock(&fs_info->scrub_lock);
2199 
2200 	return ret;
2201 }
2202 
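/*
 * drop a reference on fs_info->scrub_workers. stop the workers when the
 * last reference is dropped
 */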
2203 static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
2204 {
2205 	struct btrfs_fs_info *fs_info = root->fs_info;
2206 
2207 	mutex_lock(&fs_info->scrub_lock);
2208 	if (--fs_info->scrub_workers_refcnt == 0)
2209 		btrfs_stop_workers(&fs_info->scrub_workers);
2210 	WARN_ON(fs_info->scrub_workers_refcnt < 0);
2211 	mutex_unlock(&fs_info->scrub_lock);
2212 }
2213 
2214 
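/*
 * Main entry point for scrubbing a single device: verifies the size
 * assumptions scrub relies on, takes a reference on the scrub workers,
 * sets up a scrub_dev for @devid and scrubs the superblocks plus all
 * chunks in the range [start, end]. If @progress is non-NULL it is
 * filled with the final statistics.
 */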
2215 int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
2216 		    struct btrfs_scrub_progress *progress, int readonly)
2217 {
2218 	struct scrub_dev *sdev;
2219 	struct btrfs_fs_info *fs_info = root->fs_info;
2220 	int ret;
2221 	struct btrfs_device *dev;
2222 
2223 	if (btrfs_fs_closing(root->fs_info))
2224 		return -EINVAL;
2225 
2226 	/*
2227 	 * check some assumptions
2228 	 */
2229 	if (root->nodesize != root->leafsize) {
2230 		printk(KERN_ERR
2231 		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
2232 		       root->nodesize, root->leafsize);
2233 		return -EINVAL;
2234 	}
2235 
2236 	if (root->nodesize > BTRFS_STRIPE_LEN) {
2237 		/*
2238 		 * in this case scrub is unable to calculate the checksum
2239 		 * the way scrub is implemented. Do not handle this
2240 		 * situation at all because it won't ever happen.
2241 		 */
2242 		printk(KERN_ERR
2243 		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
2244 		       root->nodesize, BTRFS_STRIPE_LEN);
2245 		return -EINVAL;
2246 	}
2247 
2248 	if (root->sectorsize != PAGE_SIZE) {
2249 		/* not supported for data w/o checksums */
2250 		printk(KERN_ERR
2251 		       "btrfs_scrub: size assumption sectorsize == PAGE_SIZE (%d == %llu) fails\n",
2252 		       root->sectorsize, (unsigned long long)PAGE_SIZE);
2253 		return -EINVAL;
2254 	}
2255 
2256 	ret = scrub_workers_get(root);
2257 	if (ret)
2258 		return ret;
2259 
2260 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2261 	dev = btrfs_find_device(root, devid, NULL, NULL);
2262 	if (!dev || dev->missing) {
2263 		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2264 		scrub_workers_put(root);
2265 		return -ENODEV;
2266 	}
2267 	mutex_lock(&fs_info->scrub_lock);
2268 
2269 	if (!dev->in_fs_metadata) {
2270 		mutex_unlock(&fs_info->scrub_lock);
2271 		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2272 		scrub_workers_put(root);
2273 		return -ENODEV;
2274 	}
2275 
2276 	if (dev->scrub_device) {
2277 		mutex_unlock(&fs_info->scrub_lock);
2278 		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2279 		scrub_workers_put(root);
2280 		return -EINPROGRESS;
2281 	}
2282 	sdev = scrub_setup_dev(dev);
2283 	if (IS_ERR(sdev)) {
2284 		mutex_unlock(&fs_info->scrub_lock);
2285 		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2286 		scrub_workers_put(root);
2287 		return PTR_ERR(sdev);
2288 	}
2289 	sdev->readonly = readonly;
2290 	dev->scrub_device = sdev;
2291 
2292 	atomic_inc(&fs_info->scrubs_running);
2293 	mutex_unlock(&fs_info->scrub_lock);
2294 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2295 
2296 	down_read(&fs_info->scrub_super_lock);
2297 	ret = scrub_supers(sdev);
2298 	up_read(&fs_info->scrub_super_lock);
2299 
2300 	if (!ret)
2301 		ret = scrub_enumerate_chunks(sdev, start, end);
2302 
2303 	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2304 	atomic_dec(&fs_info->scrubs_running);
2305 	wake_up(&fs_info->scrub_pause_wait);
2306 
2307 	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);
2308 
2309 	if (progress)
2310 		memcpy(progress, &sdev->stat, sizeof(*progress));
2311 
2312 	mutex_lock(&fs_info->scrub_lock);
2313 	dev->scrub_device = NULL;
2314 	mutex_unlock(&fs_info->scrub_lock);
2315 
2316 	scrub_free_dev(sdev);
2317 	scrub_workers_put(root);
2318 
2319 	return ret;
2320 }
2321 
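/*
 * Ask all running scrubs to pause and wait until each of them has checked
 * in as paused. btrfs_scrub_continue() lets them resume.
 */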
2322 void btrfs_scrub_pause(struct btrfs_root *root)
2323 {
2324 	struct btrfs_fs_info *fs_info = root->fs_info;
2325 
2326 	mutex_lock(&fs_info->scrub_lock);
2327 	atomic_inc(&fs_info->scrub_pause_req);
2328 	while (atomic_read(&fs_info->scrubs_paused) !=
2329 	       atomic_read(&fs_info->scrubs_running)) {
2330 		mutex_unlock(&fs_info->scrub_lock);
2331 		wait_event(fs_info->scrub_pause_wait,
2332 			   atomic_read(&fs_info->scrubs_paused) ==
2333 			   atomic_read(&fs_info->scrubs_running));
2334 		mutex_lock(&fs_info->scrub_lock);
2335 	}
2336 	mutex_unlock(&fs_info->scrub_lock);
2337 }
2338 
2339 void btrfs_scrub_continue(struct btrfs_root *root)
2340 {
2341 	struct btrfs_fs_info *fs_info = root->fs_info;
2342 
2343 	atomic_dec(&fs_info->scrub_pause_req);
2344 	wake_up(&fs_info->scrub_pause_wait);
2345 }
2346 
2347 void btrfs_scrub_pause_super(struct btrfs_root *root)
2348 {
2349 	down_write(&root->fs_info->scrub_super_lock);
2350 }
2351 
2352 void btrfs_scrub_continue_super(struct btrfs_root *root)
2353 {
2354 	up_write(&root->fs_info->scrub_super_lock);
2355 }
2356 
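/*
 * Cancel all scrubs running on the filesystem and wait for them to finish.
 * Returns -ENOTCONN when no scrub was running.
 */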
2357 int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2358 {
2359 
2360 	mutex_lock(&fs_info->scrub_lock);
2361 	if (!atomic_read(&fs_info->scrubs_running)) {
2362 		mutex_unlock(&fs_info->scrub_lock);
2363 		return -ENOTCONN;
2364 	}
2365 
2366 	atomic_inc(&fs_info->scrub_cancel_req);
2367 	while (atomic_read(&fs_info->scrubs_running)) {
2368 		mutex_unlock(&fs_info->scrub_lock);
2369 		wait_event(fs_info->scrub_pause_wait,
2370 			   atomic_read(&fs_info->scrubs_running) == 0);
2371 		mutex_lock(&fs_info->scrub_lock);
2372 	}
2373 	atomic_dec(&fs_info->scrub_cancel_req);
2374 	mutex_unlock(&fs_info->scrub_lock);
2375 
2376 	return 0;
2377 }
2378 
2379 int btrfs_scrub_cancel(struct btrfs_root *root)
2380 {
2381 	return __btrfs_scrub_cancel(root->fs_info);
2382 }
2383 
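/*
 * Cancel the scrub running on @dev and wait until it has torn down its
 * scrub_device. Returns -ENOTCONN when no scrub was running on the device.
 */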
2384 int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
2385 {
2386 	struct btrfs_fs_info *fs_info = root->fs_info;
2387 	struct scrub_dev *sdev;
2388 
2389 	mutex_lock(&fs_info->scrub_lock);
2390 	sdev = dev->scrub_device;
2391 	if (!sdev) {
2392 		mutex_unlock(&fs_info->scrub_lock);
2393 		return -ENOTCONN;
2394 	}
2395 	atomic_inc(&sdev->cancel_req);
2396 	while (dev->scrub_device) {
2397 		mutex_unlock(&fs_info->scrub_lock);
2398 		wait_event(fs_info->scrub_pause_wait,
2399 			   dev->scrub_device == NULL);
2400 		mutex_lock(&fs_info->scrub_lock);
2401 	}
2402 	mutex_unlock(&fs_info->scrub_lock);
2403 
2404 	return 0;
2405 }
2406 
2407 int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
2408 {
2409 	struct btrfs_fs_info *fs_info = root->fs_info;
2410 	struct btrfs_device *dev;
2411 	int ret;
2412 
2413 	/*
2414 	 * we have to hold the device_list_mutex here so the device
2415 	 * does not go away in cancel_dev. FIXME: find a better solution
2416 	 */
2417 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2418 	dev = btrfs_find_device(root, devid, NULL, NULL);
2419 	if (!dev) {
2420 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2421 		return -ENODEV;
2422 	}
2423 	ret = btrfs_scrub_cancel_dev(root, dev);
2424 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2425 
2426 	return ret;
2427 }
2428 
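/*
 * Copy the current statistics of the scrub on @devid into @progress.
 * Returns -ENODEV if the device is unknown, -ENOTCONN if no scrub is
 * running on it.
 */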
2429 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
2430 			 struct btrfs_scrub_progress *progress)
2431 {
2432 	struct btrfs_device *dev;
2433 	struct scrub_dev *sdev = NULL;
2434 
2435 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2436 	dev = btrfs_find_device(root, devid, NULL, NULL);
2437 	if (dev)
2438 		sdev = dev->scrub_device;
2439 	if (sdev)
2440 		memcpy(progress, &sdev->stat, sizeof(*progress));
2441 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2442 
2443 	return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
2444 }
2445