// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"
#include "../btrfs_inode.h"

#define PROCESS_UNLOCK		(1 << 0)
#define PROCESS_RELEASE		(1 << 1)
#define PROCESS_TEST_LOCKED	(1 << 2)

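/*
 * Walk all pages cached in the range [@start, @end] and apply the PROCESS_*
 * actions in @flags.  Returns the number of pages that failed the
 * PROCESS_TEST_LOCKED check, so 0 means every page in the range was locked
 * as expected.
 */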
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct folio_batch fbatch;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	int i;
	int count = 0;
	int loops = 0;

	folio_batch_init(&fbatch);

	while (index <= end_index) {
		ret = filemap_get_folios_contig(inode->i_mapping, &index,
				end_index, &fbatch);
		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			if (flags & PROCESS_TEST_LOCKED &&
			    !folio_test_locked(folio))
				count++;
			if (flags & PROCESS_UNLOCK && folio_test_locked(folio))
				folio_unlock(folio);
			if (flags & PROCESS_RELEASE)
				folio_put(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR
		"stuck in a loop, start %llu, end %llu, ret %d\n",
				start, end, ret);
			break;
		}
	}

	return count;
}

#define STATE_FLAG_STR_LEN			256

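/*
 * Append the flag name to @dest if EXTENT_<name> is set in @state->state,
 * separating the names with '|', e.g. "DIRTY|DELALLOC".
 */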
#define PRINT_ONE_FLAG(state, dest, cur, name)				\
({									\
	if (state->state & EXTENT_##name)				\
		cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur,	\
				 "%s" #name, cur == 0 ? "" : "|");	\
})

static void extent_flag_to_str(const struct extent_state *state, char *dest)
{
	int cur = 0;

	dest[0] = 0;
	PRINT_ONE_FLAG(state, dest, cur, DIRTY);
	PRINT_ONE_FLAG(state, dest, cur, UPTODATE);
	PRINT_ONE_FLAG(state, dest, cur, LOCKED);
	PRINT_ONE_FLAG(state, dest, cur, NEW);
	PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
	PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
	PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
	PRINT_ONE_FLAG(state, dest, cur, NODATASUM);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV);
	PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT);
	PRINT_ONE_FLAG(state, dest, cur, NORESERVE);
	PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV);
}

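/*
 * Dump every extent state in the tree, one line per state, e.g.:
 *
 *   io tree content:
 *     start=0 len=4096 flags=DELALLOC
 */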
static void dump_extent_io_tree(const struct extent_io_tree *tree)
{
	struct rb_node *node;
	char flags_str[STATE_FLAG_STR_LEN];

	node = rb_first(&tree->state);
	test_msg("io tree content:");
	while (node) {
		struct extent_state *state;

		state = rb_entry(node, struct extent_state, rb_node);
		extent_flag_to_str(state, flags_str);
		test_msg("  start=%llu len=%llu flags=%s", state->start,
			 state->end + 1 - state->start, flags_str);
		node = rb_next(node);
	}
}

static int test_find_delalloc(u32 sectorsize)
{
	struct inode *inode;
	struct extent_io_tree *tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	/* In this test we need at least 2 file extents at the maximum extent size */
	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
	u64 total_dirty = 2 * max_bytes;
	u64 start, end, test_start;
	bool found;
	int ret = -EINVAL;

	test_msg("running find delalloc tests");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		return -ENOMEM;
	}
	tmp = &BTRFS_I(inode)->io_tree;

	/*
	 * Pass NULL for fs_info; it is only needed by tracepoints, which are
	 * not used at this point.
	 */
	extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);

	/*
	 * First go through and create and mark all of our pages dirty.  We
	 * pin everything to make sure our pages don't get evicted and break
	 * the test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_err("failed to allocate test page");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

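	/*
	 * find_lock_delalloc_range() searches the io tree for a delalloc
	 * range starting at *start, locks the pages backing it (except
	 * @locked_page, which is already locked) plus the extent range, and
	 * returns the resulting range via @start/@end.
	 */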
	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 * |---  search  ---|
	 */
	set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
	start = 0;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("should have found at least one delalloc");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_err("expected start 0 end %u, got start %llu end %llu",
			sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("couldn't find delalloc in our range");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_err("expected start %llu end %llu, got start %llu, end %llu",
				test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("there were unlocked pages in the range");
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (found) {
		test_err("found range when we shouldn't have");
		goto out_bits;
	}
	if (end != test_start + PAGE_SIZE - 1) {
		test_err("did not return the proper end offset");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|--- search ---|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);

	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_err("couldn't find our page");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, locked_page, &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != test_start + PAGE_SIZE - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, test_start + PAGE_SIZE - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	ret = 0;
out_bits:
	if (ret)
		dump_extent_io_tree(tmp);
	clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	return ret;
}

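/*
 * Compare every bit of @eb against the plain memory @bitmap, exercising both
 * addressing forms of extent_buffer_test_bit(): byte 0 plus a large bit
 * offset, and byte i / BITS_PER_BYTE plus bit i % BITS_PER_BYTE.
 */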
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i;

	for (i = 0; i < eb->len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, round_down(i, BITS_PER_BYTE));

			test_err(
		"bits do not match, start byte 0 bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i, i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, round_down(i, BITS_PER_BYTE));

			test_err(
		"bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i / BITS_PER_BYTE, i % BITS_PER_BYTE,
				 i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}
	}
	return 0;
}

static int test_bitmap_set(const char *name, unsigned long *bitmap,
			   struct extent_buffer *eb,
			   unsigned long byte_start, unsigned long bit_start,
			   unsigned long bit_len)
{
	int ret;

	bitmap_set(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_set(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}

static int test_bitmap_clear(const char *name, unsigned long *bitmap,
			     struct extent_buffer *eb,
			     unsigned long byte_start, unsigned long bit_start,
			     unsigned long bit_len)
{
	int ret;

	bitmap_clear(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_clear(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}
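
/*
 * Run a series of set/clear operations, mirroring each one in the plain
 * @bitmap via bitmap_set()/bitmap_clear() and verifying after every step
 * that @eb and the bitmap still agree.
 */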
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i, j;
	unsigned long byte_len = eb->len;
	u32 x;
	int ret;

	ret = test_bitmap_clear("clear all run 1", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("clear all run 2", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("same byte set", bitmap, eb, 0, 2, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("same byte partial clear", bitmap, eb, 0, 4, 1);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross byte set", bitmap, eb, 2, 4, 8);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross multi byte set", bitmap, eb, 4, 4, 24);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross byte clear", bitmap, eb, 2, 6, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross multi byte clear", bitmap, eb, 4, 6, 20);
	if (ret < 0)
		return ret;

	/* Straddling pages test: set/clear a range crossing a page boundary */
	if (byte_len > PAGE_SIZE) {
		ret = test_bitmap_set("cross page set", bitmap, eb,
				      PAGE_SIZE - sizeof(long) / 2, 0,
				      sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_set("cross page set all", bitmap, eb, 0, 0,
				      byte_len * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_clear("cross page clear", bitmap, eb,
					PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

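	/* 32-bit linear congruential generator (Numerical Recipes constants) */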
	for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}

	ret = check_eb_bitmap(bitmap, eb);
	if (ret) {
		test_err("random bit pattern failed");
		return ret;
	}

	return 0;
}

static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	unsigned long *bitmap = NULL;
	struct extent_buffer *eb = NULL;
	int ret;

	test_msg("running extent buffer bitmap tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	bitmap = kmalloc(nodesize, GFP_KERNEL);
	if (!bitmap) {
		test_err("couldn't allocate test bitmap");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
	if (ret)
		goto out;

	free_extent_buffer(eb);

	/*
	 * Test again for the case where the tree block is sectorsize aligned
	 * but not nodesize aligned.
	 */
	eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

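/*
 * find_first_clear_extent_bit() returns the range of clear bits containing
 * @start; if @start falls inside a range that has the bits set, the next
 * clear range is returned instead.
 */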
static int test_find_first_clear_extent_bit(void)
{
	struct extent_io_tree tree;
	u64 start, end;
	int ret = -EINVAL;

	test_msg("running find_first_clear_extent_bit test");

	extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);

	/* Test correct handling of empty tree */
	find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
	if (start != 0 || end != -1) {
		test_err(
	"error getting a range from completely empty tree: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Set 1M-4M alloc/discard; the 32M-64M range is added further below,
	 * leaving a hole between 4M-32M.
	 */
	set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
		       CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != 0 || end != SZ_1M - 1) {
		test_err("error finding beginning range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/* Now add 32M-64M so that we have a hole between 4M-32M */
	set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
		       CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	/* Request first hole starting at 12M, we should get 4M-32M */
	find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding trimmed range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search in the middle of allocated range, should get the next one
	 * available, which happens to be unallocated -> 4M-32M
	 */
	find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding next unalloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Set 64M-72M with CHUNK_ALLOCATED, then search for the CHUNK_TRIMMED
	 * flag being unset in this range; we should get the 64M-72M entry.
	 */
	set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
	find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
				    CHUNK_TRIMMED);

	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding exact range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search in the middle of a set range whose immediate neighbour
	 * doesn't have the bits set, so the neighbour must be returned.
	 */
	find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
				    CHUNK_TRIMMED);

	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding next alloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search beyond any known range, shall return after last known range
	 * and end should be -1
	 */
	find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
	if (start != SZ_64M + SZ_8M || end != -1) {
		test_err(
	"error handling beyond end of range search: start %llu end %llu",
			start, end);
		goto out;
	}

	ret = 0;
out:
	if (ret)
		dump_extent_io_tree(&tree);
	clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);

	return ret;
}

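/*
 * Byte-by-byte comparison of @eb and @memory, reporting the first byte that
 * differs.  Only used to produce a useful error message once
 * verify_eb_and_memory() has detected a mismatch.
 */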
static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory,
					const char *test_name)
{
	for (int i = 0; i < eb->len; i++) {
		struct page *page = eb->pages[i >> PAGE_SHIFT];
		void *addr = page_address(page) + offset_in_page(i);

		if (memcmp(addr, memory + i, 1) != 0) {
			test_err("%s failed", test_name);
			test_err("eb and memory diffs at byte %u, eb has 0x%02x memory has 0x%02x",
				 i, *(u8 *)addr, *(u8 *)(memory + i));
			return;
		}
	}
}

static int verify_eb_and_memory(struct extent_buffer *eb, void *memory,
				const char *test_name)
{
	for (int i = 0; i < (eb->len >> PAGE_SHIFT); i++) {
		void *eb_addr = page_address(eb->pages[i]);

		if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) {
			dump_eb_and_memory_contents(eb, memory, test_name);
			return -EUCLEAN;
		}
	}
	return 0;
}

/*
 * Initialize both the memory buffer and the extent buffer with the same
 * randomly generated data.
 */
static void init_eb_and_memory(struct extent_buffer *eb, void *memory)
{
	get_random_bytes(memory, eb->len);
	write_extent_buffer(eb, memory, 0, eb->len);
}

static int test_eb_mem_ops(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct extent_buffer *eb = NULL;
	void *memory = NULL;
	int ret;

	test_msg("running extent buffer memory operation tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	memory = kvzalloc(nodesize, GFP_KERNEL);
	if (!memory) {
		test_err("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, SZ_1M, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_EXTENT_BUFFER);
		ret = -ENOMEM;
		goto out;
	}

	init_eb_and_memory(eb, memory);
	ret = verify_eb_and_memory(eb, memory, "full eb write");
	if (ret < 0)
		goto out;

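	/*
	 * Each operation below is mirrored on the plain memory buffer, then
	 * the whole extent buffer is compared against it.
	 */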
	memcpy(memory, memory + 16, 16);
	memcpy_extent_buffer(eb, 0, 16, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 1");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 2048, 16);
	memcpy_extent_buffer(eb, 0, 2048, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 2");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 2048, 2048);
	memcpy_extent_buffer(eb, 0, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 3");
	if (ret < 0)
		goto out;

	memmove(memory + 512, memory + 256, 512);
	memmove_extent_buffer(eb, 512, 256, 512);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memmove 1");
	if (ret < 0)
		goto out;

	memmove(memory + 2048, memory + 512, 2048);
	memmove_extent_buffer(eb, 2048, 512, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memmove 2");
	if (ret < 0)
		goto out;

	memmove(memory + 512, memory + 2048, 2048);
	memmove_extent_buffer(eb, 512, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memmove 3");
	if (ret < 0)
		goto out;

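	/*
	 * The offsets below straddle the 4K mark, a page boundary on 4K page
	 * systems; with larger page sizes the ranges still exercise the
	 * multi-page extent buffer code paths.
	 */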
	if (nodesize > PAGE_SIZE) {
		memcpy(memory, memory + 4096 - 128, 256);
		memcpy_extent_buffer(eb, 0, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 1");
		if (ret < 0)
			goto out;

		memcpy(memory + 4096 - 128, memory + 4096 + 128, 256);
		memcpy_extent_buffer(eb, 4096 - 128, 4096 + 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 2");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 128, memory + 4096 - 64, 256);
		memmove_extent_buffer(eb, 4096 - 128, 4096 - 64, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memmove 1");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 64, memory + 4096 - 128, 256);
		memmove_extent_buffer(eb, 4096 - 64, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memmove 2");
		if (ret < 0)
			goto out;
	}
out:
	free_extent_buffer(eb);
	kvfree(memory);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

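/*
 * Entry point for the extent I/O self tests, called from
 * btrfs_run_sanity_tests() for each supported sectorsize/nodesize
 * combination.
 */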
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("running extent I/O tests");

	ret = test_find_delalloc(sectorsize);
	if (ret)
		goto out;

	ret = test_find_first_clear_extent_bit();
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
	if (ret)
		goto out;

	ret = test_eb_mem_ops(sectorsize, nodesize);
out:
	return ret;
}
813