xref: /linux/fs/bcachefs/tests.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_TESTS
3 
4 #include "bcachefs.h"
5 #include "btree_update.h"
6 #include "journal_reclaim.h"
7 #include "snapshot.h"
8 #include "tests.h"
9 
10 #include "linux/kthread.h"
11 #include "linux/random.h"
12 
13 static void delete_test_keys(struct bch_fs *c)
14 {
15 	int ret;
16 
17 	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
18 				      SPOS(0, 0, U32_MAX),
19 				      POS(0, U64_MAX),
20 				      0, NULL);
21 	BUG_ON(ret);
22 
23 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
24 				      SPOS(0, 0, U32_MAX),
25 				      POS(0, U64_MAX),
26 				      0, NULL);
27 	BUG_ON(ret);
28 }
29 
30 /* unit tests */
31 
/*
 * Unit test: insert a single cookie key at (0, 0, U32_MAX), then delete it
 * twice through the same iterator.  The second delete exercises deleting at
 * a position whose key is already gone.  Returns 0 or a bch2 error code.
 */
static int test_delete(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k.p.snapshot = U32_MAX;

	/* INTENT: we will update/delete at this position */
	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_INTENT);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &k.k_i, 0));
	bch_err_msg(c, ret, "update error");
	if (ret)
		goto err;

	pr_info("deleting once");
	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error (first)");
	if (ret)
		goto err;

	pr_info("deleting twice");
	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error (second)");
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
72 
/*
 * Unit test: like test_delete(), but flush the journal after the insert so
 * the key has been written out to the btree before we delete it — the
 * delete then hits an on-disk key rather than one still in the journal.
 */
static int test_delete_written(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k.p.snapshot = U32_MAX;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_INTENT);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &k.k_i, 0));
	bch_err_msg(c, ret, "update error");
	if (ret)
		goto err;

	/* drop btree locks before blocking on journal reclaim */
	bch2_trans_unlock(trans);
	bch2_journal_flush_all_pins(&c->journal);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error");
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
107 
/*
 * Unit test: insert nr cookie keys at offsets 0..nr-1, then verify that a
 * forward iteration sees them in ascending order and a reverse iteration
 * sees them in descending order, with no keys missing or duplicated.
 */
static int test_iterate(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset = i;
		ck.k.p.snapshot = U32_MAX;

		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
	}

	pr_info("iterating forwards");

	i = 0;

	/* each key's offset must equal its ordinal position */
	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				  0, k, ({
		BUG_ON(k.k->p.offset != i++);
		0;
	}));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
					 SPOS(0, U64_MAX, U32_MAX), 0, k,
		({
			BUG_ON(k.k->p.offset != --i);
			0;
		}));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		goto err;

	/* i counted back down to zero iff both passes saw all nr keys */
	BUG_ON(i);
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
167 
/*
 * Unit test: insert contiguous 8-sector extents covering [0, nr), then
 * verify forward and reverse iteration each walk the extents back-to-back
 * (each extent starts exactly where the previous one ended).
 */
static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test extents");

	for (i = 0; i < nr; i += 8) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		/* extent keys are positioned at their end offset */
		ck.k.p.offset = i + 8;
		ck.k.p.snapshot = U32_MAX;
		ck.k.size = 8;

		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
	}

	pr_info("iterating forwards");

	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				  0, k, ({
		BUG_ON(bkey_start_offset(k.k) != i);
		i = k.k->p.offset;
		0;
	}));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
					 SPOS(0, U64_MAX, U32_MAX), 0, k,
		({
			BUG_ON(k.k->p.offset != i);
			i = bkey_start_offset(k.k);
			0;
		}));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		goto err;

	BUG_ON(i);
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
230 
/*
 * Unit test: insert cookie keys at even offsets only, then check that a
 * normal iteration sees just those keys while a BTREE_ITER_SLOTS iteration
 * also yields a deleted (whiteout) key for every empty odd slot.
 */
static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset = i * 2;
		ck.k.p.snapshot = U32_MAX;

		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
	}

	pr_info("iterating forwards");

	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				  0, k, ({
		BUG_ON(k.k->p.offset != i);
		i += 2;
		0;
	}));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;

	BUG_ON(i != nr * 2);

	pr_info("iterating forwards by slots");

	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				  BTREE_ITER_SLOTS, k, ({
		if (i >= nr * 2)
			break;

		BUG_ON(k.k->p.offset != i);
		/* odd slots were never filled, so they must read as deleted */
		BUG_ON(bkey_deleted(k.k) != (i & 1));

		i++;
		0;
	}));
	if (ret < 0) {
		bch_err_msg(c, ret, "error iterating forwards by slots");
		goto err;
	}
	ret = 0;
err:
	/*
	 * NOTE(review): no explicit bch2_trans_iter_exit() here, unlike
	 * test_iterate() — presumably the for_each macros exit the iterator
	 * themselves; confirm against their definitions.
	 */
	bch2_trans_put(trans);
	return ret;
}
298 
299 static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
300 {
301 	struct btree_trans *trans = bch2_trans_get(c);
302 	struct btree_iter iter = { NULL };
303 	struct bkey_s_c k;
304 	u64 i;
305 	int ret = 0;
306 
307 	delete_test_keys(c);
308 
309 	pr_info("inserting test keys");
310 
311 	for (i = 0; i < nr; i += 16) {
312 		struct bkey_i_cookie ck;
313 
314 		bkey_cookie_init(&ck.k_i);
315 		ck.k.p.offset = i + 16;
316 		ck.k.p.snapshot = U32_MAX;
317 		ck.k.size = 8;
318 
319 		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
320 		bch_err_msg(c, ret, "insert error");
321 		if (ret)
322 			goto err;
323 	}
324 
325 	pr_info("iterating forwards");
326 
327 	i = 0;
328 
329 	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
330 				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
331 				  0, k, ({
332 		BUG_ON(bkey_start_offset(k.k) != i + 8);
333 		BUG_ON(k.k->size != 8);
334 		i += 16;
335 		0;
336 	}));
337 	bch_err_msg(c, ret, "error iterating forwards");
338 	if (ret)
339 		goto err;
340 
341 	BUG_ON(i != nr);
342 
343 	pr_info("iterating forwards by slots");
344 
345 	i = 0;
346 
347 	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
348 				 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
349 				 BTREE_ITER_SLOTS, k, ({
350 		if (i == nr)
351 			break;
352 		BUG_ON(bkey_deleted(k.k) != !(i % 16));
353 
354 		BUG_ON(bkey_start_offset(k.k) != i);
355 		BUG_ON(k.k->size != 8);
356 		i = k.k->p.offset;
357 		0;
358 	}));
359 	bch_err_msg(c, ret, "error iterating forwards by slots");
360 	if (ret)
361 		goto err;
362 	ret = 0;
363 err:
364 	bch2_trans_put(trans);
365 	return 0;
366 }
367 
368 /*
369  * XXX: we really want to make sure we've got a btree with depth > 0 for these
370  * tests
371  */
372 static int test_peek_end(struct bch_fs *c, u64 nr)
373 {
374 	struct btree_trans *trans = bch2_trans_get(c);
375 	struct btree_iter iter;
376 	struct bkey_s_c k;
377 
378 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
379 			     SPOS(0, 0, U32_MAX), 0);
380 
381 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
382 	BUG_ON(k.k);
383 
384 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
385 	BUG_ON(k.k);
386 
387 	bch2_trans_iter_exit(trans, &iter);
388 	bch2_trans_put(trans);
389 	return 0;
390 }
391 
392 static int test_peek_end_extents(struct bch_fs *c, u64 nr)
393 {
394 	struct btree_trans *trans = bch2_trans_get(c);
395 	struct btree_iter iter;
396 	struct bkey_s_c k;
397 
398 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
399 			     SPOS(0, 0, U32_MAX), 0);
400 
401 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
402 	BUG_ON(k.k);
403 
404 	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
405 	BUG_ON(k.k);
406 
407 	bch2_trans_iter_exit(trans, &iter);
408 	bch2_trans_put(trans);
409 	return 0;
410 }
411 
412 /* extent unit tests */
413 
414 static u64 test_version;
415 
/*
 * Insert a cookie extent covering sectors [start, end) into the extents
 * btree, stamping it with a monotonically increasing version so overwrite
 * tests get distinct versions.  Returns 0 or a bch2 error code.
 */
static int insert_test_extent(struct bch_fs *c,
			      u64 start, u64 end)
{
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k_i.k.p.offset = end;		/* extent position = end offset */
	k.k_i.k.p.snapshot = U32_MAX;
	k.k_i.k.size = end - start;
	k.k_i.k.version.lo = test_version++;

	ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0);
	bch_err_fn(c, ret);
	return ret;
}
432 
/*
 * Insert extent e1, then overwrite (part of) it with extent e2, exercising
 * the extent-overwrite paths; clean up afterwards regardless of outcome.
 * Returns the first insert error, if any.
 */
static int __test_extent_overwrite(struct bch_fs *c,
				    u64 e1_start, u64 e1_end,
				    u64 e2_start, u64 e2_end)
{
	int ret;

	ret   = insert_test_extent(c, e1_start, e1_end) ?:
		insert_test_extent(c, e2_start, e2_end);

	/* always clean up so the next case starts from an empty keyspace */
	delete_test_keys(c);
	return ret;
}
445 
446 static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
447 {
448 	return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
449 		__test_extent_overwrite(c, 8, 64, 0, 32);
450 }
451 
452 static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
453 {
454 	return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
455 		__test_extent_overwrite(c, 0, 64, 32, 72);
456 }
457 
/* Overwrite the middle of an extent, splitting it into three pieces. */
static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
	return __test_extent_overwrite(c, 0, 64, 32, 40);
}
462 
463 static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
464 {
465 	return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
466 		__test_extent_overwrite(c, 32, 64,  0, 128) ?:
467 		__test_extent_overwrite(c, 32, 64, 32,  64) ?:
468 		__test_extent_overwrite(c, 32, 64, 32, 128);
469 }
470 
/*
 * Insert a cookie extent of length len at [start, start + len) for a given
 * inode and snapshot, via the nonextent insert path (no automatic overlap
 * resolution) — used to create deliberately overlapping extents.
 */
static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start, u32 len, u32 snapid)
{
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k_i.k.p.inode	= inum;
	k.k_i.k.p.offset = start + len;
	k.k_i.k.p.snapshot = snapid;
	k.k_i.k.size = len;

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
	bch_err_fn(c, ret);
	return ret;
}
488 
/*
 * Create overlapping extents across different snapshot IDs to exercise
 * overlap handling on repair paths.  Note: invoked through perf_test_fn,
 * so the second argument (nominally "nr") is reused as the inode number.
 */
static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
{
	return  insert_test_overlapping_extent(c, inum,  0, 16, U32_MAX - 2) ?: /* overwrite entire */
		insert_test_overlapping_extent(c, inum,  2,  8, U32_MAX - 2) ?:
		insert_test_overlapping_extent(c, inum,  4,  4, U32_MAX) ?:
		insert_test_overlapping_extent(c, inum, 32,  8, U32_MAX - 2) ?: /* overwrite front/back */
		insert_test_overlapping_extent(c, inum, 36,  8, U32_MAX) ?:
		insert_test_overlapping_extent(c, inum, 60,  8, U32_MAX - 2) ?:
		insert_test_overlapping_extent(c, inum, 64,  8, U32_MAX);
}
499 
500 /* snapshot unit tests */
501 
502 /* Test skipping over keys in unrelated snapshots: */
/*
 * Insert a key in snapshot snapid_hi, then iterate from snapshot snapid_lo:
 * the key in the unrelated sibling snapshot must be skipped, so the first
 * key seen should be the pre-existing one at snapshot U32_MAX.
 */
static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie cookie;
	int ret;

	bkey_cookie_init(&cookie.k_i);
	cookie.k.p.snapshot = snapid_hi;
	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
	if (ret)
		return ret;

	trans = bch2_trans_get(c);
	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, snapid_lo), 0);
	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));

	/*
	 * NOTE(review): k.k is dereferenced without checking for NULL or a
	 * peek error first — a failed/empty peek would oops here rather
	 * than BUG cleanly; verify whether that's acceptable for a test.
	 */
	BUG_ON(k.k->p.snapshot != U32_MAX);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
528 
/*
 * Snapshot unit test: insert a key in the root snapshot, create two child
 * snapshot nodes under U32_MAX, then check that iterating from the lower
 * child filters out keys belonging to the (unrelated) higher child.
 */
static int test_snapshots(struct bch_fs *c, u64 nr)
{
	struct bkey_i_cookie cookie;
	u32 snapids[2];
	u32 snapid_subvols[2] = { 1, 1 };
	int ret;

	bkey_cookie_init(&cookie.k_i);
	cookie.k.p.snapshot = U32_MAX;
	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
	if (ret)
		return ret;

	ret = bch2_trans_do(c, NULL, NULL, 0,
		      bch2_snapshot_node_create(trans, U32_MAX,
						snapids,
						snapid_subvols,
						2));
	if (ret)
		return ret;

	/* order so snapids[0] is the lower (iterate-from) snapshot ID */
	if (snapids[0] > snapids[1])
		swap(snapids[0], snapids[1]);

	ret = test_snapshot_filter(c, snapids[0], snapids[1]);
	bch_err_msg(c, ret, "from test_snapshot_filter");
	return ret;
}
557 
558 /* perf tests */
559 
560 static u64 test_rand(void)
561 {
562 	u64 v;
563 
564 	get_random_bytes(&v, sizeof(v));
565 	return v;
566 }
567 
/*
 * Perf test: insert nr cookie keys at random offsets, one per transaction
 * commit.  Stops at the first error.
 */
static int rand_insert(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_cookie k;
	int ret = 0;
	u64 i;

	for (i = 0; i < nr; i++) {
		bkey_cookie_init(&k.k_i);
		k.k.p.offset = test_rand();
		k.k.p.snapshot = U32_MAX;

		ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}
589 
/*
 * Perf test: like rand_insert(), but batch 8 random-offset inserts into a
 * single transaction commit to measure multi-update commit throughput.
 */
static int rand_insert_multi(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_cookie k[8];
	int ret = 0;
	unsigned j;
	u64 i;

	for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
		for (j = 0; j < ARRAY_SIZE(k); j++) {
			bkey_cookie_init(&k[j].k_i);
			k[j].k.p.offset = test_rand();
			k[j].k.p.snapshot = U32_MAX;
		}

		/* unrolled: commit_do's expression form can't loop over k[] */
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[7].k_i, 0));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}
621 
/*
 * Perf test: nr point lookups at random offsets through a single reused
 * iterator.  Stops at the first lookup error.
 */
static int rand_lookup(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;
	u64 i;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	for (i = 0; i < nr; i++) {
		bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));

		lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
		ret = bkey_err(k);
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
646 
/*
 * One iteration of the mixed lookup/update workload: look up the key at
 * pos, and on every 4th iteration (i % 4 == 0), if a key was found, queue
 * an overwrite of it in the current transaction.
 */
static int rand_mixed_trans(struct btree_trans *trans,
			    struct btree_iter *iter,
			    struct bkey_i_cookie *cookie,
			    u64 i, u64 pos)
{
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));

	k = bch2_btree_iter_peek(iter);
	ret = bkey_err(k);
	bch_err_msg(trans->c, ret, "lookup error");
	if (ret)
		return ret;

	if (!(i & 3) && k.k) {
		bkey_cookie_init(&cookie->k_i);
		cookie->k.p = iter->pos;
		ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
	}

	return ret;
}
671 
/*
 * Perf test: nr iterations of a mixed workload — random lookups with an
 * update mixed in every 4th iteration (see rand_mixed_trans()).
 */
static int rand_mixed(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie cookie;
	int ret = 0;
	u64 i, rand;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	for (i = 0; i < nr; i++) {
		rand = test_rand();
		ret = commit_do(trans, NULL, NULL, 0,
			rand_mixed_trans(trans, &iter, &cookie, i, rand));
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
695 
696 static int __do_delete(struct btree_trans *trans, struct bpos pos)
697 {
698 	struct btree_iter iter;
699 	struct bkey_s_c k;
700 	int ret = 0;
701 
702 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
703 			     BTREE_ITER_INTENT);
704 	k = bch2_btree_iter_peek(&iter);
705 	ret = bkey_err(k);
706 	if (ret)
707 		goto err;
708 
709 	if (!k.k)
710 		goto err;
711 
712 	ret = bch2_btree_delete_at(trans, &iter, 0);
713 err:
714 	bch2_trans_iter_exit(trans, &iter);
715 	return ret;
716 }
717 
/*
 * Perf test: nr deletes at random positions, one per transaction commit.
 * Positions with no key are a no-op (see __do_delete()).
 */
static int rand_delete(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;
	u64 i;

	for (i = 0; i < nr; i++) {
		struct bpos pos = SPOS(0, test_rand(), U32_MAX);

		ret = commit_do(trans, NULL, NULL, 0,
			__do_delete(trans, pos));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}
736 
/*
 * Perf test: sequentially insert a cookie key into every slot with offset
 * < nr, committing via the for_each_btree_key_commit loop.
 */
static int seq_insert(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie insert;

	bkey_cookie_init(&insert.k_i);

	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
					NULL, NULL, 0, ({
			if (iter.pos.offset >= nr)
				break;
			/* reuse one cookie, repositioned to the current slot */
			insert.k.p = iter.pos;
			bch2_trans_update(trans, &iter, &insert.k_i, 0);
		})));
}
756 
/*
 * Perf test: sequentially walk every key in the xattrs btree, doing no
 * per-key work — measures raw iteration speed.
 */
static int seq_lookup(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	return bch2_trans_run(c,
		for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				  0, k,
		0));
}
768 
/*
 * Perf test: sequentially overwrite every existing key with a reassembled
 * copy of itself, committing as the loop goes.
 */
static int seq_overwrite(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_INTENT, k,
					NULL, NULL, 0, ({
			struct bkey_i_cookie u;

			bkey_reassemble(&u.k_i, k);
			bch2_trans_update(trans, &iter, &u.k_i, 0);
		})));
}
785 
/* Perf test: delete the entire test key range with one ranged delete. */
static int seq_delete(struct bch_fs *c, u64 nr)
{
	return bch2_btree_delete_range(c, BTREE_ID_xattrs,
				      SPOS(0, 0, U32_MAX),
				      POS(0, U64_MAX),
				      0, NULL);
}
793 
/* Signature shared by all unit/perf tests: (filesystem, iteration count). */
typedef int (*perf_test_fn)(struct bch_fs *, u64);

/*
 * State shared by all worker threads of one perf test run; lives on the
 * caller's stack in bch2_btree_perf_test().
 */
struct test_job {
	struct bch_fs			*c;
	u64				nr;	/* total iterations, split across threads */
	unsigned			nr_threads;
	perf_test_fn			fn;	/* the test to run */

	atomic_t			ready;	/* start barrier: threads not yet ready */
	wait_queue_head_t		ready_wait;

	atomic_t			done;	/* threads not yet finished */
	struct completion		done_completion;

	u64				start;	/* sched_clock() when last thread started */
	u64				finish;	/* sched_clock() when last thread finished */
	int				ret;	/* error from a worker, if any */
};
812 
/*
 * Worker thread body.  The last thread to arrive at the ready barrier
 * records the start time and releases the others; each thread then runs its
 * nr/nr_threads share of the work, and the last one to finish records the
 * finish time and completes the job.
 */
static int btree_perf_test_thread(void *data)
{
	struct test_job *j = data;
	int ret;

	if (atomic_dec_and_test(&j->ready)) {
		wake_up(&j->ready_wait);
		j->start = sched_clock();
	} else {
		wait_event(j->ready_wait, !atomic_read(&j->ready));
	}

	ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
	if (ret) {
		bch_err(j->c, "%ps: error %s", j->fn, bch2_err_str(ret));
		j->ret = ret;	/* unsynchronized: last failing thread wins */
	}

	if (atomic_dec_and_test(&j->done)) {
		j->finish = sched_clock();
		complete(&j->done_completion);
	}

	return 0;
}
838 
/*
 * Entry point (from sysfs): look up a test by name, run it on nr_threads
 * threads with nr total iterations, and log the elapsed time and per-second
 * throughput.  Returns 0, the test's error, or -EINVAL for an unknown name.
 */
int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
			 u64 nr, unsigned nr_threads)
{
	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
	char name_buf[20];
	struct printbuf nr_buf = PRINTBUF;
	struct printbuf per_sec_buf = PRINTBUF;
	unsigned i;
	u64 time;

	atomic_set(&j.ready, nr_threads);
	init_waitqueue_head(&j.ready_wait);

	atomic_set(&j.done, nr_threads);
	init_completion(&j.done_completion);

/* map a test name string to its function; falls through if no match */
#define perf_test(_test)				\
	if (!strcmp(testname, #_test)) j.fn = _test

	perf_test(rand_insert);
	perf_test(rand_insert_multi);
	perf_test(rand_lookup);
	perf_test(rand_mixed);
	perf_test(rand_delete);

	perf_test(seq_insert);
	perf_test(seq_lookup);
	perf_test(seq_overwrite);
	perf_test(seq_delete);

	/* a unit test, not a perf test: */
	perf_test(test_delete);
	perf_test(test_delete_written);
	perf_test(test_iterate);
	perf_test(test_iterate_extents);
	perf_test(test_iterate_slots);
	perf_test(test_iterate_slots_extents);
	perf_test(test_peek_end);
	perf_test(test_peek_end_extents);

	perf_test(test_extent_overwrite_front);
	perf_test(test_extent_overwrite_back);
	perf_test(test_extent_overwrite_middle);
	perf_test(test_extent_overwrite_all);
	perf_test(test_extent_create_overlapping);

	perf_test(test_snapshots);

	if (!j.fn) {
		pr_err("unknown test %s", testname);
		return -EINVAL;
	}

	//pr_info("running test %s:", testname);

	if (nr_threads == 1)
		btree_perf_test_thread(&j);
	else
		/*
		 * NOTE(review): kthread_run()'s return value is unchecked —
		 * if thread creation fails, j.done is never fully decremented
		 * and the completion below would wait forever; confirm
		 * whether this path needs IS_ERR() handling.
		 */
		for (i = 0; i < nr_threads; i++)
			kthread_run(btree_perf_test_thread, &j,
				    "bcachefs perf test[%u]", i);

	/* retry through signals so the wait can't be interrupted partway */
	while (wait_for_completion_interruptible(&j.done_completion))
		;

	time = j.finish - j.start;

	/* NOTE(review): testnames longer than 18 chars are truncated here */
	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
	prt_human_readable_u64(&nr_buf, nr);
	prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
		name_buf, nr_buf.buf, nr_threads,
		div_u64(time, NSEC_PER_SEC),
		div_u64(time * nr_threads, nr),
		per_sec_buf.buf);
	printbuf_exit(&per_sec_buf);
	printbuf_exit(&nr_buf);
	return j.ret;
}
918 
919 #endif /* CONFIG_BCACHEFS_TESTS */
920