// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "snapshot.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"

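/*
 * Wipe the extents and xattrs btrees so each test starts from an empty
 * keyspace.
 */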
static void delete_test_keys(struct bch_fs *c)
{
	int ret;

	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
				      SPOS(0, 0, U32_MAX),
				      POS(0, U64_MAX),
				      0, NULL);
	BUG_ON(ret);

	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
				      SPOS(0, 0, U32_MAX),
				      POS(0, U64_MAX),
				      0, NULL);
	BUG_ON(ret);
}

/* unit tests */

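/*
 * Insert a single cookie key, then delete it twice through the same
 * iterator; the second delete exercises deleting at a position that has
 * already been deleted.
 */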
static int test_delete(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k.p.snapshot = U32_MAX;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_intent);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(trans, &iter) ?:
		bch2_trans_update(trans, &iter, &k.k_i, 0));
	bch_err_msg(c, ret, "update error");
	if (ret)
		goto err;

	pr_info("deleting once");
	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(trans, &iter) ?:
		bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error (first)");
	if (ret)
		goto err;

	pr_info("deleting twice");
	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(trans, &iter) ?:
		bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error (second)");
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

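/*
 * As test_delete(), but flush journal pins (forcing the btree node to be
 * written out) before deleting, so we delete a key that has already made
 * it to the btree rather than one still pinned in the journal.
 */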
static int test_delete_written(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k.p.snapshot = U32_MAX;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_intent);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(trans, &iter) ?:
		bch2_trans_update(trans, &iter, &k.k_i, 0));
	bch_err_msg(c, ret, "update error");
	if (ret)
		goto err;

	bch2_trans_unlock(trans);
	bch2_journal_flush_all_pins(&c->journal);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(trans, &iter) ?:
		bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error");
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

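/*
 * Insert @nr keys at consecutive offsets, then verify that forward and
 * backward iteration visits each key exactly once, in order.
 */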
static int test_iterate(struct bch_fs *c, u64 nr)
{
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset = i;
		ck.k.p.snapshot = U32_MAX;

		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			return ret;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_max(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					0, k, ({
			BUG_ON(k.k->p.offset != i++);
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		return ret;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = bch2_trans_run(c,
		for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
				SPOS(0, U64_MAX, U32_MAX), 0, k, ({
			BUG_ON(k.k->p.offset != --i);
			0;
		})));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		return ret;

	BUG_ON(i);
	return 0;
}

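/*
 * The same checks for extents: insert contiguous 8-sector extents and
 * verify that forward and backward iteration returns them end to end.
 */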
static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test extents");

	for (i = 0; i < nr; i += 8) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset = i + 8;
		ck.k.p.snapshot = U32_MAX;
		ck.k.size = 8;

		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			return ret;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_max(trans, iter, BTREE_ID_extents,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					0, k, ({
			BUG_ON(bkey_start_offset(k.k) != i);
			i = k.k->p.offset;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		return ret;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = bch2_trans_run(c,
		for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
				SPOS(0, U64_MAX, U32_MAX), 0, k, ({
			BUG_ON(k.k->p.offset != i);
			i = bkey_start_offset(k.k);
			0;
		})));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		return ret;

	BUG_ON(i);
	return 0;
}

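/*
 * Insert keys at every other offset, then verify that iterating with
 * BTREE_ITER_slots also returns the empty slots in between.
 */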
static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset = i * 2;
		ck.k.p.snapshot = U32_MAX;

		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			return ret;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_max(trans, iter, BTREE_ID_xattrs,
					  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					  0, k, ({
			BUG_ON(k.k->p.offset != i);
			i += 2;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		return ret;

	BUG_ON(i != nr * 2);

	pr_info("iterating forwards by slots");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_max(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					BTREE_ITER_slots, k, ({
			if (i >= nr * 2)
				break;

			BUG_ON(k.k->p.offset != i);
			BUG_ON(bkey_deleted(k.k) != (i & 1));

			i++;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards by slots");
	return ret;
}

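/*
 * As above, for extents: 8-sector extents with 8-sector holes between
 * them, iterated both normally and by slots.
 */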
static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i += 16) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset = i + 16;
		ck.k.p.snapshot = U32_MAX;
		ck.k.size = 8;

		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			return ret;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_max(trans, iter, BTREE_ID_extents,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					0, k, ({
			BUG_ON(bkey_start_offset(k.k) != i + 8);
			BUG_ON(k.k->size != 8);
			i += 16;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		return ret;

	BUG_ON(i != nr);

	pr_info("iterating forwards by slots");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_max(trans, iter, BTREE_ID_extents,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					BTREE_ITER_slots, k, ({
			if (i == nr)
				break;
			BUG_ON(bkey_deleted(k.k) != !(i % 16));

			BUG_ON(bkey_start_offset(k.k) != i);
			BUG_ON(k.k->size != 8);
			i = k.k->p.offset;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards by slots");
	return ret;
}

/*
 * XXX: we really want to make sure we've got a btree with depth > 0 for these
 * tests
 */
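/*
 * Peeking in an empty keyspace must return no key, and a repeated peek
 * must be just as empty.
 */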
static int test_peek_end(struct bch_fs *c, u64 nr)
{
	delete_test_keys(c);

	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return 0;
}

static int test_peek_end_extents(struct bch_fs *c, u64 nr)
{
	delete_test_keys(c);

	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(0, 0, U32_MAX), 0);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return 0;
}

/* extent unit tests */

static u64 test_version;

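/*
 * Insert a cookie extent spanning [start, end), stamped with a
 * monotonically increasing version.
 */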
static int insert_test_extent(struct bch_fs *c,
			      u64 start, u64 end)
{
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k_i.k.p.offset = end;
	k.k_i.k.p.snapshot = U32_MAX;
	k.k_i.k.size = end - start;
	k.k_i.k.bversion.lo = test_version++;

	ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0);
	bch_err_fn(c, ret);
	return ret;
}

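/*
 * Insert extent e1, then e2 overlapping it, exercising the extent
 * overwrite paths (trimming and splitting the existing extent).
 */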
static int __test_extent_overwrite(struct bch_fs *c,
				    u64 e1_start, u64 e1_end,
				    u64 e2_start, u64 e2_end)
{
	int ret;

	ret   = insert_test_extent(c, e1_start, e1_end) ?:
		insert_test_extent(c, e2_start, e2_end);

	delete_test_keys(c);
	return ret;
}

static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
	return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
		__test_extent_overwrite(c, 8, 64, 0, 32);
}

static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
	return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
		__test_extent_overwrite(c, 0, 64, 32, 72);
}

static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
	return __test_extent_overwrite(c, 0, 64, 32, 40);
}

static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
	return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
		__test_extent_overwrite(c, 32, 64,  0, 128) ?:
		__test_extent_overwrite(c, 32, 64, 32,  64) ?:
		__test_extent_overwrite(c, 32, 64, 32, 128);
}

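/* Insert a cookie extent covering [start, start + len) at snapshot @snapid. */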
static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start, u32 len, u32 snapid)
{
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k_i.k.p.inode	= inum;
	k.k_i.k.p.offset = start + len;
	k.k_i.k.p.snapshot = snapid;
	k.k_i.k.size = len;

	ret = bch2_trans_commit_do(c, NULL, NULL, 0,
		bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
					    BTREE_UPDATE_internal_snapshot_node));
	bch_err_fn(c, ret);
	return ret;
}

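/*
 * Deliberately create overlapping extents, both within a single snapshot
 * and across the snapshots U32_MAX and U32_MAX - 2.
 */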
static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
{
	return  insert_test_overlapping_extent(c, inum,  0, 16, U32_MAX - 2) ?: /* overwrite entire */
		insert_test_overlapping_extent(c, inum,  2,  8, U32_MAX - 2) ?:
		insert_test_overlapping_extent(c, inum,  4,  4, U32_MAX) ?:
		insert_test_overlapping_extent(c, inum, 32,  8, U32_MAX - 2) ?: /* overwrite front/back */
		insert_test_overlapping_extent(c, inum, 36,  8, U32_MAX) ?:
		insert_test_overlapping_extent(c, inum, 60,  8, U32_MAX - 2) ?:
		insert_test_overlapping_extent(c, inum, 64,  8, U32_MAX);
}

/* snapshot unit tests */

/* Test skipping over keys in unrelated snapshots: */
static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie cookie;
	int ret;

	bkey_cookie_init(&cookie.k_i);
	cookie.k.p.snapshot = snapid_hi;
	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
	if (ret)
		return ret;

	trans = bch2_trans_get(c);
	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, snapid_lo), 0);
	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));

	BUG_ON(k.k->p.snapshot != U32_MAX);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

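/*
 * Insert a key at snapshot U32_MAX, create two sibling child snapshots,
 * then check that iterating from one sibling skips the key that exists
 * only in the other and returns the ancestor's key instead.
 */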
static int test_snapshots(struct bch_fs *c, u64 nr)
{
	struct bkey_i_cookie cookie;
	u32 snapids[2];
	u32 snapid_subvols[2] = { 1, 1 };
	int ret;

	bkey_cookie_init(&cookie.k_i);
	cookie.k.p.snapshot = U32_MAX;
	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
	if (ret)
		return ret;

	ret = bch2_trans_commit_do(c, NULL, NULL, 0,
		      bch2_snapshot_node_create(trans, U32_MAX,
						snapids,
						snapid_subvols,
						2));
	if (ret)
		return ret;

	if (snapids[0] > snapids[1])
		swap(snapids[0], snapids[1]);

	ret = test_snapshot_filter(c, snapids[0], snapids[1]);
	bch_err_msg(c, ret, "from test_snapshot_filter");
	return ret;
}

/* perf tests */

static u64 test_rand(void)
{
	u64 v;

	get_random_bytes(&v, sizeof(v));
	return v;
}

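/* @nr inserts at random offsets, one key per transaction commit. */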
static int rand_insert(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_cookie k;
	int ret = 0;
	u64 i;

	for (i = 0; i < nr; i++) {
		bkey_cookie_init(&k.k_i);
		k.k.p.offset = test_rand();
		k.k.p.snapshot = U32_MAX;

		ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}

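/* As rand_insert(), but batching eight inserts per transaction commit. */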
static int rand_insert_multi(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_cookie k[8];
	int ret = 0;
	unsigned j;
	u64 i;

	for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
		for (j = 0; j < ARRAY_SIZE(k); j++) {
			bkey_cookie_init(&k[j].k_i);
			k[j].k.p.offset = test_rand();
			k[j].k.p.snapshot = U32_MAX;
		}

		ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[7].k_i, 0));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}

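/* @nr point lookups at random offsets through a single reused iterator. */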
static int rand_lookup(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;
	u64 i;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	for (i = 0; i < nr; i++) {
		bch2_btree_iter_set_pos(trans, &iter, SPOS(0, test_rand(), U32_MAX));

		lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(trans, &iter)));
		ret = bkey_err(k);
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

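/*
 * One random lookup per iteration; every fourth iteration also overwrites
 * whatever key was found.
 */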
static int rand_mixed_trans(struct btree_trans *trans,
			    struct btree_iter *iter,
			    struct bkey_i_cookie *cookie,
			    u64 i, u64 pos)
{
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, U32_MAX));

	k = bch2_btree_iter_peek(trans, iter);
	ret = bkey_err(k);
	bch_err_msg(trans->c, ret, "lookup error");
	if (ret)
		return ret;

	if (!(i & 3) && k.k) {
		bkey_cookie_init(&cookie->k_i);
		cookie->k.p = iter->pos;
		ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
	}

	return ret;
}

static int rand_mixed(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie cookie;
	int ret = 0;
	u64 i, rand;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	for (i = 0; i < nr; i++) {
		rand = test_rand();
		ret = commit_do(trans, NULL, NULL, 0,
			rand_mixed_trans(trans, &iter, &cookie, i, rand));
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

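/* Delete the first key at or after @pos, if there is one. */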
static int __do_delete(struct btree_trans *trans, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX));
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!k.k)
		goto err;

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int rand_delete(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;
	u64 i;

	for (i = 0; i < nr; i++) {
		struct bpos pos = SPOS(0, test_rand(), U32_MAX);

		ret = commit_do(trans, NULL, NULL, 0,
			__do_delete(trans, pos));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}

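/*
 * Walk the first @nr positions by slots, inserting a cookie at each one
 * and committing as we go.
 */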
static int seq_insert(struct bch_fs *c, u64 nr)
{
	struct bkey_i_cookie insert;

	bkey_cookie_init(&insert.k_i);

	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_slots|BTREE_ITER_intent, k,
					NULL, NULL, 0, ({
			if (iter.pos.offset >= nr)
				break;
			insert.k.p = iter.pos;
			bch2_trans_update(trans, &iter, &insert.k_i, 0);
		})));
}

static int seq_lookup(struct bch_fs *c, u64 nr)
{
	return bch2_trans_run(c,
		for_each_btree_key_max(trans, iter, BTREE_ID_xattrs,
				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				  0, k,
		0));
}

static int seq_overwrite(struct bch_fs *c, u64 nr)
{
	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_intent, k,
					NULL, NULL, 0, ({
			struct bkey_i_cookie u;

			bkey_reassemble(&u.k_i, k);
			bch2_trans_update(trans, &iter, &u.k_i, 0);
		})));
}

static int seq_delete(struct bch_fs *c, u64 nr)
{
	return bch2_btree_delete_range(c, BTREE_ID_xattrs,
				      SPOS(0, 0, U32_MAX),
				      POS(0, U64_MAX),
				      0, NULL);
}

typedef int (*perf_test_fn)(struct bch_fs *, u64);

struct test_job {
	struct bch_fs			*c;
	u64				nr;
	unsigned			nr_threads;
	perf_test_fn			fn;

	atomic_t			ready;
	wait_queue_head_t		ready_wait;

	atomic_t			done;
	struct completion		done_completion;

	u64				start;
	u64				finish;
	int				ret;
};

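/*
 * Worker thread: the last thread to become ready starts the clock, and the
 * last thread to finish stops it, keeping thread startup and teardown out
 * of the measured interval.
 */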
static int btree_perf_test_thread(void *data)
{
	struct test_job *j = data;
	int ret;

	if (atomic_dec_and_test(&j->ready)) {
		wake_up(&j->ready_wait);
		j->start = sched_clock();
	} else {
		wait_event(j->ready_wait, !atomic_read(&j->ready));
	}

	ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
	if (ret) {
		bch_err(j->c, "%ps: error %s", j->fn, bch2_err_str(ret));
		j->ret = ret;
	}

	if (atomic_dec_and_test(&j->done)) {
		j->finish = sched_clock();
		complete(&j->done_completion);
	}

	return 0;
}

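/*
 * Entry point for both the unit and perf tests: run @testname with @nr
 * iterations split across @nr_threads kthreads, then report total runtime,
 * per-iteration latency and throughput. (This is typically driven by
 * writing "<test> <nr> <nr_threads>" to the filesystem's perf_test sysfs
 * attribute.)
 */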
int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
			 u64 nr, unsigned nr_threads)
{
	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
	char name_buf[20];
	struct printbuf nr_buf = PRINTBUF;
	struct printbuf per_sec_buf = PRINTBUF;
	unsigned i;
	u64 time;

	if (nr == 0 || nr_threads == 0) {
		pr_err("nr of iterations or threads is not allowed to be 0");
		return -EINVAL;
	}

	atomic_set(&j.ready, nr_threads);
	init_waitqueue_head(&j.ready_wait);

	atomic_set(&j.done, nr_threads);
	init_completion(&j.done_completion);

#define perf_test(_test)				\
	if (!strcmp(testname, #_test)) j.fn = _test

	perf_test(rand_insert);
	perf_test(rand_insert_multi);
	perf_test(rand_lookup);
	perf_test(rand_mixed);
	perf_test(rand_delete);

	perf_test(seq_insert);
	perf_test(seq_lookup);
	perf_test(seq_overwrite);
	perf_test(seq_delete);

	/* unit tests, not perf tests: */
	perf_test(test_delete);
	perf_test(test_delete_written);
	perf_test(test_iterate);
	perf_test(test_iterate_extents);
	perf_test(test_iterate_slots);
	perf_test(test_iterate_slots_extents);
	perf_test(test_peek_end);
	perf_test(test_peek_end_extents);

	perf_test(test_extent_overwrite_front);
	perf_test(test_extent_overwrite_back);
	perf_test(test_extent_overwrite_middle);
	perf_test(test_extent_overwrite_all);
	perf_test(test_extent_create_overlapping);

	perf_test(test_snapshots);

	if (!j.fn) {
		pr_err("unknown test %s", testname);
		return -EINVAL;
	}

	//pr_info("running test %s:", testname);

	if (nr_threads == 1)
		btree_perf_test_thread(&j);
	else
		for (i = 0; i < nr_threads; i++)
			kthread_run(btree_perf_test_thread, &j,
				    "bcachefs perf test[%u]", i);

	while (wait_for_completion_interruptible(&j.done_completion))
		;

	time = j.finish - j.start;

	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
	prt_human_readable_u64(&nr_buf, nr);
	prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
		name_buf, nr_buf.buf, nr_threads,
		div_u64(time, NSEC_PER_SEC),
		div_u64(time * nr_threads, nr),
		per_sec_buf.buf);
	printbuf_exit(&per_sec_buf);
	printbuf_exit(&nr_buf);
	return j.ret;
}

#endif /* CONFIG_BCACHEFS_TESTS */