// SPDX-License-Identifier: GPL-2.0
/*
 * Assorted bcachefs debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "fsck.h"
#include "inode.h"
#include "super.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *bch_debug;

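/*
 * Read one replica of a btree node back from disk and compare it against
 * the in-memory version: the raw node is read into c->verify_data->data
 * (with a pristine copy stashed in c->verify_ondisk), validated and sorted
 * via bch2_btree_node_read_done(), then compared against @b's in-memory
 * keys. On a mismatch, both versions plus each on-disk bset are dumped to
 * the console along with the index of the first differing key.
 *
 * Returns true if the node failed verification; returns false, without
 * flagging a failure, if the replica couldn't be read or didn't pass
 * validation. Caller must hold c->verify_lock (see __bch2_btree_verify()).
 */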
static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
				      struct extent_ptr_decoded pick)
{
	struct btree *v = c->verify_data;
	struct btree_node *n_ondisk = c->verify_ondisk;
	struct btree_node *n_sorted = c->verify_data->data;
	struct bset *sorted, *inmemory = &b->data->keys;
	struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
	struct bio *bio;
	bool failed = false, saw_error = false;

	if (!bch2_dev_get_ioref(ca, READ))
		return false;

	bio = bio_alloc_bioset(ca->disk_sb.bdev,
			       buf_pages(n_sorted, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bch2_bio_map(bio, n_sorted, btree_buf_bytes(b));

	submit_bio_wait(bio);

	bio_put(bio);
	percpu_ref_put(&ca->io_ref);

	memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));

	v->written = 0;
	if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error)
		return false;

	n_sorted = c->verify_data->data;
	sorted = &n_sorted->keys;

	if (inmemory->u64s != sorted->u64s ||
	    memcmp(inmemory->start,
		   sorted->start,
		   vstruct_end(inmemory) - (void *) inmemory->start)) {
		unsigned offset = 0, sectors;
		struct bset *i;
		unsigned j;

		console_lock();

		printk(KERN_ERR "*** in memory:\n");
		bch2_dump_bset(c, b, inmemory, 0);

		printk(KERN_ERR "*** read back in:\n");
		bch2_dump_bset(c, v, sorted, 0);

		while (offset < v->written) {
			if (!offset) {
				i = &n_ondisk->keys;
				sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
					c->block_bits;
			} else {
				struct btree_node_entry *bne =
					(void *) n_ondisk + (offset << 9);
				i = &bne->keys;

				sectors = vstruct_blocks(bne, c->block_bits) <<
					c->block_bits;
			}

			printk(KERN_ERR "*** on disk block %u:\n", offset);
			bch2_dump_bset(c, b, i, offset);

			offset += sectors;
		}

		for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
			if (inmemory->_data[j] != sorted->_data[j])
				break;

		console_unlock();
		bch_err(c, "verify failed at key %u", j);

		failed = true;
	}

	if (v->written != b->written) {
		bch_err(c, "written wrong: expected %u, got %u",
			b->written, v->written);
		failed = true;
	}

	return failed;
}

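/*
 * Verify an in-memory btree node against every replica on disk. Scratch
 * buffers (c->verify_ondisk, c->verify_data) are allocated lazily and
 * protected by c->verify_lock; mem_ptr is cleared in the node's
 * btree_ptr_v2 keys first so in-memory-only state can't skew the
 * comparison. A mismatch on any replica is a fatal filesystem error.
 */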
void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
	struct bkey_ptrs_c ptrs;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	struct btree *v;
	struct bset *inmemory = &b->data->keys;
	struct bkey_packed *k;
	bool failed = false;

	if (c->opts.nochanges)
		return;

	bch2_btree_node_io_lock(b);
	mutex_lock(&c->verify_lock);

	if (!c->verify_ondisk) {
		c->verify_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
		if (!c->verify_ondisk)
			goto out;
	}

	if (!c->verify_data) {
		c->verify_data = __bch2_btree_node_mem_alloc(c);
		if (!c->verify_data)
			goto out;

		list_del_init(&c->verify_data->list);
	}

	BUG_ON(b->nsets != 1);

	for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
		if (k->type == KEY_TYPE_btree_ptr_v2)
			((struct bch_btree_ptr_v2 *) bkeyp_val(&b->format, k))->mem_ptr = 0;

	v = c->verify_data;
	bkey_copy(&v->key, &b->key);
	v->c.level	= b->c.level;
	v->c.btree_id	= b->c.btree_id;
	bch2_btree_keys_init(v);

	ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
	bkey_for_each_ptr_decode(&b->key.k, ptrs, p, entry)
		failed |= bch2_btree_verify_replica(c, b, p);

	if (failed) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
		bch2_fs_fatal_error(c, ": btree node verify failed for: %s\n", buf.buf);
		printbuf_exit(&buf);
	}
out:
	mutex_unlock(&c->verify_lock);
	bch2_btree_node_io_unlock(b);
}

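/*
 * Print the on-disk contents of a btree node: read it from the first
 * readable replica, then walk each bset, checking the checksum, reversing
 * encryption (the cipher is a symmetric stream cipher, so bset_encrypt()
 * also decrypts), and printing the bset header followed by every key.
 */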
void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
				    const struct btree *b)
{
	struct btree_node *n_ondisk = NULL;
	struct extent_ptr_decoded pick;
	struct bch_dev *ca;
	struct bio *bio = NULL;
	unsigned offset = 0;
	int ret;

	if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), NULL, &pick) <= 0) {
		prt_printf(out, "error getting device to read from: invalid device\n");
		return;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
	if (!bch2_dev_get_ioref(ca, READ)) {
		prt_printf(out, "error getting device to read from: not online\n");
		return;
	}

	n_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
	if (!n_ondisk) {
		prt_printf(out, "memory allocation failure\n");
		goto out;
	}

	bio = bio_alloc_bioset(ca->disk_sb.bdev,
			       buf_pages(n_ondisk, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bch2_bio_map(bio, n_ondisk, btree_buf_bytes(b));

	ret = submit_bio_wait(bio);
	if (ret) {
		prt_printf(out, "IO error reading btree node: %s\n", bch2_err_str(ret));
		goto out;
	}

	while (offset < btree_sectors(c)) {
		struct bset *i;
		struct nonce nonce;
		struct bch_csum csum;
		struct bkey_packed *k;
		unsigned sectors;

		if (!offset) {
			i = &n_ondisk->keys;

			if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
				prt_printf(out, "unknown checksum type at offset %u: %llu\n",
					   offset, BSET_CSUM_TYPE(i));
				goto out;
			}

			nonce = btree_nonce(i, offset << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, n_ondisk);

			if (bch2_crc_cmp(csum, n_ondisk->csum)) {
				prt_printf(out, "invalid checksum\n");
				goto out;
			}

			bset_encrypt(c, i, offset << 9);

			sectors = vstruct_sectors(n_ondisk, c->block_bits);
		} else {
			struct btree_node_entry *bne = (void *) n_ondisk + (offset << 9);

			i = &bne->keys;

			if (i->seq != n_ondisk->keys.seq)
				break;

			if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
				prt_printf(out, "unknown checksum type at offset %u: %llu\n",
					   offset, BSET_CSUM_TYPE(i));
				goto out;
			}

			nonce = btree_nonce(i, offset << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

			if (bch2_crc_cmp(csum, bne->csum)) {
				prt_printf(out, "invalid checksum\n");
				goto out;
			}

			bset_encrypt(c, i, offset << 9);

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		prt_printf(out, "  offset %u version %u, journal seq %llu\n",
			   offset,
			   le16_to_cpu(i->version),
			   le64_to_cpu(i->journal_seq));
		offset += sectors;

		printbuf_indent_add(out, 4);

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) {
			struct bkey u;

			bch2_bkey_val_to_text(out, c, bkey_disassemble(b, k, &u));
			prt_newline(out);
		}

		printbuf_indent_sub(out, 4);
	}
out:
	if (bio)
		bio_put(bio);
	kvfree(n_ondisk);
	percpu_ref_put(&ca->io_ref);
}

#ifdef CONFIG_DEBUG_FS

/* XXX: bch_fs refcounting */

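/*
 * Common state for the debugfs files below: output is accumulated in a
 * printbuf and copied out to userspace incrementally by flush_buf(), with
 * from/prev_node/iter recording where to resume on the next read() call.
 */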
struct dump_iter {
	struct bch_fs		*c;
	enum btree_id		id;
	struct bpos		from;
	struct bpos		prev_node;
	u64			iter;

	struct printbuf		buf;

	char __user		*ubuf;	/* destination user buffer */
	size_t			size;	/* size of requested read */
	ssize_t			ret;	/* bytes read so far */
};

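/*
 * Copy buffered output to userspace. Returns 0 if there's still room in
 * the user buffer (i.e. the caller should keep generating output), the
 * total bytes copied so far once the user buffer is full, or -EFAULT if
 * copy_to_user() faulted.
 */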
static ssize_t flush_buf(struct dump_iter *i)
{
	if (i->buf.pos) {
		size_t bytes = min_t(size_t, i->buf.pos, i->size);
		int copied = bytes - copy_to_user(i->ubuf, i->buf.buf, bytes);

		i->ret	 += copied;
		i->ubuf	 += copied;
		i->size	 -= copied;
		i->buf.pos -= copied;
		memmove(i->buf.buf, i->buf.buf + copied, i->buf.pos);

		if (copied != bytes)
			return -EFAULT;
	}

	return i->size ? 0 : i->ret;
}

static int bch2_dump_open(struct inode *inode, struct file *file)
{
	struct btree_debug *bd = inode->i_private;
	struct dump_iter *i;

	i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->from = POS_MIN;
	i->iter	= 0;
	i->c	= container_of(bd, struct bch_fs, btree_debug[bd->id]);
	i->id	= bd->id;
	i->buf	= PRINTBUF;

	return 0;
}

static int bch2_dump_release(struct inode *inode, struct file *file)
{
	struct dump_iter *i = file->private_data;

	printbuf_exit(&i->buf);
	kfree(i);
	return 0;
}

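/*
 * Dump every key in one btree, resuming from i->from on each read() call;
 * the transaction is unlocked before flushing to userspace, since
 * copy_to_user() can fault and sleep.
 */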
static ssize_t bch2_read_btree(struct file *file, char __user *buf,
			       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	return flush_buf(i) ?:
		bch2_trans_run(i->c,
			for_each_btree_key(trans, iter, i->id, i->from,
					   BTREE_ITER_PREFETCH|
					   BTREE_ITER_ALL_SNAPSHOTS, k, ({
				bch2_bkey_val_to_text(&i->buf, i->c, k);
				prt_newline(&i->buf);
				bch2_trans_unlock(trans);
				i->from = bpos_successor(iter.pos);
				flush_buf(i);
			}))) ?:
		i->ret;
}

static const struct file_operations btree_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_btree,
};

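/*
 * Dump one entry per btree node (bch2_btree_node_to_text()) rather than
 * per key; i->from tracks the position after the last node emitted so a
 * subsequent read() resumes where this one left off.
 */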
static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct btree *b;
	ssize_t ret;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	ret = flush_buf(i);
	if (ret)
		return ret;

	if (bpos_eq(SPOS_MAX, i->from))
		return i->ret;

	trans = bch2_trans_get(i->c);
retry:
	bch2_trans_begin(trans);

	for_each_btree_node(trans, iter, i->id, i->from, 0, b, ret) {
		bch2_btree_node_to_text(&i->buf, i->c, b);
		i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
			? bpos_successor(b->key.k.p)
			: b->key.k.p;

		ret = drop_locks_do(trans, flush_buf(i));
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);

	if (!ret)
		ret = flush_buf(i);

	return ret ?: i->ret;
}

static const struct file_operations btree_format_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_btree_formats,
};

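/*
 * For each key, print the auxiliary search tree (bkey_float) details via
 * bch2_bfloat_to_text() - notably entries where a compact bfloat couldn't
 * be packed, hence the file name - preceded by a node header the first
 * time a new node is entered.
 */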
static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	return flush_buf(i) ?:
		bch2_trans_run(i->c,
			for_each_btree_key(trans, iter, i->id, i->from,
					   BTREE_ITER_PREFETCH|
					   BTREE_ITER_ALL_SNAPSHOTS, k, ({
				struct btree_path_level *l =
					&btree_iter_path(trans, &iter)->l[0];
				struct bkey_packed *_k =
					bch2_btree_node_iter_peek(&l->iter, l->b);

				if (bpos_gt(l->b->key.k.p, i->prev_node)) {
					bch2_btree_node_to_text(&i->buf, i->c, l->b);
					i->prev_node = l->b->key.k.p;
				}

				bch2_bfloat_to_text(&i->buf, l->b, _k);
				bch2_trans_unlock(trans);
				i->from = bpos_successor(iter.pos);
				flush_buf(i);
			}))) ?:
		i->ret;
}

static const struct file_operations bfloat_failed_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_bfloat_failed,
};

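/*
 * Print the in-memory state of one cached btree node: its key, flags,
 * lock state, how much has been written, and the journal pins for both
 * pending writes.
 */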
static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
					   struct btree *b)
{
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 32);

	prt_printf(out, "%px btree=%s l=%u ",
		   b,
		   bch2_btree_id_str(b->c.btree_id),
		   b->c.level);
	prt_newline(out);

	printbuf_indent_add(out, 2);

	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
	prt_newline(out);

	prt_printf(out, "flags: ");
	prt_tab(out);
	prt_bitflags(out, bch2_btree_node_flags, b->flags);
	prt_newline(out);

	prt_printf(out, "pcpu read locks: ");
	prt_tab(out);
	prt_printf(out, "%u", b->c.lock.readers != NULL);
	prt_newline(out);

	prt_printf(out, "written:");
	prt_tab(out);
	prt_printf(out, "%u", b->written);
	prt_newline(out);

	prt_printf(out, "writes blocked:");
	prt_tab(out);
	prt_printf(out, "%u", !list_empty_careful(&b->write_blocked));
	prt_newline(out);

	prt_printf(out, "will make reachable:");
	prt_tab(out);
	prt_printf(out, "%lx", b->will_make_reachable);
	prt_newline(out);

	prt_printf(out, "journal pin %px:", &b->writes[0].journal);
	prt_tab(out);
	prt_printf(out, "%llu", b->writes[0].journal.seq);
	prt_newline(out);

	prt_printf(out, "journal pin %px:", &b->writes[1].journal);
	prt_tab(out);
	prt_printf(out, "%llu", b->writes[1].journal.seq);
	prt_newline(out);

	printbuf_indent_sub(out, 2);
}

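/*
 * Walk the btree node cache hash table under RCU, one hash bucket per
 * iteration, dumping every cached node; i->buf.atomic is bumped so the
 * printbuf won't attempt a sleeping allocation inside the RCU read-side
 * critical section.
 */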
static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf,
					    size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	bool done = false;
	ssize_t ret = 0;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	do {
		struct bucket_table *tbl;
		struct rhash_head *pos;
		struct btree *b;

		ret = flush_buf(i);
		if (ret)
			return ret;

		rcu_read_lock();
		i->buf.atomic++;
		tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
					  &c->btree_cache.table);
		if (i->iter < tbl->size) {
			rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
				bch2_cached_btree_node_to_text(&i->buf, c, b);
			i->iter++;
		} else {
			done = true;
		}
		--i->buf.atomic;
		rcu_read_unlock();
	} while (!done);

	if (i->buf.allocation_failure)
		ret = -ENOMEM;

	if (!ret)
		ret = flush_buf(i);

	return ret ?: i->ret;
}

static const struct file_operations cached_btree_nodes_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_cached_btree_nodes_read,
};

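/*
 * List every in-flight btree transaction along with a backtrace of the
 * owning task. btree_trans_lock is a seqmutex: it's dropped (after taking
 * a closure ref so the transaction can't be freed) while printing and
 * flushing, and if relocking detects the list changed, iteration restarts
 * - transactions already printed are skipped by task pid.
 */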
static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
					    size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	struct btree_trans *trans;
	ssize_t ret = 0;
	u32 seq;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;
restart:
	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		struct task_struct *task = READ_ONCE(trans->locking_wait.task);

		if (!task || task->pid <= i->iter)
			continue;

		closure_get(&trans->ref);
		seq = seqmutex_seq(&c->btree_trans_lock);
		seqmutex_unlock(&c->btree_trans_lock);

		ret = flush_buf(i);
		if (ret) {
			closure_put(&trans->ref);
			goto unlocked;
		}

		bch2_btree_trans_to_text(&i->buf, trans);

		prt_printf(&i->buf, "backtrace:");
		prt_newline(&i->buf);
		printbuf_indent_add(&i->buf, 2);
		bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
		printbuf_indent_sub(&i->buf, 2);
		prt_newline(&i->buf);

		i->iter = task->pid;

		closure_put(&trans->ref);

		if (!seqmutex_relock(&c->btree_trans_lock, seq))
			goto restart;
	}
	seqmutex_unlock(&c->btree_trans_lock);
unlocked:
	if (i->buf.allocation_failure)
		ret = -ENOMEM;

	if (!ret)
		ret = flush_buf(i);

	return ret ?: i->ret;
}

static const struct file_operations btree_transactions_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_btree_transactions_read,
};

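/*
 * Dump the journal pins for each journal sequence number, one sequence
 * per iteration, until bch2_journal_seq_pins_to_text() reports it's done.
 */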
static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
				      size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	bool done = false;
	int err;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	do {
		err = flush_buf(i);
		if (err)
			return err;

		if (!i->size)
			break;

		done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter);
		i->iter++;
	} while (!done);

	if (i->buf.allocation_failure)
		return -ENOMEM;

	return i->ret;
}

static const struct file_operations journal_pins_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_journal_pins_read,
};

static int btree_transaction_stats_open(struct inode *inode, struct file *file)
{
	struct bch_fs *c = inode->i_private;
	struct dump_iter *i;

	i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	i->iter = 1;
	i->c    = c;
	i->buf  = PRINTBUF;
	file->private_data = i;

	return 0;
}

static int btree_transaction_stats_release(struct inode *inode, struct file *file)
{
	struct dump_iter *i = file->private_data;

	printbuf_exit(&i->buf);
	kfree(i);

	return 0;
}

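/*
 * Print per-transaction-function statistics: maximum transaction memory
 * used, transaction duration, lock hold times (when
 * CONFIG_BCACHEFS_LOCK_TIME_STATS is enabled) and the largest set of
 * btree paths allocated; i->iter indexes into bch2_btree_transaction_fns.
 */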
static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf,
					    size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	int err;

	i->ubuf = buf;
	i->size = size;
	i->ret  = 0;

	while (1) {
		struct btree_transaction_stats *s = &c->btree_transaction_stats[i->iter];

		err = flush_buf(i);
		if (err)
			return err;

		if (!i->size)
			break;

		if (i->iter == ARRAY_SIZE(bch2_btree_transaction_fns) ||
		    !bch2_btree_transaction_fns[i->iter])
			break;

		prt_printf(&i->buf, "%s: ", bch2_btree_transaction_fns[i->iter]);
		prt_newline(&i->buf);
		printbuf_indent_add(&i->buf, 2);

		mutex_lock(&s->lock);

		prt_printf(&i->buf, "Max mem used: %u", s->max_mem);
		prt_newline(&i->buf);

		prt_printf(&i->buf, "Transaction duration:");
		prt_newline(&i->buf);

		printbuf_indent_add(&i->buf, 2);
		bch2_time_stats_to_text(&i->buf, &s->duration);
		printbuf_indent_sub(&i->buf, 2);

		if (IS_ENABLED(CONFIG_BCACHEFS_LOCK_TIME_STATS)) {
			prt_printf(&i->buf, "Lock hold times:");
			prt_newline(&i->buf);

			printbuf_indent_add(&i->buf, 2);
			bch2_time_stats_to_text(&i->buf, &s->lock_hold_times);
			printbuf_indent_sub(&i->buf, 2);
		}

		if (s->max_paths_text) {
			prt_printf(&i->buf, "Maximum allocated btree paths (%u):", s->nr_max_paths);
			prt_newline(&i->buf);

			printbuf_indent_add(&i->buf, 2);
			prt_str_indented(&i->buf, s->max_paths_text);
			printbuf_indent_sub(&i->buf, 2);
		}

		mutex_unlock(&s->lock);

		printbuf_indent_sub(&i->buf, 2);
		prt_newline(&i->buf);
		i->iter++;
	}

	if (i->buf.allocation_failure)
		return -ENOMEM;

	return i->ret;
}

static const struct file_operations btree_transaction_stats_op = {
	.owner		= THIS_MODULE,
	.open		= btree_transaction_stats_open,
	.release	= btree_transaction_stats_release,
	.read		= btree_transaction_stats_read,
};

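/*
 * Run the lock cycle detector (bch2_check_for_deadlock()) against every
 * in-flight btree transaction and print any cycle found. Output is only
 * generated on the first pass - i->iter is nonzero afterwards - with the
 * same drop-and-relock dance around btree_trans_lock as
 * bch2_btree_transactions_read().
 */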
static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
					size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	struct btree_trans *trans;
	ssize_t ret = 0;
	u32 seq;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	if (i->iter)
		goto out;
restart:
	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		struct task_struct *task = READ_ONCE(trans->locking_wait.task);

		if (!task || task->pid <= i->iter)
			continue;

		closure_get(&trans->ref);
		seq = seqmutex_seq(&c->btree_trans_lock);
		seqmutex_unlock(&c->btree_trans_lock);

		ret = flush_buf(i);
		if (ret) {
			closure_put(&trans->ref);
			goto out;
		}

		bch2_check_for_deadlock(trans, &i->buf);

		i->iter = task->pid;

		closure_put(&trans->ref);

		if (!seqmutex_relock(&c->btree_trans_lock, seq))
			goto restart;
	}
	seqmutex_unlock(&c->btree_trans_lock);
out:
	if (i->buf.allocation_failure)
		ret = -ENOMEM;

	if (!ret)
		ret = flush_buf(i);

	return ret ?: i->ret;
}

static const struct file_operations btree_deadlock_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_btree_deadlock_read,
};

void bch2_fs_debug_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->fs_debug_dir))
		debugfs_remove_recursive(c->fs_debug_dir);
}

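/*
 * Create this filesystem's debugfs hierarchy: a directory named after the
 * user-visible UUID containing the files above, plus a btrees/
 * subdirectory with per-btree dump files (keys, node formats, and failed
 * bkey_floats).
 */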
void bch2_fs_debug_init(struct bch_fs *c)
{
	struct btree_debug *bd;
	char name[100];

	if (IS_ERR_OR_NULL(bch_debug))
		return;

	snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
	c->fs_debug_dir = debugfs_create_dir(name, bch_debug);
	if (IS_ERR_OR_NULL(c->fs_debug_dir))
		return;

	debugfs_create_file("cached_btree_nodes", 0400, c->fs_debug_dir,
			    c->btree_debug, &cached_btree_nodes_ops);

	debugfs_create_file("btree_transactions", 0400, c->fs_debug_dir,
			    c->btree_debug, &btree_transactions_ops);

	debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
			    c->btree_debug, &journal_pins_ops);

	debugfs_create_file("btree_transaction_stats", 0400, c->fs_debug_dir,
			    c, &btree_transaction_stats_op);

	debugfs_create_file("btree_deadlock", 0400, c->fs_debug_dir,
			    c->btree_debug, &btree_deadlock_ops);

	c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir);
	if (IS_ERR_OR_NULL(c->btree_debug_dir))
		return;

	for (bd = c->btree_debug;
	     bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
	     bd++) {
		bd->id = bd - c->btree_debug;
		debugfs_create_file(bch2_btree_id_str(bd->id),
				    0400, c->btree_debug_dir, bd,
				    &btree_debug_ops);

		snprintf(name, sizeof(name), "%s-formats",
			 bch2_btree_id_str(bd->id));

		debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
				    &btree_format_debug_ops);

		snprintf(name, sizeof(name), "%s-bfloat-failed",
			 bch2_btree_id_str(bd->id));

		debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
				    &bfloat_failed_debug_ops);
	}
}

#endif

void bch2_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(bch_debug))
		debugfs_remove_recursive(bch_debug);
}

int __init bch2_debug_init(void)
{
	bch_debug = debugfs_create_dir("bcachefs", NULL);
	return 0;
}