xref: /linux/fs/btrfs/ref-verify.c (revision 2decec48b0fd28ffdbf4cc684bd04e735f0839dd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2014 Facebook.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/stacktrace.h>
8 #include "ctree.h"
9 #include "disk-io.h"
10 #include "locking.h"
11 #include "delayed-ref.h"
12 #include "ref-verify.h"
13 
14 /*
15  * Used to keep track of the roots and the number of refs each root has for a
16  * given bytenr.  This only tracks the number of direct references, not shared
17  * references.
18  */
19 struct root_entry {
20 	u64 root_objectid;
21 	u64 num_refs;
22 	struct rb_node node;
23 };
24 
25 /*
26  * These are meant to represent what should exist in the extent tree; they can
27  * be used to verify that the extent tree is consistent, as they should all
28  * match what the extent tree says.
29  */
30 struct ref_entry {
31 	u64 root_objectid;
32 	u64 parent;
33 	u64 owner;
34 	u64 offset;
35 	u64 num_refs;
36 	struct rb_node node;
37 };
38 
39 #define MAX_TRACE	16
40 
41 /*
42  * Whenever we add/remove a reference we record the action.  The action maps
43  * back to the delayed ref action.  We hold the ref we are changing in the
44  * action so we can account for the history properly, and we record the root we
45  * were called with since it could be different from ref_root.  We also store
46  * stack traces because that's how I roll.
47  */
48 struct ref_action {
49 	int action;
50 	u64 root;
51 	struct ref_entry ref;
52 	struct list_head list;
53 	unsigned long trace[MAX_TRACE];
54 	unsigned int trace_len;
55 };
56 
57 /*
58  * One of these exists for every block we reference; it holds the roots and
59  * references to it as well as all of the ref actions that have occurred to it.
60  * We never free it until we unmount the file system, in order to make sure
61  * re-allocations are happening properly.
62  */
63 struct block_entry {
64 	u64 bytenr;
65 	u64 len;
66 	u64 num_refs;
67 	int metadata;
68 	int from_disk;
69 	struct rb_root roots;
70 	struct rb_root refs;
71 	struct rb_node node;
72 	struct list_head actions;
73 };
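
/*
 * Overview of how these pieces fit together (derived from the code below):
 * fs_info->block_tree is an rbtree of block_entry, keyed by bytenr.  Each
 * block_entry holds an rbtree of root_entry (keyed by root objectid), an
 * rbtree of ref_entry (ordered by root/parent/owner/offset, see comp_refs()),
 * and a list of ref_action records describing every modification we have
 * seen for that block.
 */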
74 
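/*
 * The insert_*_entry() helpers below all follow the same convention: they
 * return NULL when the new node was linked into the tree, or the already
 * existing entry (without inserting) when a node with the same key is found,
 * in which case the caller is expected to free its own copy.
 */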
75 static struct block_entry *insert_block_entry(struct rb_root *root,
76 					      struct block_entry *be)
77 {
78 	struct rb_node **p = &root->rb_node;
79 	struct rb_node *parent_node = NULL;
80 	struct block_entry *entry;
81 
82 	while (*p) {
83 		parent_node = *p;
84 		entry = rb_entry(parent_node, struct block_entry, node);
85 		if (entry->bytenr > be->bytenr)
86 			p = &(*p)->rb_left;
87 		else if (entry->bytenr < be->bytenr)
88 			p = &(*p)->rb_right;
89 		else
90 			return entry;
91 	}
92 
93 	rb_link_node(&be->node, parent_node, p);
94 	rb_insert_color(&be->node, root);
95 	return NULL;
96 }
97 
98 static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
99 {
100 	struct rb_node *n;
101 	struct block_entry *entry = NULL;
102 
103 	n = root->rb_node;
104 	while (n) {
105 		entry = rb_entry(n, struct block_entry, node);
106 		if (entry->bytenr < bytenr)
107 			n = n->rb_right;
108 		else if (entry->bytenr > bytenr)
109 			n = n->rb_left;
110 		else
111 			return entry;
112 	}
113 	return NULL;
114 }
115 
116 static struct root_entry *insert_root_entry(struct rb_root *root,
117 					    struct root_entry *re)
118 {
119 	struct rb_node **p = &root->rb_node;
120 	struct rb_node *parent_node = NULL;
121 	struct root_entry *entry;
122 
123 	while (*p) {
124 		parent_node = *p;
125 		entry = rb_entry(parent_node, struct root_entry, node);
126 		if (entry->root_objectid > re->root_objectid)
127 			p = &(*p)->rb_left;
128 		else if (entry->root_objectid < re->root_objectid)
129 			p = &(*p)->rb_right;
130 		else
131 			return entry;
132 	}
133 
134 	rb_link_node(&re->node, parent_node, p);
135 	rb_insert_color(&re->node, root);
136 	return NULL;
137 
138 }
139 
140 static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
141 {
142 	if (ref1->root_objectid < ref2->root_objectid)
143 		return -1;
144 	if (ref1->root_objectid > ref2->root_objectid)
145 		return 1;
146 	if (ref1->parent < ref2->parent)
147 		return -1;
148 	if (ref1->parent > ref2->parent)
149 		return 1;
150 	if (ref1->owner < ref2->owner)
151 		return -1;
152 	if (ref1->owner > ref2->owner)
153 		return 1;
154 	if (ref1->offset < ref2->offset)
155 		return -1;
156 	if (ref1->offset > ref2->offset)
157 		return 1;
158 	return 0;
159 }
160 
161 static struct ref_entry *insert_ref_entry(struct rb_root *root,
162 					  struct ref_entry *ref)
163 {
164 	struct rb_node **p = &root->rb_node;
165 	struct rb_node *parent_node = NULL;
166 	struct ref_entry *entry;
167 	int cmp;
168 
169 	while (*p) {
170 		parent_node = *p;
171 		entry = rb_entry(parent_node, struct ref_entry, node);
172 		cmp = comp_refs(entry, ref);
173 		if (cmp > 0)
174 			p = &(*p)->rb_left;
175 		else if (cmp < 0)
176 			p = &(*p)->rb_right;
177 		else
178 			return entry;
179 	}
180 
181 	rb_link_node(&ref->node, parent_node, p);
182 	rb_insert_color(&ref->node, root);
183 	return NULL;
184 
185 }
186 
187 static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
188 {
189 	struct rb_node *n;
190 	struct root_entry *entry = NULL;
191 
192 	n = root->rb_node;
193 	while (n) {
194 		entry = rb_entry(n, struct root_entry, node);
195 		if (entry->root_objectid < objectid)
196 			n = n->rb_right;
197 		else if (entry->root_objectid > objectid)
198 			n = n->rb_left;
199 		else
200 			return entry;
201 	}
202 	return NULL;
203 }
204 
205 #ifdef CONFIG_STACKTRACE
206 static void __save_stack_trace(struct ref_action *ra)
207 {
208 	ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
209 }
210 
211 static void __print_stack_trace(struct btrfs_fs_info *fs_info,
212 				struct ref_action *ra)
213 {
214 	if (ra->trace_len == 0) {
215 		btrfs_err(fs_info, "  ref-verify: no stacktrace");
216 		return;
217 	}
218 	stack_trace_print(ra->trace, ra->trace_len, 2);
219 }
220 #else
221 static inline void __save_stack_trace(struct ref_action *ra)
222 {
223 }
224 
225 static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
226 				       struct ref_action *ra)
227 {
228 	btrfs_err(fs_info, "  ref-verify: no stacktrace support");
229 }
230 #endif
231 
232 static void free_block_entry(struct block_entry *be)
233 {
234 	struct root_entry *re;
235 	struct ref_entry *ref;
236 	struct ref_action *ra;
237 	struct rb_node *n;
238 
239 	while ((n = rb_first(&be->roots))) {
240 		re = rb_entry(n, struct root_entry, node);
241 		rb_erase(&re->node, &be->roots);
242 		kfree(re);
243 	}
244 
245 	while ((n = rb_first(&be->refs))) {
246 		ref = rb_entry(n, struct ref_entry, node);
247 		rb_erase(&ref->node, &be->refs);
248 		kfree(ref);
249 	}
250 
251 	while (!list_empty(&be->actions)) {
252 		ra = list_first_entry(&be->actions, struct ref_action,
253 				      list);
254 		list_del(&ra->list);
255 		kfree(ra);
256 	}
257 	kfree(be);
258 }
259 
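/*
 * Note on locking: on success this returns with fs_info->ref_verify_lock held
 * (it is taken just before the block tree is searched), and the callers below
 * are responsible for dropping it.  The ERR_PTR(-ENOMEM) case returns before
 * the lock is taken.
 */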
260 static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
261 					   u64 bytenr, u64 len,
262 					   u64 root_objectid)
263 {
264 	struct block_entry *be = NULL, *exist;
265 	struct root_entry *re = NULL;
266 
267 	re = kzalloc(sizeof(struct root_entry), GFP_KERNEL);
268 	be = kzalloc(sizeof(struct block_entry), GFP_KERNEL);
269 	if (!be || !re) {
270 		kfree(re);
271 		kfree(be);
272 		return ERR_PTR(-ENOMEM);
273 	}
274 	be->bytenr = bytenr;
275 	be->len = len;
276 
277 	re->root_objectid = root_objectid;
278 	re->num_refs = 0;
279 
280 	spin_lock(&fs_info->ref_verify_lock);
281 	exist = insert_block_entry(&fs_info->block_tree, be);
282 	if (exist) {
283 		if (root_objectid) {
284 			struct root_entry *exist_re;
285 
286 			exist_re = insert_root_entry(&exist->roots, re);
287 			if (exist_re)
288 				kfree(re);
289 		}
290 		kfree(be);
291 		return exist;
292 	}
293 
294 	be->num_refs = 0;
295 	be->metadata = 0;
296 	be->from_disk = 0;
297 	be->roots = RB_ROOT;
298 	be->refs = RB_ROOT;
299 	INIT_LIST_HEAD(&be->actions);
300 	if (root_objectid)
301 		insert_root_entry(&be->roots, re);
302 	else
303 		kfree(re);
304 	return be;
305 }
306 
307 static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
308 			  u64 parent, u64 bytenr, int level)
309 {
310 	struct block_entry *be;
311 	struct root_entry *re;
312 	struct ref_entry *ref = NULL, *exist;
313 
314 	ref = kmalloc(sizeof(struct ref_entry), GFP_KERNEL);
315 	if (!ref)
316 		return -ENOMEM;
317 
318 	if (parent)
319 		ref->root_objectid = 0;
320 	else
321 		ref->root_objectid = ref_root;
322 	ref->parent = parent;
323 	ref->owner = level;
324 	ref->offset = 0;
325 	ref->num_refs = 1;
326 
327 	be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
328 	if (IS_ERR(be)) {
329 		kfree(ref);
330 		return PTR_ERR(be);
331 	}
332 	be->num_refs++;
333 	be->from_disk = 1;
334 	be->metadata = 1;
335 
336 	if (!parent) {
337 		ASSERT(ref_root);
338 		re = lookup_root_entry(&be->roots, ref_root);
339 		ASSERT(re);
340 		re->num_refs++;
341 	}
342 	exist = insert_ref_entry(&be->refs, ref);
343 	if (exist) {
344 		exist->num_refs++;
345 		kfree(ref);
346 	}
347 	spin_unlock(&fs_info->ref_verify_lock);
348 
349 	return 0;
350 }
351 
352 static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
353 			       u64 parent, u32 num_refs, u64 bytenr,
354 			       u64 num_bytes)
355 {
356 	struct block_entry *be;
357 	struct ref_entry *ref;
358 
359 	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
360 	if (!ref)
361 		return -ENOMEM;
362 	be = add_block_entry(fs_info, bytenr, num_bytes, 0);
363 	if (IS_ERR(be)) {
364 		kfree(ref);
365 		return PTR_ERR(be);
366 	}
367 	be->num_refs += num_refs;
368 
369 	ref->parent = parent;
370 	ref->num_refs = num_refs;
371 	if (insert_ref_entry(&be->refs, ref)) {
372 		spin_unlock(&fs_info->ref_verify_lock);
373 		btrfs_err(fs_info, "existing shared ref when reading from disk?");
374 		kfree(ref);
375 		return -EINVAL;
376 	}
377 	spin_unlock(&fs_info->ref_verify_lock);
378 	return 0;
379 }
380 
381 static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
382 			       struct extent_buffer *leaf,
383 			       struct btrfs_extent_data_ref *dref,
384 			       u64 bytenr, u64 num_bytes)
385 {
386 	struct block_entry *be;
387 	struct ref_entry *ref;
388 	struct root_entry *re;
389 	u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
390 	u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
391 	u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
392 	u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);
393 
394 	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
395 	if (!ref)
396 		return -ENOMEM;
397 	be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
398 	if (IS_ERR(be)) {
399 		kfree(ref);
400 		return PTR_ERR(be);
401 	}
402 	be->num_refs += num_refs;
403 
404 	ref->parent = 0;
405 	ref->owner = owner;
406 	ref->root_objectid = ref_root;
407 	ref->offset = offset;
408 	ref->num_refs = num_refs;
409 	if (insert_ref_entry(&be->refs, ref)) {
410 		spin_unlock(&fs_info->ref_verify_lock);
411 		btrfs_err(fs_info, "existing ref when reading from disk?");
412 		kfree(ref);
413 		return -EINVAL;
414 	}
415 
416 	re = lookup_root_entry(&be->roots, ref_root);
417 	if (!re) {
418 		spin_unlock(&fs_info->ref_verify_lock);
419 		btrfs_err(fs_info, "missing root in new block entry?");
420 		return -EINVAL;
421 	}
422 	re->num_refs += num_refs;
423 	spin_unlock(&fs_info->ref_verify_lock);
424 	return 0;
425 }
426 
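/*
 * Walk every inline ref stored in an EXTENT_ITEM/METADATA_ITEM and record it
 * as a tree block, shared data, or extent data ref as appropriate.
 */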
427 static int process_extent_item(struct btrfs_fs_info *fs_info,
428 			       struct btrfs_path *path, struct btrfs_key *key,
429 			       int slot, int *tree_block_level)
430 {
431 	struct btrfs_extent_item *ei;
432 	struct btrfs_extent_inline_ref *iref;
433 	struct btrfs_extent_data_ref *dref;
434 	struct btrfs_shared_data_ref *sref;
435 	struct extent_buffer *leaf = path->nodes[0];
436 	u32 item_size = btrfs_item_size_nr(leaf, slot);
437 	unsigned long end, ptr;
438 	u64 offset, flags, count;
439 	int type, ret;
440 	int type, ret = 0;
441 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
442 	flags = btrfs_extent_flags(leaf, ei);
443 
444 	if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
445 	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
446 		struct btrfs_tree_block_info *info;
447 
448 		info = (struct btrfs_tree_block_info *)(ei + 1);
449 		*tree_block_level = btrfs_tree_block_level(leaf, info);
450 		iref = (struct btrfs_extent_inline_ref *)(info + 1);
451 	} else {
452 		if (key->type == BTRFS_METADATA_ITEM_KEY)
453 			*tree_block_level = key->offset;
454 		iref = (struct btrfs_extent_inline_ref *)(ei + 1);
455 	}
456 
457 	ptr = (unsigned long)iref;
458 	end = (unsigned long)ei + item_size;
459 	while (ptr < end) {
460 		iref = (struct btrfs_extent_inline_ref *)ptr;
461 		type = btrfs_extent_inline_ref_type(leaf, iref);
462 		offset = btrfs_extent_inline_ref_offset(leaf, iref);
463 		switch (type) {
464 		case BTRFS_TREE_BLOCK_REF_KEY:
465 			ret = add_tree_block(fs_info, offset, 0, key->objectid,
466 					     *tree_block_level);
467 			break;
468 		case BTRFS_SHARED_BLOCK_REF_KEY:
469 			ret = add_tree_block(fs_info, 0, offset, key->objectid,
470 					     *tree_block_level);
471 			break;
472 		case BTRFS_EXTENT_DATA_REF_KEY:
473 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
474 			ret = add_extent_data_ref(fs_info, leaf, dref,
475 						  key->objectid, key->offset);
476 			break;
477 		case BTRFS_SHARED_DATA_REF_KEY:
478 			sref = (struct btrfs_shared_data_ref *)(iref + 1);
479 			count = btrfs_shared_data_ref_count(leaf, sref);
480 			ret = add_shared_data_ref(fs_info, offset, count,
481 						  key->objectid, key->offset);
482 			break;
483 		default:
484 			btrfs_err(fs_info, "invalid key type in iref");
485 			ret = -EINVAL;
486 			break;
487 		}
488 		if (ret)
489 			break;
490 		ptr += btrfs_extent_inline_ref_size(type);
491 	}
492 	return ret;
493 }
494 
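/*
 * Record every extent reference found in a single extent tree leaf, both the
 * inline refs inside extent items and the separately keyed ref items.
 */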
495 static int process_leaf(struct btrfs_root *root,
496 			struct btrfs_path *path, u64 *bytenr, u64 *num_bytes)
497 {
498 	struct btrfs_fs_info *fs_info = root->fs_info;
499 	struct extent_buffer *leaf = path->nodes[0];
500 	struct btrfs_extent_data_ref *dref;
501 	struct btrfs_shared_data_ref *sref;
502 	u32 count;
503 	int i = 0, tree_block_level = 0, ret = 0;
504 	struct btrfs_key key;
505 	int nritems = btrfs_header_nritems(leaf);
506 
507 	for (i = 0; i < nritems; i++) {
508 		btrfs_item_key_to_cpu(leaf, &key, i);
509 		switch (key.type) {
510 		case BTRFS_EXTENT_ITEM_KEY:
511 			*num_bytes = key.offset;
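			/* fall through */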
512 		case BTRFS_METADATA_ITEM_KEY:
513 			*bytenr = key.objectid;
514 			ret = process_extent_item(fs_info, path, &key, i,
515 						  &tree_block_level);
516 			break;
517 		case BTRFS_TREE_BLOCK_REF_KEY:
518 			ret = add_tree_block(fs_info, key.offset, 0,
519 					     key.objectid, tree_block_level);
520 			break;
521 		case BTRFS_SHARED_BLOCK_REF_KEY:
522 			ret = add_tree_block(fs_info, 0, key.offset,
523 					     key.objectid, tree_block_level);
524 			break;
525 		case BTRFS_EXTENT_DATA_REF_KEY:
526 			dref = btrfs_item_ptr(leaf, i,
527 					      struct btrfs_extent_data_ref);
528 			ret = add_extent_data_ref(fs_info, leaf, dref, *bytenr,
529 						  *num_bytes);
530 			break;
531 		case BTRFS_SHARED_DATA_REF_KEY:
532 			sref = btrfs_item_ptr(leaf, i,
533 					      struct btrfs_shared_data_ref);
534 			count = btrfs_shared_data_ref_count(leaf, sref);
535 			ret = add_shared_data_ref(fs_info, key.offset, count,
536 						  *bytenr, *num_bytes);
537 			break;
538 		default:
539 			break;
540 		}
541 		if (ret)
542 			break;
543 	}
544 	return ret;
545 }
546 
547 /* Walk down to the leaf from the given level */
548 static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
549 			  int level, u64 *bytenr, u64 *num_bytes)
550 {
551 	struct btrfs_fs_info *fs_info = root->fs_info;
552 	struct extent_buffer *eb;
553 	u64 block_bytenr, gen;
554 	int ret = 0;
555 
556 	while (level >= 0) {
557 		if (level) {
558 			struct btrfs_key first_key;
559 
560 			block_bytenr = btrfs_node_blockptr(path->nodes[level],
561 							   path->slots[level]);
562 			gen = btrfs_node_ptr_generation(path->nodes[level],
563 							path->slots[level]);
564 			btrfs_node_key_to_cpu(path->nodes[level], &first_key,
565 					      path->slots[level]);
566 			eb = read_tree_block(fs_info, block_bytenr, gen,
567 					     level - 1, &first_key);
568 			if (IS_ERR(eb))
569 				return PTR_ERR(eb);
570 			if (!extent_buffer_uptodate(eb)) {
571 				free_extent_buffer(eb);
572 				return -EIO;
573 			}
574 			btrfs_tree_read_lock(eb);
575 			btrfs_set_lock_blocking_read(eb);
576 			path->nodes[level - 1] = eb;
577 			path->slots[level - 1] = 0;
578 			path->locks[level - 1] = BTRFS_READ_LOCK_BLOCKING;
579 		} else {
580 			ret = process_leaf(root, path, bytenr, num_bytes);
581 			if (ret)
582 				break;
583 		}
584 		level--;
585 	}
586 	return ret;
587 }
588 
589 /* Walk up to the next node that needs to be processed */
590 static int walk_up_tree(struct btrfs_path *path, int *level)
591 {
592 	int l;
593 
594 	for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
595 		if (!path->nodes[l])
596 			continue;
597 		if (l) {
598 			path->slots[l]++;
599 			if (path->slots[l] <
600 			    btrfs_header_nritems(path->nodes[l])) {
601 				*level = l;
602 				return 0;
603 			}
604 		}
605 		btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
606 		free_extent_buffer(path->nodes[l]);
607 		path->nodes[l] = NULL;
608 		path->slots[l] = 0;
609 		path->locks[l] = 0;
610 	}
611 
612 	return 1;
613 }
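
/*
 * walk_down_tree() read-locks every node it descends into; walk_up_tree() is
 * what drops those locks and extent buffer references again as it climbs back
 * up.  btrfs_build_ref_tree() below alternates between the two to iterate the
 * whole extent tree.
 */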
614 
615 static void dump_ref_action(struct btrfs_fs_info *fs_info,
616 			    struct ref_action *ra)
617 {
618 	btrfs_err(fs_info,
619 "  Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
620 		  ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
621 		  ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
622 	__print_stack_trace(fs_info, ra);
623 }
624 
625 /*
626  * Dump all the information from the block entry to printk; it's going to be
627  * awesome.
628  */
629 static void dump_block_entry(struct btrfs_fs_info *fs_info,
630 			     struct block_entry *be)
631 {
632 	struct ref_entry *ref;
633 	struct root_entry *re;
634 	struct ref_action *ra;
635 	struct rb_node *n;
636 
637 	btrfs_err(fs_info,
638 "dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
639 		  be->bytenr, be->len, be->num_refs, be->metadata,
640 		  be->from_disk);
641 
642 	for (n = rb_first(&be->refs); n; n = rb_next(n)) {
643 		ref = rb_entry(n, struct ref_entry, node);
644 		btrfs_err(fs_info,
645 "  ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
646 			  ref->root_objectid, ref->parent, ref->owner,
647 			  ref->offset, ref->num_refs);
648 	}
649 
650 	for (n = rb_first(&be->roots); n; n = rb_next(n)) {
651 		re = rb_entry(n, struct root_entry, node);
652 		btrfs_err(fs_info, "  root entry %llu, num_refs %llu",
653 			  re->root_objectid, re->num_refs);
654 	}
655 
656 	list_for_each_entry(ra, &be->actions, list)
657 		dump_ref_action(fs_info, ra);
658 }
659 
660 /*
661  * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
662  * @root: the root we are making this modification from.
663  * @bytenr: the bytenr we are modifying.
664  * @num_bytes: number of bytes.
665  * @parent: the parent bytenr.
666  * @ref_root: the original root owner of the bytenr.
667  * @owner: level in the case of metadata, inode in the case of data.
668  * @offset: 0 for metadata, file offset for data.
669  * @action: the action that we are doing, this is the same as the delayed ref
670  *	action.
671  *
672  * This will add an action item to the given bytenr and do sanity checks to make
673  * sure we haven't messed something up.  If we are making a new allocation and
674  * this block entry has history, we will delete all previous actions, as long
675  * as our sanity checks pass, since they are no longer needed.
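 *
 * Illustrative sketch only (not an actual call site; the values are made up):
 * recording the addition of a metadata ref for a tree block might look like
 *
 *	btrfs_ref_tree_mod(root, eb->start, fs_info->nodesize, 0,
 *			   root->root_key.objectid, btrfs_header_level(eb), 0,
 *			   BTRFS_ADD_DELAYED_REF);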
676  */
677 int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
678 		       u64 parent, u64 ref_root, u64 owner, u64 offset,
679 		       int action)
680 {
681 	struct btrfs_fs_info *fs_info = root->fs_info;
682 	struct ref_entry *ref = NULL, *exist;
683 	struct ref_action *ra = NULL;
684 	struct block_entry *be = NULL;
685 	struct root_entry *re = NULL;
686 	int ret = 0;
687 	bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
688 
689 	if (!btrfs_test_opt(root->fs_info, REF_VERIFY))
690 		return 0;
691 
692 	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
693 	ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
694 	if (!ra || !ref) {
695 		kfree(ref);
696 		kfree(ra);
697 		ret = -ENOMEM;
698 		goto out;
699 	}
700 
701 	if (parent) {
702 		ref->parent = parent;
703 	} else {
704 		ref->root_objectid = ref_root;
705 		ref->owner = owner;
706 		ref->offset = offset;
707 	}
708 	ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;
709 
710 	memcpy(&ra->ref, ref, sizeof(struct ref_entry));
711 	/*
712 	 * Save the extra info from the delayed ref in the ref action to make it
713 	 * easier to figure out what is happening.  The real refs we add to the
714 	 * ref tree need to reflect what we save on disk, so they match any
715 	 * on-disk refs we pre-loaded.
716 	 */
717 	ra->ref.owner = owner;
718 	ra->ref.offset = offset;
719 	ra->ref.root_objectid = ref_root;
720 	__save_stack_trace(ra);
721 
722 	INIT_LIST_HEAD(&ra->list);
723 	ra->action = action;
724 	ra->root = root->root_key.objectid;
725 
726 	/*
727 	 * This is an allocation; preallocate the block_entry in case we haven't
728 	 * used it before.
729 	 */
730 	ret = -EINVAL;
731 	if (action == BTRFS_ADD_DELAYED_EXTENT) {
732 		/*
733 		 * For subvol_create we'll just pass in whatever the parent root
734 		 * is and the new root objectid, so let's not treat the passed
735 		 * in root as if it really has a ref for this bytenr.
736 		 */
737 		be = add_block_entry(root->fs_info, bytenr, num_bytes, ref_root);
738 		if (IS_ERR(be)) {
			kfree(ref);
739 			kfree(ra);
740 			ret = PTR_ERR(be);
741 			goto out;
742 		}
743 		be->num_refs++;
744 		if (metadata)
745 			be->metadata = 1;
746 
747 		if (be->num_refs != 1) {
748 			btrfs_err(fs_info,
749 			"re-allocated a block that still has references to it!");
750 			dump_block_entry(fs_info, be);
751 			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
752 			goto out_unlock;
753 		}
754 
755 		while (!list_empty(&be->actions)) {
756 			struct ref_action *tmp;
757 
758 			tmp = list_first_entry(&be->actions, struct ref_action,
759 					       list);
760 			list_del(&tmp->list);
761 			kfree(tmp);
762 		}
763 	} else {
764 		struct root_entry *tmp;
765 
766 		if (!parent) {
767 			re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
768 			if (!re) {
769 				kfree(ref);
770 				kfree(ra);
771 				ret = -ENOMEM;
772 				goto out;
773 			}
774 			/*
775 			 * This is the root that is modifying us, so it's the
776 			 * one we want to look up below when we modify the
777 			 * re->num_refs.
778 			 */
779 			ref_root = root->root_key.objectid;
780 			re->root_objectid = root->root_key.objectid;
781 			re->num_refs = 0;
782 		}
783 
784 		spin_lock(&root->fs_info->ref_verify_lock);
785 		be = lookup_block_entry(&root->fs_info->block_tree, bytenr);
786 		if (!be) {
787 			btrfs_err(fs_info,
788 "trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
789 				  action, (unsigned long long)bytenr,
790 				  (unsigned long long)num_bytes);
791 			dump_ref_action(fs_info, ra);
792 			kfree(ref);
793 			kfree(ra);
794 			goto out_unlock;
795 		}
796 
797 		if (!parent) {
798 			tmp = insert_root_entry(&be->roots, re);
799 			if (tmp) {
800 				kfree(re);
801 				re = tmp;
802 			}
803 		}
804 	}
805 
806 	exist = insert_ref_entry(&be->refs, ref);
807 	if (exist) {
808 		if (action == BTRFS_DROP_DELAYED_REF) {
809 			if (exist->num_refs == 0) {
810 				btrfs_err(fs_info,
811 "dropping a ref for an existing root that doesn't have a ref on the block");
812 				dump_block_entry(fs_info, be);
813 				dump_ref_action(fs_info, ra);
				kfree(ref);
814 				kfree(ra);
815 				goto out_unlock;
816 			}
817 			exist->num_refs--;
818 			if (exist->num_refs == 0) {
819 				rb_erase(&exist->node, &be->refs);
820 				kfree(exist);
821 			}
822 		} else if (!be->metadata) {
823 			exist->num_refs++;
824 		} else {
825 			btrfs_err(fs_info,
826 "attempting to add another ref for an existing ref on a tree block");
827 			dump_block_entry(fs_info, be);
828 			dump_ref_action(fs_info, ra);
			kfree(ref);
829 			kfree(ra);
830 			goto out_unlock;
831 		}
832 		kfree(ref);
833 	} else {
834 		if (action == BTRFS_DROP_DELAYED_REF) {
835 			btrfs_err(fs_info,
836 "dropping a ref for a root that doesn't have a ref on the block");
837 			dump_block_entry(fs_info, be);
838 			dump_ref_action(fs_info, ra);
839 			kfree(ra);
840 			goto out_unlock;
841 		}
842 	}
843 
844 	if (!parent && !re) {
845 		re = lookup_root_entry(&be->roots, ref_root);
846 		if (!re) {
847 			/*
848 			 * This shouldn't happen because we will add our re
849 			 * above when we look up the be with !parent, but catch
850 			 * this case just in case, so we don't panic because of
851 			 * some other corner case I didn't think of.
852 			 */
853 			btrfs_err(fs_info, "failed to find root %llu for %llu",
854 				  root->root_key.objectid, be->bytenr);
855 			dump_block_entry(fs_info, be);
856 			dump_ref_action(fs_info, ra);
857 			kfree(ra);
858 			goto out_unlock;
859 		}
860 	}
861 	if (action == BTRFS_DROP_DELAYED_REF) {
862 		if (re)
863 			re->num_refs--;
864 		be->num_refs--;
865 	} else if (action == BTRFS_ADD_DELAYED_REF) {
866 		be->num_refs++;
867 		if (re)
868 			re->num_refs++;
869 	}
870 	list_add_tail(&ra->list, &be->actions);
871 	ret = 0;
872 out_unlock:
873 	spin_unlock(&root->fs_info->ref_verify_lock);
874 out:
875 	if (ret)
876 		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
877 	return ret;
878 }
879 
880 /* Free up the ref cache */
881 void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
882 {
883 	struct block_entry *be;
884 	struct rb_node *n;
885 
886 	if (!btrfs_test_opt(fs_info, REF_VERIFY))
887 		return;
888 
889 	spin_lock(&fs_info->ref_verify_lock);
890 	while ((n = rb_first(&fs_info->block_tree))) {
891 		be = rb_entry(n, struct block_entry, node);
892 		rb_erase(&be->node, &fs_info->block_tree);
893 		free_block_entry(be);
894 		cond_resched_lock(&fs_info->ref_verify_lock);
895 	}
896 	spin_unlock(&fs_info->ref_verify_lock);
897 }
898 
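/*
 * Remove all block entries within [start, start + len), e.g. when the block
 * group covering that range goes away.  Entries that cross the range
 * boundaries are dumped as errors below.
 */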
899 void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
900 			       u64 len)
901 {
902 	struct block_entry *be = NULL, *entry;
903 	struct rb_node *n;
904 
905 	if (!btrfs_test_opt(fs_info, REF_VERIFY))
906 		return;
907 
908 	spin_lock(&fs_info->ref_verify_lock);
909 	n = fs_info->block_tree.rb_node;
910 	while (n) {
911 		entry = rb_entry(n, struct block_entry, node);
912 		if (entry->bytenr < start) {
913 			n = n->rb_right;
914 		} else if (entry->bytenr > start) {
915 			n = n->rb_left;
916 		} else {
917 			be = entry;
918 			break;
919 		}
920 		/* We want to get as close to start as possible */
921 		if (be == NULL ||
922 		    (entry->bytenr < start && be->bytenr > start) ||
923 		    (entry->bytenr < start && entry->bytenr > be->bytenr))
924 			be = entry;
925 	}
926 
927 	/*
928 	 * We could have an empty block group here; maybe we should add a check
929 	 * for this case to verify that it really was empty?
930 	 */
931 	if (!be) {
932 		spin_unlock(&fs_info->ref_verify_lock);
933 		return;
934 	}
935 
936 	n = &be->node;
937 	while (n) {
938 		be = rb_entry(n, struct block_entry, node);
939 		n = rb_next(n);
940 		if (be->bytenr < start && be->bytenr + be->len > start) {
941 			btrfs_err(fs_info,
942 				"block entry overlaps a block group [%llu,%llu]!",
943 				start, len);
944 			dump_block_entry(fs_info, be);
945 			continue;
946 		}
947 		if (be->bytenr < start)
948 			continue;
949 		if (be->bytenr >= start + len)
950 			break;
951 		if (be->bytenr + be->len > start + len) {
952 			btrfs_err(fs_info,
953 				"block entry overlaps a block group [%llu,%llu]!",
954 				start, len);
955 			dump_block_entry(fs_info, be);
956 		}
957 		rb_erase(&be->node, &fs_info->block_tree);
958 		free_block_entry(be);
959 	}
960 	spin_unlock(&fs_info->ref_verify_lock);
961 }
962 
963 /* Walk down all roots and build the ref tree, meant to be called at mount */
964 int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
965 {
966 	struct btrfs_path *path;
967 	struct extent_buffer *eb;
968 	u64 bytenr = 0, num_bytes = 0;
969 	int ret, level;
970 
971 	if (!btrfs_test_opt(fs_info, REF_VERIFY))
972 		return 0;
973 
974 	path = btrfs_alloc_path();
975 	if (!path)
976 		return -ENOMEM;
977 
978 	eb = btrfs_read_lock_root_node(fs_info->extent_root);
979 	btrfs_set_lock_blocking_read(eb);
980 	level = btrfs_header_level(eb);
981 	path->nodes[level] = eb;
982 	path->slots[level] = 0;
983 	path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
984 
985 	while (1) {
986 		/*
987 		 * We have to keep track of the bytenr/num_bytes we last hit
988 		 * because we could have run out of space for an inline ref, and
989 		 * would have had to add a ref key item which may appear on a
990 		 * different leaf from the original extent item.
991 		 */
992 		ret = walk_down_tree(fs_info->extent_root, path, level,
993 				     &bytenr, &num_bytes);
994 		if (ret)
995 			break;
996 		ret = walk_up_tree(path, &level);
997 		if (ret < 0)
998 			break;
999 		if (ret > 0) {
1000 			ret = 0;
1001 			break;
1002 		}
1003 	}
1004 	if (ret) {
1005 		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
1006 		btrfs_free_ref_cache(fs_info);
1007 	}
1008 	btrfs_free_path(path);
1009 	return ret;
1010 }
1011