xref: /linux/fs/btrfs/disk-io.c (revision 2ccd9fecd9163f168761d4398564c81554f636ef)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/blkdev.h>
8 #include <linux/radix-tree.h>
9 #include <linux/writeback.h>
10 #include <linux/workqueue.h>
11 #include <linux/kthread.h>
12 #include <linux/slab.h>
13 #include <linux/migrate.h>
14 #include <linux/ratelimit.h>
15 #include <linux/uuid.h>
16 #include <linux/semaphore.h>
17 #include <linux/error-injection.h>
18 #include <linux/crc32c.h>
19 #include <linux/sched/mm.h>
20 #include <linux/unaligned.h>
21 #include <crypto/hash.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "btrfs_inode.h"
26 #include "bio.h"
27 #include "print-tree.h"
28 #include "locking.h"
29 #include "tree-log.h"
30 #include "free-space-cache.h"
31 #include "free-space-tree.h"
32 #include "dev-replace.h"
33 #include "raid56.h"
34 #include "sysfs.h"
35 #include "qgroup.h"
36 #include "compression.h"
37 #include "tree-checker.h"
38 #include "ref-verify.h"
39 #include "block-group.h"
40 #include "discard.h"
41 #include "space-info.h"
42 #include "zoned.h"
43 #include "subpage.h"
44 #include "fs.h"
45 #include "accessors.h"
46 #include "extent-tree.h"
47 #include "root-tree.h"
48 #include "defrag.h"
49 #include "uuid-tree.h"
50 #include "relocation.h"
51 #include "scrub.h"
52 #include "super.h"
53 
54 #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
55 				 BTRFS_HEADER_FLAG_RELOC |\
56 				 BTRFS_SUPER_FLAG_ERROR |\
57 				 BTRFS_SUPER_FLAG_SEEDING |\
58 				 BTRFS_SUPER_FLAG_METADUMP |\
59 				 BTRFS_SUPER_FLAG_METADUMP_V2)
60 
61 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
62 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
63 
64 static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
65 {
66 	if (fs_info->csum_shash)
67 		crypto_free_shash(fs_info->csum_shash);
68 }
69 
70 /*
71  * Compute the csum of a btree block and store the result in the provided buffer.
72  */
73 static void csum_tree_block(struct extent_buffer *buf, u8 *result)
74 {
75 	struct btrfs_fs_info *fs_info = buf->fs_info;
76 	int num_pages;
77 	u32 first_page_part;
78 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
79 	char *kaddr;
80 	int i;
81 
82 	shash->tfm = fs_info->csum_shash;
83 	crypto_shash_init(shash);
84 
85 	if (buf->addr) {
86 		/* Pages are contiguous, handle them as a big one. */
87 		kaddr = buf->addr;
88 		first_page_part = fs_info->nodesize;
89 		num_pages = 1;
90 	} else {
91 		kaddr = folio_address(buf->folios[0]);
92 		first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
93 		num_pages = num_extent_pages(buf);
94 	}
95 
96 	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
97 			    first_page_part - BTRFS_CSUM_SIZE);
98 
99 	/*
100 	 * Only the case of multiple single-page folios reaches here.
101 	 *
102 	 * The nodesize <= PAGE_SIZE and large folio cases were already fully
103 	 * handled by the crypto_shash_update() call above.
104 	 */
105 	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
106 		kaddr = folio_address(buf->folios[i]);
107 		crypto_shash_update(shash, kaddr, PAGE_SIZE);
108 	}
109 	memset(result, 0, BTRFS_CSUM_SIZE);
110 	crypto_shash_final(shash, result);
111 }
112 
113 /*
114  * we can't consider a given block up to date unless the transid of the
115  * block matches the transid in the parent node's pointer.  This is how we
116  * detect blocks that either didn't get written at all or got written
117  * in the wrong place.
118  */
119 int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
120 {
121 	if (!extent_buffer_uptodate(eb))
122 		return 0;
123 
124 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
125 		return 1;
126 
127 	if (atomic)
128 		return -EAGAIN;
129 
130 	if (!extent_buffer_uptodate(eb) ||
131 	    btrfs_header_generation(eb) != parent_transid) {
132 		btrfs_err_rl(eb->fs_info,
133 "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
134 			eb->start, eb->read_mirror,
135 			parent_transid, btrfs_header_generation(eb));
136 		clear_extent_buffer_uptodate(eb);
137 		return 0;
138 	}
139 	return 1;
140 }
141 
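/*
 * Return true if @csum_type is one of the checksum algorithms supported for
 * the super block.
 */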
142 static bool btrfs_supported_super_csum(u16 csum_type)
143 {
144 	switch (csum_type) {
145 	case BTRFS_CSUM_TYPE_CRC32:
146 	case BTRFS_CSUM_TYPE_XXHASH:
147 	case BTRFS_CSUM_TYPE_SHA256:
148 	case BTRFS_CSUM_TYPE_BLAKE2:
149 		return true;
150 	default:
151 		return false;
152 	}
153 }
154 
155 /*
156  * Return 0 if the superblock checksum type matches the checksum value of that
157  * algorithm. Pass the raw disk superblock data.
158  */
159 int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
160 			   const struct btrfs_super_block *disk_sb)
161 {
162 	char result[BTRFS_CSUM_SIZE];
163 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
164 
165 	shash->tfm = fs_info->csum_shash;
166 
167 	/*
168 	 * The super_block structure does not span the whole
169 	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
170 	 * filled with zeros and is included in the checksum.
171 	 */
172 	crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
173 			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
174 
175 	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
176 		return 1;
177 
178 	return 0;
179 }
180 
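/*
 * Repair the bad copy on mirror @mirror_num by rewriting it, folio by folio,
 * with the good data of @eb that we hold in memory. Fails with -EROFS on a
 * read-only filesystem.
 */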
181 static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
182 				      int mirror_num)
183 {
184 	struct btrfs_fs_info *fs_info = eb->fs_info;
185 	int ret = 0;
186 
187 	if (sb_rdonly(fs_info->sb))
188 		return -EROFS;
189 
190 	for (int i = 0; i < num_extent_folios(eb); i++) {
191 		struct folio *folio = eb->folios[i];
192 		u64 start = max_t(u64, eb->start, folio_pos(folio));
193 		u64 end = min_t(u64, eb->start + eb->len,
194 				folio_pos(folio) + eb->folio_size);
195 		u32 len = end - start;
196 		phys_addr_t paddr = PFN_PHYS(folio_pfn(folio)) +
197 				    offset_in_folio(folio, start);
198 
199 		ret = btrfs_repair_io_failure(fs_info, 0, start, len, start,
200 					      paddr, mirror_num);
201 		if (ret)
202 			break;
203 	}
204 
205 	return ret;
206 }
207 
208 /*
209  * helper to read a given tree block, doing retries as required when
210  * the checksums don't match and we have alternate mirrors to try.
211  *
212  * @check:		expected tree parentness check, see the comments of the
213  *			structure for details.
214  */
215 int btrfs_read_extent_buffer(struct extent_buffer *eb,
216 			     const struct btrfs_tree_parent_check *check)
217 {
218 	struct btrfs_fs_info *fs_info = eb->fs_info;
219 	int failed = 0;
220 	int ret;
221 	int num_copies = 0;
222 	int mirror_num = 0;
223 	int failed_mirror = 0;
224 
225 	ASSERT(check);
226 
227 	while (1) {
228 		ret = read_extent_buffer_pages(eb, mirror_num, check);
229 		if (!ret)
230 			break;
231 
232 		num_copies = btrfs_num_copies(fs_info,
233 					      eb->start, eb->len);
234 		if (num_copies == 1)
235 			break;
236 
237 		if (!failed_mirror) {
238 			failed = 1;
239 			failed_mirror = eb->read_mirror;
240 		}
241 
242 		mirror_num++;
243 		if (mirror_num == failed_mirror)
244 			mirror_num++;
245 
246 		if (mirror_num > num_copies)
247 			break;
248 	}
249 
250 	if (failed && !ret && failed_mirror)
251 		btrfs_repair_eb_io_failure(eb, failed_mirror);
252 
253 	return ret;
254 }
255 
256 /*
257  * Checksum a dirty tree block before IO.
258  */
259 int btree_csum_one_bio(struct btrfs_bio *bbio)
260 {
261 	struct extent_buffer *eb = bbio->private;
262 	struct btrfs_fs_info *fs_info = eb->fs_info;
263 	u64 found_start = btrfs_header_bytenr(eb);
264 	u64 last_trans;
265 	u8 result[BTRFS_CSUM_SIZE];
266 	int ret;
267 
268 	/* Btree blocks are always contiguous on disk. */
269 	if (WARN_ON_ONCE(bbio->file_offset != eb->start))
270 		return -EIO;
271 	if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
272 		return -EIO;
273 
274 	/*
275 	 * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't
276 	 * checksum it but zero-out its content. This is done to preserve
277 	 * ordering of I/O without unnecessarily writing out data.
278 	 */
279 	if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
280 		memzero_extent_buffer(eb, 0, eb->len);
281 		return 0;
282 	}
283 
284 	if (WARN_ON_ONCE(found_start != eb->start))
285 		return -EIO;
286 	if (WARN_ON(!btrfs_meta_folio_test_uptodate(eb->folios[0], eb)))
287 		return -EIO;
288 
289 	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
290 				    offsetof(struct btrfs_header, fsid),
291 				    BTRFS_FSID_SIZE) == 0);
292 	csum_tree_block(eb, result);
293 
294 	if (btrfs_header_level(eb))
295 		ret = btrfs_check_node(eb);
296 	else
297 		ret = btrfs_check_leaf(eb);
298 
299 	if (ret < 0)
300 		goto error;
301 
302 	/*
303 	 * Also check the generation: any eb that reaches here must be newer
304 	 * than the last committed one, or something is seriously wrong.
305 	 */
306 	last_trans = btrfs_get_last_trans_committed(fs_info);
307 	if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
308 		ret = -EUCLEAN;
309 		btrfs_err(fs_info,
310 			"block=%llu bad generation, have %llu expect > %llu",
311 			  eb->start, btrfs_header_generation(eb), last_trans);
312 		goto error;
313 	}
314 	write_extent_buffer(eb, result, 0, fs_info->csum_size);
315 	return 0;
316 
317 error:
318 	btrfs_print_tree(eb, 0);
319 	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
320 		  eb->start);
321 	/*
322 	 * Be noisy if this is an extent buffer from a log tree. We don't abort
323 	 * a transaction in case there's a bad log tree extent buffer, we just
324 	 * fall back to a transaction commit. Still, we want to know when there
325 	 * is a bad log tree extent buffer, as that may signal a bug somewhere.
326 	 */
327 	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
328 		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
329 	return ret;
330 }
331 
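/*
 * Return true if the fsid in @eb's header matches neither this filesystem's
 * metadata_uuid nor the fsid of any of its seed devices, i.e. the block does
 * not belong to this filesystem.
 */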
332 static bool check_tree_block_fsid(struct extent_buffer *eb)
333 {
334 	struct btrfs_fs_info *fs_info = eb->fs_info;
335 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
336 	u8 fsid[BTRFS_FSID_SIZE];
337 
338 	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
339 			   BTRFS_FSID_SIZE);
340 
341 	/*
342 	 * alloc_fs_devices() copies the fsid into fs_devices::metadata_uuid.
343 	 * This is then overwritten by the metadata_uuid, if present, in
344 	 * device_list_add(). The same is true for a seed device as well. So use
345 	 * of fs_devices::metadata_uuid is appropriate here.
346 	 */
347 	if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
348 		return false;
349 
350 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
351 		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
352 			return false;
353 
354 	return true;
355 }
356 
357 /* Do basic extent buffer checks at read time */
358 int btrfs_validate_extent_buffer(struct extent_buffer *eb,
359 				 const struct btrfs_tree_parent_check *check)
360 {
361 	struct btrfs_fs_info *fs_info = eb->fs_info;
362 	u64 found_start;
363 	const u32 csum_size = fs_info->csum_size;
364 	u8 found_level;
365 	u8 result[BTRFS_CSUM_SIZE];
366 	const u8 *header_csum;
367 	int ret = 0;
368 	const bool ignore_csum = btrfs_test_opt(fs_info, IGNOREMETACSUMS);
369 
370 	ASSERT(check);
371 
372 	found_start = btrfs_header_bytenr(eb);
373 	if (found_start != eb->start) {
374 		btrfs_err_rl(fs_info,
375 			"bad tree block start, mirror %u want %llu have %llu",
376 			     eb->read_mirror, eb->start, found_start);
377 		ret = -EIO;
378 		goto out;
379 	}
380 	if (check_tree_block_fsid(eb)) {
381 		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
382 			     eb->start, eb->read_mirror);
383 		ret = -EIO;
384 		goto out;
385 	}
386 	found_level = btrfs_header_level(eb);
387 	if (found_level >= BTRFS_MAX_LEVEL) {
388 		btrfs_err(fs_info,
389 			"bad tree block level, mirror %u level %d on logical %llu",
390 			eb->read_mirror, btrfs_header_level(eb), eb->start);
391 		ret = -EIO;
392 		goto out;
393 	}
394 
395 	csum_tree_block(eb, result);
396 	header_csum = folio_address(eb->folios[0]) +
397 		get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum));
398 
399 	if (memcmp(result, header_csum, csum_size) != 0) {
400 		btrfs_warn_rl(fs_info,
401 "checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d%s",
402 			      eb->start, eb->read_mirror,
403 			      CSUM_FMT_VALUE(csum_size, header_csum),
404 			      CSUM_FMT_VALUE(csum_size, result),
405 			      btrfs_header_level(eb),
406 			      ignore_csum ? ", ignored" : "");
407 		if (!ignore_csum) {
408 			ret = -EUCLEAN;
409 			goto out;
410 		}
411 	}
412 
413 	if (found_level != check->level) {
414 		btrfs_err(fs_info,
415 		"level verify failed on logical %llu mirror %u wanted %u found %u",
416 			  eb->start, eb->read_mirror, check->level, found_level);
417 		ret = -EIO;
418 		goto out;
419 	}
420 	if (unlikely(check->transid &&
421 		     btrfs_header_generation(eb) != check->transid)) {
422 		btrfs_err_rl(eb->fs_info,
423 "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
424 				eb->start, eb->read_mirror, check->transid,
425 				btrfs_header_generation(eb));
426 		ret = -EIO;
427 		goto out;
428 	}
429 	if (check->has_first_key) {
430 		const struct btrfs_key *expect_key = &check->first_key;
431 		struct btrfs_key found_key;
432 
433 		if (found_level)
434 			btrfs_node_key_to_cpu(eb, &found_key, 0);
435 		else
436 			btrfs_item_key_to_cpu(eb, &found_key, 0);
437 		if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
438 			btrfs_err(fs_info,
439 "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
440 				  eb->start, check->transid,
441 				  expect_key->objectid,
442 				  expect_key->type, expect_key->offset,
443 				  found_key.objectid, found_key.type,
444 				  found_key.offset);
445 			ret = -EUCLEAN;
446 			goto out;
447 		}
448 	}
449 	if (check->owner_root) {
450 		ret = btrfs_check_eb_owner(eb, check->owner_root);
451 		if (ret < 0)
452 			goto out;
453 	}
454 
455 	/* If this is a leaf block and it is corrupt, just return -EIO. */
456 	if (found_level == 0 && btrfs_check_leaf(eb))
457 		ret = -EIO;
458 
459 	if (found_level > 0 && btrfs_check_node(eb))
460 		ret = -EIO;
461 
462 	if (ret)
463 		btrfs_err(fs_info,
464 		"read time tree block corruption detected on logical %llu mirror %u",
465 			  eb->start, eb->read_mirror);
466 out:
467 	return ret;
468 }
469 
470 #ifdef CONFIG_MIGRATION
471 static int btree_migrate_folio(struct address_space *mapping,
472 		struct folio *dst, struct folio *src, enum migrate_mode mode)
473 {
474 	/*
475 	 * We can't safely write a btree page from here,
476 	 * as we haven't done the locking hook.
477 	 */
478 	if (folio_test_dirty(src))
479 		return -EAGAIN;
480 	/*
481 	 * Buffers may be managed in a filesystem specific way.
482 	 * We must have no buffers or drop them.
483 	 */
484 	if (folio_get_private(src) &&
485 	    !filemap_release_folio(src, GFP_KERNEL))
486 		return -EAGAIN;
487 	return migrate_folio(mapping, dst, src, mode);
488 }
489 #else
490 #define btree_migrate_folio NULL
491 #endif
492 
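/*
 * For background (WB_SYNC_NONE) writeback, skip writing btree pages until
 * enough dirty metadata has accumulated (BTRFS_DIRTY_METADATA_THRESH).
 */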
493 static int btree_writepages(struct address_space *mapping,
494 			    struct writeback_control *wbc)
495 {
496 	int ret;
497 
498 	if (wbc->sync_mode == WB_SYNC_NONE) {
499 		struct btrfs_fs_info *fs_info;
500 
501 		if (wbc->for_kupdate)
502 			return 0;
503 
504 		fs_info = inode_to_fs_info(mapping->host);
505 		/* this is a bit racy, but that's ok */
506 		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
507 					     BTRFS_DIRTY_METADATA_THRESH,
508 					     fs_info->dirty_metadata_batch);
509 		if (ret < 0)
510 			return 0;
511 	}
512 	return btree_write_cache_pages(mapping, wbc);
513 }
514 
515 static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
516 {
517 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
518 		return false;
519 
520 	return try_release_extent_buffer(folio);
521 }
522 
523 static void btree_invalidate_folio(struct folio *folio, size_t offset,
524 				 size_t length)
525 {
526 	struct extent_io_tree *tree;
527 
528 	tree = &folio_to_inode(folio)->io_tree;
529 	extent_invalidate_folio(tree, folio, offset);
530 	btree_release_folio(folio, GFP_NOFS);
531 	if (folio_get_private(folio)) {
532 		btrfs_warn(folio_to_fs_info(folio),
533 			   "folio private not zero on folio %llu",
534 			   (unsigned long long)folio_pos(folio));
535 		folio_detach_private(folio);
536 	}
537 }
538 
539 #ifdef DEBUG
540 static bool btree_dirty_folio(struct address_space *mapping,
541 		struct folio *folio)
542 {
543 	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
544 	struct btrfs_subpage_info *spi = fs_info->subpage_info;
545 	struct btrfs_subpage *subpage;
546 	struct extent_buffer *eb;
547 	int cur_bit = 0;
548 	u64 page_start = folio_pos(folio);
549 
550 	if (fs_info->sectorsize == PAGE_SIZE) {
551 		eb = folio_get_private(folio);
552 		BUG_ON(!eb);
553 		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
554 		BUG_ON(!atomic_read(&eb->refs));
555 		btrfs_assert_tree_write_locked(eb);
556 		return filemap_dirty_folio(mapping, folio);
557 	}
558 
559 	ASSERT(spi);
560 	subpage = folio_get_private(folio);
561 
562 	for (cur_bit = spi->dirty_offset;
563 	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
564 	     cur_bit++) {
565 		unsigned long flags;
566 		u64 cur;
567 
568 		spin_lock_irqsave(&subpage->lock, flags);
569 		if (!test_bit(cur_bit, subpage->bitmaps)) {
570 			spin_unlock_irqrestore(&subpage->lock, flags);
571 			continue;
572 		}
573 		spin_unlock_irqrestore(&subpage->lock, flags);
574 		cur = page_start + cur_bit * fs_info->sectorsize;
575 
576 		eb = find_extent_buffer(fs_info, cur);
577 		ASSERT(eb);
578 		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
579 		ASSERT(atomic_read(&eb->refs));
580 		btrfs_assert_tree_write_locked(eb);
581 		free_extent_buffer(eb);
582 
583 		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
584 	}
585 	return filemap_dirty_folio(mapping, folio);
586 }
587 #else
588 #define btree_dirty_folio filemap_dirty_folio
589 #endif
590 
591 static const struct address_space_operations btree_aops = {
592 	.writepages	= btree_writepages,
593 	.release_folio	= btree_release_folio,
594 	.invalidate_folio = btree_invalidate_folio,
595 	.migrate_folio	= btree_migrate_folio,
596 	.dirty_folio	= btree_dirty_folio,
597 };
598 
599 struct extent_buffer *btrfs_find_create_tree_block(
600 						struct btrfs_fs_info *fs_info,
601 						u64 bytenr, u64 owner_root,
602 						int level)
603 {
604 	if (btrfs_is_testing(fs_info))
605 		return alloc_test_extent_buffer(fs_info, bytenr);
606 	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
607 }
608 
609 /*
610  * Read the tree block at logical address @bytenr and do various basic but
611  * critical verifications.
612  *
613  * @check:		expected tree parentness check, see comments of the
614  *			structure for details.
615  */
616 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
617 				      struct btrfs_tree_parent_check *check)
618 {
619 	struct extent_buffer *buf = NULL;
620 	int ret;
621 
622 	ASSERT(check);
623 
624 	buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
625 					   check->level);
626 	if (IS_ERR(buf))
627 		return buf;
628 
629 	ret = btrfs_read_extent_buffer(buf, check);
630 	if (ret) {
631 		free_extent_buffer_stale(buf);
632 		return ERR_PTR(ret);
633 	}
634 	return buf;
636 }
637 
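/*
 * Allocate and initialize a bare in-memory root structure for @objectid.
 * No tree blocks are read or allocated here.
 */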
638 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
639 					   u64 objectid, gfp_t flags)
640 {
641 	struct btrfs_root *root;
642 	bool dummy = btrfs_is_testing(fs_info);
643 
644 	root = kzalloc(sizeof(*root), flags);
645 	if (!root)
646 		return NULL;
647 
648 	memset(&root->root_key, 0, sizeof(root->root_key));
649 	memset(&root->root_item, 0, sizeof(root->root_item));
650 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
651 	root->fs_info = fs_info;
652 	root->root_key.objectid = objectid;
653 	root->node = NULL;
654 	root->commit_root = NULL;
655 	root->state = 0;
656 	RB_CLEAR_NODE(&root->rb_node);
657 
658 	btrfs_set_root_last_trans(root, 0);
659 	root->free_objectid = 0;
660 	root->nr_delalloc_inodes = 0;
661 	root->nr_ordered_extents = 0;
662 	xa_init(&root->inodes);
663 	xa_init(&root->delayed_nodes);
664 
665 	btrfs_init_root_block_rsv(root);
666 
667 	INIT_LIST_HEAD(&root->dirty_list);
668 	INIT_LIST_HEAD(&root->root_list);
669 	INIT_LIST_HEAD(&root->delalloc_inodes);
670 	INIT_LIST_HEAD(&root->delalloc_root);
671 	INIT_LIST_HEAD(&root->ordered_extents);
672 	INIT_LIST_HEAD(&root->ordered_root);
673 	INIT_LIST_HEAD(&root->reloc_dirty_list);
674 	spin_lock_init(&root->delalloc_lock);
675 	spin_lock_init(&root->ordered_extent_lock);
676 	spin_lock_init(&root->accounting_lock);
677 	spin_lock_init(&root->qgroup_meta_rsv_lock);
678 	mutex_init(&root->objectid_mutex);
679 	mutex_init(&root->log_mutex);
680 	mutex_init(&root->ordered_extent_mutex);
681 	mutex_init(&root->delalloc_mutex);
682 	init_waitqueue_head(&root->qgroup_flush_wait);
683 	init_waitqueue_head(&root->log_writer_wait);
684 	init_waitqueue_head(&root->log_commit_wait[0]);
685 	init_waitqueue_head(&root->log_commit_wait[1]);
686 	INIT_LIST_HEAD(&root->log_ctxs[0]);
687 	INIT_LIST_HEAD(&root->log_ctxs[1]);
688 	atomic_set(&root->log_commit[0], 0);
689 	atomic_set(&root->log_commit[1], 0);
690 	atomic_set(&root->log_writers, 0);
691 	atomic_set(&root->log_batch, 0);
692 	refcount_set(&root->refs, 1);
693 	atomic_set(&root->snapshot_force_cow, 0);
694 	atomic_set(&root->nr_swapfiles, 0);
695 	btrfs_set_root_log_transid(root, 0);
696 	root->log_transid_committed = -1;
697 	btrfs_set_root_last_log_commit(root, 0);
698 	root->anon_dev = 0;
699 	if (!dummy) {
700 		btrfs_extent_io_tree_init(fs_info, &root->dirty_log_pages,
701 					  IO_TREE_ROOT_DIRTY_LOG_PAGES);
702 		btrfs_extent_io_tree_init(fs_info, &root->log_csum_range,
703 					  IO_TREE_LOG_CSUM_RANGE);
704 	}
705 
706 	spin_lock_init(&root->root_item_lock);
707 	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
708 #ifdef CONFIG_BTRFS_DEBUG
709 	INIT_LIST_HEAD(&root->leak_list);
710 	spin_lock(&fs_info->fs_roots_radix_lock);
711 	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
712 	spin_unlock(&fs_info->fs_roots_radix_lock);
713 #endif
714 
715 	return root;
716 }
717 
718 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
719 /* Should only be used by the testing infrastructure */
720 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
721 {
722 	struct btrfs_root *root;
723 
724 	if (!fs_info)
725 		return ERR_PTR(-EINVAL);
726 
727 	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
728 	if (!root)
729 		return ERR_PTR(-ENOMEM);
730 
731 	/* Self-tests allocate dummy tree blocks sequentially from alloc_bytenr. */
732 	root->alloc_bytenr = 0;
733 
734 	return root;
735 }
736 #endif
737 
738 static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
739 {
740 	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
741 	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);
742 
743 	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
744 }
745 
746 static int global_root_key_cmp(const void *k, const struct rb_node *node)
747 {
748 	const struct btrfs_key *key = k;
749 	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);
750 
751 	return btrfs_comp_cpu_keys(key, &root->root_key);
752 }
753 
754 int btrfs_global_root_insert(struct btrfs_root *root)
755 {
756 	struct btrfs_fs_info *fs_info = root->fs_info;
757 	struct rb_node *tmp;
758 	int ret = 0;
759 
760 	write_lock(&fs_info->global_root_lock);
761 	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
762 	write_unlock(&fs_info->global_root_lock);
763 
764 	if (tmp) {
765 		ret = -EEXIST;
766 		btrfs_warn(fs_info, "global root %llu %llu already exists",
767 			   btrfs_root_id(root), root->root_key.offset);
768 	}
769 	return ret;
770 }
771 
772 void btrfs_global_root_delete(struct btrfs_root *root)
773 {
774 	struct btrfs_fs_info *fs_info = root->fs_info;
775 
776 	write_lock(&fs_info->global_root_lock);
777 	rb_erase(&root->rb_node, &fs_info->global_root_tree);
778 	write_unlock(&fs_info->global_root_lock);
779 }
780 
781 struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
782 				     struct btrfs_key *key)
783 {
784 	struct rb_node *node;
785 	struct btrfs_root *root = NULL;
786 
787 	read_lock(&fs_info->global_root_lock);
788 	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
789 	if (node)
790 		root = container_of(node, struct btrfs_root, rb_node);
791 	read_unlock(&fs_info->global_root_lock);
792 
793 	return root;
794 }
795 
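/*
 * Look up the global root id covering @bytenr via its block group. Always 0
 * unless the EXTENT_TREE_V2 incompat feature is enabled.
 */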
796 static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
797 {
798 	struct btrfs_block_group *block_group;
799 	u64 ret;
800 
801 	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
802 		return 0;
803 
804 	if (bytenr)
805 		block_group = btrfs_lookup_block_group(fs_info, bytenr);
806 	else
807 		block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
808 	ASSERT(block_group);
809 	if (!block_group)
810 		return 0;
811 	ret = block_group->global_root_id;
812 	btrfs_put_block_group(block_group);
813 
814 	return ret;
815 }
816 
817 struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
818 {
819 	struct btrfs_key key = {
820 		.objectid = BTRFS_CSUM_TREE_OBJECTID,
821 		.type = BTRFS_ROOT_ITEM_KEY,
822 		.offset = btrfs_global_root_id(fs_info, bytenr),
823 	};
824 
825 	return btrfs_global_root(fs_info, &key);
826 }
827 
828 struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
829 {
830 	struct btrfs_key key = {
831 		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
832 		.type = BTRFS_ROOT_ITEM_KEY,
833 		.offset = btrfs_global_root_id(fs_info, bytenr),
834 	};
835 
836 	return btrfs_global_root(fs_info, &key);
837 }
838 
839 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
840 				     u64 objectid)
841 {
842 	struct btrfs_fs_info *fs_info = trans->fs_info;
843 	struct extent_buffer *leaf;
844 	struct btrfs_root *tree_root = fs_info->tree_root;
845 	struct btrfs_root *root;
846 	struct btrfs_key key;
847 	unsigned int nofs_flag;
848 	int ret = 0;
849 
850 	/*
851 	 * We're holding a transaction handle, so use a NOFS memory allocation
852 	 * context to avoid deadlock if reclaim happens.
853 	 */
854 	nofs_flag = memalloc_nofs_save();
855 	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
856 	memalloc_nofs_restore(nofs_flag);
857 	if (!root)
858 		return ERR_PTR(-ENOMEM);
859 
860 	root->root_key.objectid = objectid;
861 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
862 	root->root_key.offset = 0;
863 
864 	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
865 				      0, BTRFS_NESTING_NORMAL);
866 	if (IS_ERR(leaf)) {
867 		ret = PTR_ERR(leaf);
868 		leaf = NULL;
869 		goto fail;
870 	}
871 
872 	root->node = leaf;
873 	btrfs_mark_buffer_dirty(trans, leaf);
874 
875 	root->commit_root = btrfs_root_node(root);
876 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
877 
878 	btrfs_set_root_flags(&root->root_item, 0);
879 	btrfs_set_root_limit(&root->root_item, 0);
880 	btrfs_set_root_bytenr(&root->root_item, leaf->start);
881 	btrfs_set_root_generation(&root->root_item, trans->transid);
882 	btrfs_set_root_level(&root->root_item, 0);
883 	btrfs_set_root_refs(&root->root_item, 1);
884 	btrfs_set_root_used(&root->root_item, leaf->len);
885 	btrfs_set_root_last_snapshot(&root->root_item, 0);
886 	btrfs_set_root_dirid(&root->root_item, 0);
887 	if (btrfs_is_fstree(objectid))
888 		generate_random_guid(root->root_item.uuid);
889 	else
890 		export_guid(root->root_item.uuid, &guid_null);
891 	btrfs_set_root_drop_level(&root->root_item, 0);
892 
893 	btrfs_tree_unlock(leaf);
894 
895 	key.objectid = objectid;
896 	key.type = BTRFS_ROOT_ITEM_KEY;
897 	key.offset = 0;
898 	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
899 	if (ret)
900 		goto fail;
901 
902 	return root;
903 
904 fail:
905 	btrfs_put_root(root);
906 
907 	return ERR_PTR(ret);
908 }
909 
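/*
 * Allocate the in-memory structure of a log tree root. The root node itself
 * is allocated separately via btrfs_alloc_log_tree_node().
 */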
910 static struct btrfs_root *alloc_log_tree(struct btrfs_fs_info *fs_info)
911 {
912 	struct btrfs_root *root;
913 
914 	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
915 	if (!root)
916 		return ERR_PTR(-ENOMEM);
917 
918 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
919 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
920 	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
921 
922 	return root;
923 }
924 
925 int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
926 			      struct btrfs_root *root)
927 {
928 	struct extent_buffer *leaf;
929 
930 	/*
931 	 * DON'T set SHAREABLE bit for log trees.
932 	 *
933 	 * Log trees are not exposed to user space thus can't be snapshotted,
934 	 * and they go away before a real commit is actually done.
935 	 *
936 	 * They do store pointers to file data extents, and those reference
937 	 * counts still get updated (along with back refs to the log tree).
938 	 */
939 
940 	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
941 			NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
942 	if (IS_ERR(leaf))
943 		return PTR_ERR(leaf);
944 
945 	root->node = leaf;
946 
947 	btrfs_mark_buffer_dirty(trans, root->node);
948 	btrfs_tree_unlock(root->node);
949 
950 	return 0;
951 }
952 
953 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
954 			     struct btrfs_fs_info *fs_info)
955 {
956 	struct btrfs_root *log_root;
957 
958 	log_root = alloc_log_tree(fs_info);
959 	if (IS_ERR(log_root))
960 		return PTR_ERR(log_root);
961 
962 	if (!btrfs_is_zoned(fs_info)) {
963 		int ret = btrfs_alloc_log_tree_node(trans, log_root);
964 
965 		if (ret) {
966 			btrfs_put_root(log_root);
967 			return ret;
968 		}
969 	}
970 
971 	WARN_ON(fs_info->log_root_tree);
972 	fs_info->log_root_tree = log_root;
973 	return 0;
974 }
975 
976 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
977 		       struct btrfs_root *root)
978 {
979 	struct btrfs_fs_info *fs_info = root->fs_info;
980 	struct btrfs_root *log_root;
981 	struct btrfs_inode_item *inode_item;
982 	int ret;
983 
984 	log_root = alloc_log_tree(fs_info);
985 	if (IS_ERR(log_root))
986 		return PTR_ERR(log_root);
987 
988 	ret = btrfs_alloc_log_tree_node(trans, log_root);
989 	if (ret) {
990 		btrfs_put_root(log_root);
991 		return ret;
992 	}
993 
994 	btrfs_set_root_last_trans(log_root, trans->transid);
995 	log_root->root_key.offset = btrfs_root_id(root);
996 
997 	inode_item = &log_root->root_item.inode;
998 	btrfs_set_stack_inode_generation(inode_item, 1);
999 	btrfs_set_stack_inode_size(inode_item, 3);
1000 	btrfs_set_stack_inode_nlink(inode_item, 1);
1001 	btrfs_set_stack_inode_nbytes(inode_item,
1002 				     fs_info->nodesize);
1003 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1004 
1005 	btrfs_set_root_node(&log_root->root_item, log_root->node);
1006 
1007 	WARN_ON(root->log_root);
1008 	root->log_root = log_root;
1009 	btrfs_set_root_log_transid(root, 0);
1010 	root->log_transid_committed = -1;
1011 	btrfs_set_root_last_log_commit(root, 0);
1012 	return 0;
1013 }
1014 
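/*
 * Find the root item for @key in the tree root using @path, then read the
 * root's node from disk and verify its generation and ownership.
 */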
1015 static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
1016 					      struct btrfs_path *path,
1017 					      const struct btrfs_key *key)
1018 {
1019 	struct btrfs_root *root;
1020 	struct btrfs_tree_parent_check check = { 0 };
1021 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
1022 	u64 generation;
1023 	int ret;
1024 	int level;
1025 
1026 	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
1027 	if (!root)
1028 		return ERR_PTR(-ENOMEM);
1029 
1030 	ret = btrfs_find_root(tree_root, key, path,
1031 			      &root->root_item, &root->root_key);
1032 	if (ret) {
1033 		if (ret > 0)
1034 			ret = -ENOENT;
1035 		goto fail;
1036 	}
1037 
1038 	generation = btrfs_root_generation(&root->root_item);
1039 	level = btrfs_root_level(&root->root_item);
1040 	check.level = level;
1041 	check.transid = generation;
1042 	check.owner_root = key->objectid;
1043 	root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
1044 				     &check);
1045 	if (IS_ERR(root->node)) {
1046 		ret = PTR_ERR(root->node);
1047 		root->node = NULL;
1048 		goto fail;
1049 	}
1050 	if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1051 		ret = -EIO;
1052 		goto fail;
1053 	}
1054 
1055 	/*
1056 	 * For a real fs, and for trees other than log/reloc trees, the root
1057 	 * owner must match the owner of its root node.
1058 	 */
1059 	if (!btrfs_is_testing(fs_info) &&
1060 	    btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
1061 	    btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
1062 	    btrfs_root_id(root) != btrfs_header_owner(root->node)) {
1063 		btrfs_crit(fs_info,
1064 "root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
1065 			   btrfs_root_id(root), root->node->start,
1066 			   btrfs_header_owner(root->node),
1067 			   btrfs_root_id(root));
1068 		ret = -EUCLEAN;
1069 		goto fail;
1070 	}
1071 	root->commit_root = btrfs_root_node(root);
1072 	return root;
1073 fail:
1074 	btrfs_put_root(root);
1075 	return ERR_PTR(ret);
1076 }
1077 
1078 struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1079 					const struct btrfs_key *key)
1080 {
1081 	struct btrfs_root *root;
1082 	BTRFS_PATH_AUTO_FREE(path);
1083 
1084 	path = btrfs_alloc_path();
1085 	if (!path)
1086 		return ERR_PTR(-ENOMEM);
1087 	root = read_tree_root_path(tree_root, path, key);
1088 
1089 	return root;
1090 }
1091 
1092 /*
1093  * Initialize subvolume root in-memory structure.
1094  *
1095  * @anon_dev:	anonymous device to attach to the root; if zero, allocate a new one
1096  *
1097  * In case of failure the caller is responsible for calling btrfs_free_fs_root().
1098  */
1099 static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
1100 {
1101 	int ret;
1102 
1103 	btrfs_drew_lock_init(&root->snapshot_lock);
1104 
1105 	if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
1106 	    !btrfs_is_data_reloc_root(root) &&
1107 	    btrfs_is_fstree(btrfs_root_id(root))) {
1108 		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
1109 		btrfs_check_and_init_root_item(&root->root_item);
1110 	}
1111 
1112 	/*
1113 	 * Don't assign an anonymous block device to roots that are not exposed
1114 	 * to user space; the id pool is limited to 1M.
1115 	 */
1116 	if (btrfs_is_fstree(btrfs_root_id(root)) &&
1117 	    btrfs_root_refs(&root->root_item) > 0) {
1118 		if (!anon_dev) {
1119 			ret = get_anon_bdev(&root->anon_dev);
1120 			if (ret)
1121 				return ret;
1122 		} else {
1123 			root->anon_dev = anon_dev;
1124 		}
1125 	}
1126 
1127 	mutex_lock(&root->objectid_mutex);
1128 	ret = btrfs_init_root_free_objectid(root);
1129 	if (ret) {
1130 		mutex_unlock(&root->objectid_mutex);
1131 		return ret;
1132 	}
1133 
1134 	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
1135 
1136 	mutex_unlock(&root->objectid_mutex);
1137 
1138 	return 0;
1139 }
1140 
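/* Look up a cached fs root by id, returning a referenced root or NULL. */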
1141 static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1142 					       u64 root_id)
1143 {
1144 	struct btrfs_root *root;
1145 
1146 	spin_lock(&fs_info->fs_roots_radix_lock);
1147 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1148 				 (unsigned long)root_id);
1149 	root = btrfs_grab_root(root);
1150 	spin_unlock(&fs_info->fs_roots_radix_lock);
1151 	return root;
1152 }
1153 
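/*
 * Grab a reference to one of the global/essential roots by objectid, or
 * return NULL if @objectid does not name one of them.
 */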
1154 static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
1155 						u64 objectid)
1156 {
1157 	struct btrfs_key key = {
1158 		.objectid = objectid,
1159 		.type = BTRFS_ROOT_ITEM_KEY,
1160 		.offset = 0,
1161 	};
1162 
1163 	switch (objectid) {
1164 	case BTRFS_ROOT_TREE_OBJECTID:
1165 		return btrfs_grab_root(fs_info->tree_root);
1166 	case BTRFS_EXTENT_TREE_OBJECTID:
1167 		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1168 	case BTRFS_CHUNK_TREE_OBJECTID:
1169 		return btrfs_grab_root(fs_info->chunk_root);
1170 	case BTRFS_DEV_TREE_OBJECTID:
1171 		return btrfs_grab_root(fs_info->dev_root);
1172 	case BTRFS_CSUM_TREE_OBJECTID:
1173 		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1174 	case BTRFS_QUOTA_TREE_OBJECTID:
1175 		return btrfs_grab_root(fs_info->quota_root);
1176 	case BTRFS_UUID_TREE_OBJECTID:
1177 		return btrfs_grab_root(fs_info->uuid_root);
1178 	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
1179 		return btrfs_grab_root(fs_info->block_group_root);
1180 	case BTRFS_FREE_SPACE_TREE_OBJECTID:
1181 		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1182 	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
1183 		return btrfs_grab_root(fs_info->stripe_root);
1184 	default:
1185 		return NULL;
1186 	}
1187 }
1188 
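/*
 * Insert @root into the fs_roots radix tree, taking an extra reference on
 * success. Returns -EEXIST if a root with the same id is already cached.
 */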
1189 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1190 			 struct btrfs_root *root)
1191 {
1192 	int ret;
1193 
1194 	ret = radix_tree_preload(GFP_NOFS);
1195 	if (ret)
1196 		return ret;
1197 
1198 	spin_lock(&fs_info->fs_roots_radix_lock);
1199 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1200 				(unsigned long)btrfs_root_id(root),
1201 				root);
1202 	if (ret == 0) {
1203 		btrfs_grab_root(root);
1204 		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1205 	}
1206 	spin_unlock(&fs_info->fs_roots_radix_lock);
1207 	radix_tree_preload_end();
1208 
1209 	return ret;
1210 }
1211 
1212 void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info)
1213 {
1214 #ifdef CONFIG_BTRFS_DEBUG
1215 	struct btrfs_root *root;
1216 
1217 	while (!list_empty(&fs_info->allocated_roots)) {
1218 		char buf[BTRFS_ROOT_NAME_BUF_LEN];
1219 
1220 		root = list_first_entry(&fs_info->allocated_roots,
1221 					struct btrfs_root, leak_list);
1222 		btrfs_err(fs_info, "leaked root %s refcount %d",
1223 			  btrfs_root_name(&root->root_key, buf),
1224 			  refcount_read(&root->refs));
1225 		WARN_ON_ONCE(1);
1226 		while (refcount_read(&root->refs) > 1)
1227 			btrfs_put_root(root);
1228 		btrfs_put_root(root);
1229 	}
1230 #endif
1231 }
1232 
1233 static void free_global_roots(struct btrfs_fs_info *fs_info)
1234 {
1235 	struct btrfs_root *root;
1236 	struct rb_node *node;
1237 
1238 	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
1239 		root = rb_entry(node, struct btrfs_root, rb_node);
1240 		rb_erase(&root->rb_node, &fs_info->global_root_tree);
1241 		btrfs_put_root(root);
1242 	}
1243 }
1244 
1245 void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
1246 {
1247 	struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;
1248 
1249 	if (fs_info->fs_devices)
1250 		btrfs_close_devices(fs_info->fs_devices);
1251 	percpu_counter_destroy(&fs_info->stats_read_blocks);
1252 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
1253 	percpu_counter_destroy(&fs_info->delalloc_bytes);
1254 	percpu_counter_destroy(&fs_info->ordered_bytes);
1255 	if (percpu_counter_initialized(em_counter))
1256 		ASSERT(percpu_counter_sum_positive(em_counter) == 0);
1257 	percpu_counter_destroy(em_counter);
1258 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
1259 	btrfs_free_csum_hash(fs_info);
1260 	btrfs_free_stripe_hash_table(fs_info);
1261 	btrfs_free_ref_cache(fs_info);
1262 	kfree(fs_info->balance_ctl);
1263 	kfree(fs_info->delayed_root);
1264 	free_global_roots(fs_info);
1265 	btrfs_put_root(fs_info->tree_root);
1266 	btrfs_put_root(fs_info->chunk_root);
1267 	btrfs_put_root(fs_info->dev_root);
1268 	btrfs_put_root(fs_info->quota_root);
1269 	btrfs_put_root(fs_info->uuid_root);
1270 	btrfs_put_root(fs_info->fs_root);
1271 	btrfs_put_root(fs_info->data_reloc_root);
1272 	btrfs_put_root(fs_info->block_group_root);
1273 	btrfs_put_root(fs_info->stripe_root);
1274 	btrfs_check_leaked_roots(fs_info);
1275 	btrfs_extent_buffer_leak_debug_check(fs_info);
1276 	kfree(fs_info->super_copy);
1277 	kfree(fs_info->super_for_commit);
1278 	kvfree(fs_info);
1279 }
1280 
1281 
1282 /*
1283  * Get an in-memory reference of a root structure.
1284  *
1285  * For essential trees like root/extent tree, we grab it from fs_info directly.
1286  * For subvolume trees, we check the cached filesystem roots first. If not
1287  * found, then read it from disk and add it to cached fs roots.
1288  *
1289  * Caller should release the root by calling btrfs_put_root() after the usage.
1290  *
1291  * NOTE: Reloc and log trees can't be read by this function as they share the
1292  *	 same root objectid.
1293  *
1294  * @objectid:	root id
1295  * @anon_dev:	preallocated anonymous block device number for new roots;
1296  *		pass NULL for a new allocation.
1297  * @check_ref:	whether to check root item references; if true, return -ENOENT
1298  *		for orphan roots
1299  */
1300 static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
1301 					     u64 objectid, dev_t *anon_dev,
1302 					     bool check_ref)
1303 {
1304 	struct btrfs_root *root;
1305 	struct btrfs_path *path;
1306 	struct btrfs_key key;
1307 	int ret;
1308 
1309 	root = btrfs_get_global_root(fs_info, objectid);
1310 	if (root)
1311 		return root;
1312 
1313 	/*
1314 	 * If we're called for non-subvolume trees, and the above function didn't
1315 	 * find one, do not try to read it from disk.
1316 	 *
1317 	 * This is namely for free-space-tree and quota tree, which can change
1318 	 * at runtime and should only be grabbed from fs_info.
1319 	 */
1320 	if (!btrfs_is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
1321 		return ERR_PTR(-ENOENT);
1322 again:
1323 	root = btrfs_lookup_fs_root(fs_info, objectid);
1324 	if (root) {
1325 		/*
1326 		 * Some other caller may have read out the newly inserted
1327 		 * subvolume already (for things like backref walk etc).  Not
1328 		 * that common but still possible.  In that case, we just need
1329 		 * to free the anon_dev.
1330 		 */
1331 		if (unlikely(anon_dev && *anon_dev)) {
1332 			free_anon_bdev(*anon_dev);
1333 			*anon_dev = 0;
1334 		}
1335 
1336 		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1337 			btrfs_put_root(root);
1338 			return ERR_PTR(-ENOENT);
1339 		}
1340 		return root;
1341 	}
1342 
1343 	key.objectid = objectid;
1344 	key.type = BTRFS_ROOT_ITEM_KEY;
1345 	key.offset = (u64)-1;
1346 	root = btrfs_read_tree_root(fs_info->tree_root, &key);
1347 	if (IS_ERR(root))
1348 		return root;
1349 
1350 	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1351 		ret = -ENOENT;
1352 		goto fail;
1353 	}
1354 
1355 	ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
1356 	if (ret)
1357 		goto fail;
1358 
1359 	path = btrfs_alloc_path();
1360 	if (!path) {
1361 		ret = -ENOMEM;
1362 		goto fail;
1363 	}
1364 	key.objectid = BTRFS_ORPHAN_OBJECTID;
1365 	key.type = BTRFS_ORPHAN_ITEM_KEY;
1366 	key.offset = objectid;
1367 
1368 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1369 	btrfs_free_path(path);
1370 	if (ret < 0)
1371 		goto fail;
1372 	if (ret == 0)
1373 		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1374 
1375 	ret = btrfs_insert_fs_root(fs_info, root);
1376 	if (ret) {
1377 		if (ret == -EEXIST) {
1378 			btrfs_put_root(root);
1379 			goto again;
1380 		}
1381 		goto fail;
1382 	}
1383 	return root;
1384 fail:
1385 	/*
1386 	 * If our caller provided us an anonymous device, then it's the caller's
1387 	 * responsibility to free it in case we fail. So we have to set our
1388 	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
1389 	 * and once again by our caller.
1390 	 */
1391 	if (anon_dev && *anon_dev)
1392 		root->anon_dev = 0;
1393 	btrfs_put_root(root);
1394 	return ERR_PTR(ret);
1395 }
1396 
1397 /*
1398  * Get in-memory reference of a root structure
1399  *
1400  * @objectid:	tree objectid
1401  * @check_ref:	if set, verify that the tree exists and the item has at least
1402  *		one reference
1403  */
1404 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1405 				     u64 objectid, bool check_ref)
1406 {
1407 	return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
1408 }
1409 
1410 /*
1411  * Get in-memory reference of a root structure, created as new, optionally pass
1412  * the anonymous block device id
1413  *
1414  * @objectid:	tree objectid
1415  * @anon_dev:	if NULL, allocate a new anonymous block device or use the
1416  *		parameter value if not NULL
1417  */
1418 struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
1419 					 u64 objectid, dev_t *anon_dev)
1420 {
1421 	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
1422 }
1423 
1424 /*
1425  * Return a root for the given objectid.
1426  *
1427  * @fs_info:	the fs_info
1428  * @objectid:	the objectid we need to lookup
1429  *
1430  * This is exclusively used for backref walking, and exists specifically because
1431  * of how qgroups does lookups.  Qgroups will do a backref lookup at delayed ref
1432  * creation time, which means we may have to read the tree_root in order to look
1433  * up a fs root that is not in memory.  If the root is not in memory we will
1434  * read the tree root commit root and look up the fs root from there.  This is a
1435  * temporary root, it will not be inserted into the radix tree as it doesn't
1436  * have the most uptodate information, it'll simply be discarded once the
1437  * backref code is finished using the root.
1438  */
1439 struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
1440 						 struct btrfs_path *path,
1441 						 u64 objectid)
1442 {
1443 	struct btrfs_root *root;
1444 	struct btrfs_key key;
1445 
1446 	ASSERT(path->search_commit_root && path->skip_locking);
1447 
1448 	/*
1449 	 * This can return -ENOENT if we ask for a root that doesn't exist, but
1450 	 * since this is called via the backref walking code we won't be looking
1451 	 * up a root that doesn't exist, unless there's corruption.  So if root
1452 	 * != NULL just return it.
1453 	 */
1454 	root = btrfs_get_global_root(fs_info, objectid);
1455 	if (root)
1456 		return root;
1457 
1458 	root = btrfs_lookup_fs_root(fs_info, objectid);
1459 	if (root)
1460 		return root;
1461 
1462 	key.objectid = objectid;
1463 	key.type = BTRFS_ROOT_ITEM_KEY;
1464 	key.offset = (u64)-1;
1465 	root = read_tree_root_path(fs_info->tree_root, path, &key);
1466 	btrfs_release_path(path);
1467 
1468 	return root;
1469 }
1470 
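/*
 * Background thread that runs delayed iputs, cleans one deleted snapshot per
 * iteration, defrags inodes and deletes/reclaims unused block groups.
 */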
1471 static int cleaner_kthread(void *arg)
1472 {
1473 	struct btrfs_fs_info *fs_info = arg;
1474 	int again;
1475 
1476 	while (1) {
1477 		again = 0;
1478 
1479 		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1480 
1481 		/* Make the cleaner go to sleep early. */
1482 		if (btrfs_need_cleaner_sleep(fs_info))
1483 			goto sleep;
1484 
1485 		/*
1486 		 * Do not do anything if we might cause open_ctree() to block
1487 		 * before we have finished mounting the filesystem.
1488 		 */
1489 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1490 			goto sleep;
1491 
1492 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1493 			goto sleep;
1494 
1495 		/*
1496 		 * Guard against the fs status having changed between the above
1497 		 * check and the trylock.
1498 		 */
1499 		if (btrfs_need_cleaner_sleep(fs_info)) {
1500 			mutex_unlock(&fs_info->cleaner_mutex);
1501 			goto sleep;
1502 		}
1503 
1504 		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
1505 			btrfs_sysfs_feature_update(fs_info);
1506 
1507 		btrfs_run_delayed_iputs(fs_info);
1508 
1509 		again = btrfs_clean_one_deleted_snapshot(fs_info);
1510 		mutex_unlock(&fs_info->cleaner_mutex);
1511 
1512 		/*
1513 		 * The defragger has already dealt with R/O remount and umount,
1514 		 * so we needn't do anything special here.
1515 		 */
1516 		btrfs_run_defrag_inodes(fs_info);
1517 
1518 		/*
1519 		 * Acquires fs_info->reclaim_bgs_lock to avoid racing
1520 		 * with relocation (btrfs_relocate_chunk) and relocation
1521 		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1522 		 * after acquiring fs_info->reclaim_bgs_lock. So we
1523 		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1524 		 * unused block groups.
1525 		 */
1526 		btrfs_delete_unused_bgs(fs_info);
1527 
1528 		/*
1529 		 * Reclaim block groups in the reclaim_bgs list after we deleted
1530 		 * all unused block_groups. This possibly gives us some more free
1531 		 * space.
1532 		 */
1533 		btrfs_reclaim_bgs(fs_info);
1534 sleep:
1535 		clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1536 		if (kthread_should_park())
1537 			kthread_parkme();
1538 		if (kthread_should_stop())
1539 			return 0;
1540 		if (!again) {
1541 			set_current_state(TASK_INTERRUPTIBLE);
1542 			schedule();
1543 			__set_current_state(TASK_RUNNING);
1544 		}
1545 	}
1546 }
1547 
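/*
 * Background thread that periodically commits the running transaction once
 * fs_info->commit_interval seconds have elapsed, then kicks the cleaner.
 */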
1548 static int transaction_kthread(void *arg)
1549 {
1550 	struct btrfs_root *root = arg;
1551 	struct btrfs_fs_info *fs_info = root->fs_info;
1552 	struct btrfs_trans_handle *trans;
1553 	struct btrfs_transaction *cur;
1554 	u64 transid;
1555 	time64_t delta;
1556 	unsigned long delay;
1557 	bool cannot_commit;
1558 
1559 	do {
1560 		cannot_commit = false;
1561 		delay = secs_to_jiffies(fs_info->commit_interval);
1562 		mutex_lock(&fs_info->transaction_kthread_mutex);
1563 
1564 		spin_lock(&fs_info->trans_lock);
1565 		cur = fs_info->running_transaction;
1566 		if (!cur) {
1567 			spin_unlock(&fs_info->trans_lock);
1568 			goto sleep;
1569 		}
1570 
1571 		delta = ktime_get_seconds() - cur->start_time;
1572 		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
1573 		    cur->state < TRANS_STATE_COMMIT_PREP &&
1574 		    delta < fs_info->commit_interval) {
1575 			spin_unlock(&fs_info->trans_lock);
1576 			delay -= secs_to_jiffies(delta - 1);
1577 			delay = min(delay,
1578 				    secs_to_jiffies(fs_info->commit_interval));
1579 			goto sleep;
1580 		}
1581 		transid = cur->transid;
1582 		spin_unlock(&fs_info->trans_lock);
1583 
1584 		/* If the file system is aborted, this will always fail. */
1585 		trans = btrfs_attach_transaction(root);
1586 		if (IS_ERR(trans)) {
1587 			if (PTR_ERR(trans) != -ENOENT)
1588 				cannot_commit = true;
1589 			goto sleep;
1590 		}
1591 		if (transid == trans->transid) {
1592 			btrfs_commit_transaction(trans);
1593 		} else {
1594 			btrfs_end_transaction(trans);
1595 		}
1596 sleep:
1597 		wake_up_process(fs_info->cleaner_kthread);
1598 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1599 
1600 		if (BTRFS_FS_ERROR(fs_info))
1601 			btrfs_cleanup_transaction(fs_info);
1602 		if (!kthread_should_stop() &&
1603 				(!btrfs_transaction_blocked(fs_info) ||
1604 				 cannot_commit))
1605 			schedule_timeout_interruptible(delay);
1606 	} while (!kthread_should_stop());
1607 	return 0;
1608 }
1609 
1610 /*
1611  * This will find the highest generation in the array of root backups.  The
1612  * index of the newest backup is returned, or -EINVAL if we can't find
1613  * anything.
1614  *
1615  * We check to make sure the array is valid by comparing the
1616  * generation of the latest root in the array with the generation
1617  * in the super block.  If they don't match we discard it.
1618  */
1619 static int find_newest_super_backup(struct btrfs_fs_info *info)
1620 {
1621 	const u64 newest_gen = btrfs_super_generation(info->super_copy);
1622 	u64 cur;
1623 	struct btrfs_root_backup *root_backup;
1624 	int i;
1625 
1626 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1627 		root_backup = info->super_copy->super_roots + i;
1628 		cur = btrfs_backup_tree_root_gen(root_backup);
1629 		if (cur == newest_gen)
1630 			return i;
1631 	}
1632 
1633 	return -EINVAL;
1634 }
1635 
1636 /*
1637  * Copy all the root pointers into the super backup array.
1638  * This will bump the backup pointer by one when it is
1639  * done.
1640  */
1641 static void backup_super_roots(struct btrfs_fs_info *info)
1642 {
1643 	const int next_backup = info->backup_root_index;
1644 	struct btrfs_root_backup *root_backup;
1645 
1646 	root_backup = info->super_for_commit->super_roots + next_backup;
1647 
1648 	/*
1649 	 * Make sure all of our padding and empty slots get zero-filled
1650 	 * regardless of which ones we use today.
1651 	 */
1652 	memset(root_backup, 0, sizeof(*root_backup));
1653 
1654 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1655 
1656 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1657 	btrfs_set_backup_tree_root_gen(root_backup,
1658 			       btrfs_header_generation(info->tree_root->node));
1659 
1660 	btrfs_set_backup_tree_root_level(root_backup,
1661 			       btrfs_header_level(info->tree_root->node));
1662 
1663 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1664 	btrfs_set_backup_chunk_root_gen(root_backup,
1665 			       btrfs_header_generation(info->chunk_root->node));
1666 	btrfs_set_backup_chunk_root_level(root_backup,
1667 			       btrfs_header_level(info->chunk_root->node));
1668 
1669 	if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
1670 		struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
1671 		struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
1672 
1673 		btrfs_set_backup_extent_root(root_backup,
1674 					     extent_root->node->start);
1675 		btrfs_set_backup_extent_root_gen(root_backup,
1676 				btrfs_header_generation(extent_root->node));
1677 		btrfs_set_backup_extent_root_level(root_backup,
1678 					btrfs_header_level(extent_root->node));
1679 
1680 		btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
1681 		btrfs_set_backup_csum_root_gen(root_backup,
1682 					       btrfs_header_generation(csum_root->node));
1683 		btrfs_set_backup_csum_root_level(root_backup,
1684 						 btrfs_header_level(csum_root->node));
1685 	}
1686 
1687 	/*
1688 	 * we might commit during log recovery, which happens before we set
1689 	 * the fs_root.  Make sure it is valid before we fill it in.
1690 	 */
1691 	if (info->fs_root && info->fs_root->node) {
1692 		btrfs_set_backup_fs_root(root_backup,
1693 					 info->fs_root->node->start);
1694 		btrfs_set_backup_fs_root_gen(root_backup,
1695 			       btrfs_header_generation(info->fs_root->node));
1696 		btrfs_set_backup_fs_root_level(root_backup,
1697 			       btrfs_header_level(info->fs_root->node));
1698 	}
1699 
1700 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1701 	btrfs_set_backup_dev_root_gen(root_backup,
1702 			       btrfs_header_generation(info->dev_root->node));
1703 	btrfs_set_backup_dev_root_level(root_backup,
1704 				       btrfs_header_level(info->dev_root->node));
1705 
1706 	btrfs_set_backup_total_bytes(root_backup,
1707 			     btrfs_super_total_bytes(info->super_copy));
1708 	btrfs_set_backup_bytes_used(root_backup,
1709 			     btrfs_super_bytes_used(info->super_copy));
1710 	btrfs_set_backup_num_devices(root_backup,
1711 			     btrfs_super_num_devices(info->super_copy));
1712 
1713 	/*
1714 	 * if we don't copy this out to the super_copy, it won't get remembered
1715 	 * for the next commit
1716 	 */
1717 	memcpy(&info->super_copy->super_roots,
1718 	       &info->super_for_commit->super_roots,
1719 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1720 }
1721 
1722 /*
1723  * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
1724  * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
1725  *
1726  * @fs_info:  filesystem whose backup roots need to be read
1727  * @priority: priority of backup root required
1728  *
1729  * Returns backup root index on success and -EINVAL otherwise.
1730  */
1731 static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
1732 {
1733 	int backup_index = find_newest_super_backup(fs_info);
1734 	struct btrfs_super_block *super = fs_info->super_copy;
1735 	struct btrfs_root_backup *root_backup;
1736 
1737 	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
1738 		if (priority == 0)
1739 			return backup_index;
1740 
1741 		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
1742 		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
1743 	} else {
1744 		return -EINVAL;
1745 	}
1746 
1747 	root_backup = super->super_roots + backup_index;
1748 
1749 	btrfs_set_super_generation(super,
1750 				   btrfs_backup_tree_root_gen(root_backup));
1751 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1752 	btrfs_set_super_root_level(super,
1753 				   btrfs_backup_tree_root_level(root_backup));
1754 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1755 
1756 	/*
1757 	 * FIXME: the total_bytes and num_devices need to match, otherwise we
1758 	 * should trigger a fsck.
1759 	 */
1760 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1761 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1762 
1763 	return backup_index;
1764 }
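/*
 * Worked example of the index math above, assuming BTRFS_NUM_BACKUP_ROOTS
 * is 4 and the newest backup slot (backup_index) is 2:
 *
 *	priority 0 -> slot 2              (newest, returned directly)
 *	priority 1 -> (2 + 4 - 1) % 4 = 1 (2nd newest)
 *	priority 2 -> (2 + 4 - 2) % 4 = 0 (3rd newest)
 *	priority 3 -> (2 + 4 - 3) % 4 = 3 (oldest)
 */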
1765 
1766 /* Helper to clean up workers. */
1767 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1768 {
1769 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1770 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1771 	btrfs_destroy_workqueue(fs_info->workers);
1772 	if (fs_info->endio_workers)
1773 		destroy_workqueue(fs_info->endio_workers);
1774 	if (fs_info->rmw_workers)
1775 		destroy_workqueue(fs_info->rmw_workers);
1776 	if (fs_info->compressed_write_workers)
1777 		destroy_workqueue(fs_info->compressed_write_workers);
1778 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1779 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1780 	btrfs_destroy_workqueue(fs_info->delayed_workers);
1781 	btrfs_destroy_workqueue(fs_info->caching_workers);
1782 	btrfs_destroy_workqueue(fs_info->flush_workers);
1783 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1784 	if (fs_info->discard_ctl.discard_workers)
1785 		destroy_workqueue(fs_info->discard_ctl.discard_workers);
1786 	/*
1787 	 * Now that all other work queues are destroyed, we can safely destroy
1788 	 * the queues used for metadata I/O, since tasks from those other work
1789 	 * queues can do metadata I/O operations.
1790 	 */
1791 	if (fs_info->endio_meta_workers)
1792 		destroy_workqueue(fs_info->endio_meta_workers);
1793 }
1794 
1795 static void free_root_extent_buffers(struct btrfs_root *root)
1796 {
1797 	if (root) {
1798 		free_extent_buffer(root->node);
1799 		free_extent_buffer(root->commit_root);
1800 		root->node = NULL;
1801 		root->commit_root = NULL;
1802 	}
1803 }
1804 
1805 static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
1806 {
1807 	struct btrfs_root *root, *tmp;
1808 
1809 	rbtree_postorder_for_each_entry_safe(root, tmp,
1810 					     &fs_info->global_root_tree,
1811 					     rb_node)
1812 		free_root_extent_buffers(root);
1813 }
1814 
1815 /* Helper to clean up tree roots. */
1816 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
1817 {
1818 	free_root_extent_buffers(info->tree_root);
1819 
1820 	free_global_root_pointers(info);
1821 	free_root_extent_buffers(info->dev_root);
1822 	free_root_extent_buffers(info->quota_root);
1823 	free_root_extent_buffers(info->uuid_root);
1824 	free_root_extent_buffers(info->fs_root);
1825 	free_root_extent_buffers(info->data_reloc_root);
1826 	free_root_extent_buffers(info->block_group_root);
1827 	free_root_extent_buffers(info->stripe_root);
1828 	if (free_chunk_root)
1829 		free_root_extent_buffers(info->chunk_root);
1830 }
1831 
1832 void btrfs_put_root(struct btrfs_root *root)
1833 {
1834 	if (!root)
1835 		return;
1836 
1837 	if (refcount_dec_and_test(&root->refs)) {
1838 		if (WARN_ON(!xa_empty(&root->inodes)))
1839 			xa_destroy(&root->inodes);
1840 		if (WARN_ON(!xa_empty(&root->delayed_nodes)))
1841 			xa_destroy(&root->delayed_nodes);
1842 		WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
1843 		if (root->anon_dev)
1844 			free_anon_bdev(root->anon_dev);
1845 		free_root_extent_buffers(root);
1846 #ifdef CONFIG_BTRFS_DEBUG
1847 		spin_lock(&root->fs_info->fs_roots_radix_lock);
1848 		list_del_init(&root->leak_list);
1849 		spin_unlock(&root->fs_info->fs_roots_radix_lock);
1850 #endif
1851 		kfree(root);
1852 	}
1853 }
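/*
 * A minimal usage sketch (not taken from this file): callers that did not
 * allocate a root pair btrfs_grab_root() with btrfs_put_root() around any
 * use of the pointer:
 *
 *	struct btrfs_root *root = btrfs_grab_root(fs_info->fs_root);
 *
 *	if (root) {
 *		... use root ...
 *		btrfs_put_root(root);
 *	}
 */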
1854 
1855 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
1856 {
1857 	int ret;
1858 	struct btrfs_root *gang[8];
1859 	int i;
1860 
1861 	while (!list_empty(&fs_info->dead_roots)) {
1862 		gang[0] = list_first_entry(&fs_info->dead_roots,
1863 					   struct btrfs_root, root_list);
1864 		list_del(&gang[0]->root_list);
1865 
1866 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
1867 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
1868 		btrfs_put_root(gang[0]);
1869 	}
1870 
1871 	while (1) {
1872 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1873 					     (void **)gang, 0,
1874 					     ARRAY_SIZE(gang));
1875 		if (!ret)
1876 			break;
1877 		for (i = 0; i < ret; i++)
1878 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
1879 	}
1880 }
1881 
1882 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
1883 {
1884 	mutex_init(&fs_info->scrub_lock);
1885 	atomic_set(&fs_info->scrubs_running, 0);
1886 	atomic_set(&fs_info->scrub_pause_req, 0);
1887 	atomic_set(&fs_info->scrubs_paused, 0);
1888 	atomic_set(&fs_info->scrub_cancel_req, 0);
1889 	init_waitqueue_head(&fs_info->scrub_pause_wait);
1890 	refcount_set(&fs_info->scrub_workers_refcnt, 0);
1891 }
1892 
1893 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
1894 {
1895 	spin_lock_init(&fs_info->balance_lock);
1896 	mutex_init(&fs_info->balance_mutex);
1897 	atomic_set(&fs_info->balance_pause_req, 0);
1898 	atomic_set(&fs_info->balance_cancel_req, 0);
1899 	fs_info->balance_ctl = NULL;
1900 	init_waitqueue_head(&fs_info->balance_wait_q);
1901 	atomic_set(&fs_info->reloc_cancel_req, 0);
1902 }
1903 
1904 static int btrfs_init_btree_inode(struct super_block *sb)
1905 {
1906 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1907 	unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
1908 					      fs_info->tree_root);
1909 	struct inode *inode;
1910 
1911 	inode = new_inode(sb);
1912 	if (!inode)
1913 		return -ENOMEM;
1914 
1915 	btrfs_set_inode_number(BTRFS_I(inode), BTRFS_BTREE_INODE_OBJECTID);
1916 	set_nlink(inode, 1);
1917 	/*
1918 	 * We set the i_size on the btree inode to the maximum possible offset
1919 	 * (OFFSET_MAX).  The real end of the address space is determined by
1920 	 * all of the devices in the system.
1921 	 */
1922 	inode->i_size = OFFSET_MAX;
1923 	inode->i_mapping->a_ops = &btree_aops;
1924 	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
1925 
1926 	btrfs_extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
1927 				  IO_TREE_BTREE_INODE_IO);
1928 	btrfs_extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
1929 
1930 	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
1931 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
1932 	__insert_inode_hash(inode, hash);
1933 	set_bit(AS_KERNEL_FILE, &inode->i_mapping->flags);
1934 	fs_info->btree_inode = inode;
1935 
1936 	return 0;
1937 }
1938 
1939 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
1940 {
1941 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
1942 	init_rwsem(&fs_info->dev_replace.rwsem);
1943 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
1944 }
1945 
1946 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
1947 {
1948 	spin_lock_init(&fs_info->qgroup_lock);
1949 	mutex_init(&fs_info->qgroup_ioctl_lock);
1950 	fs_info->qgroup_tree = RB_ROOT;
1951 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
1952 	fs_info->qgroup_seq = 1;
1953 	fs_info->qgroup_rescan_running = false;
1954 	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1955 	mutex_init(&fs_info->qgroup_rescan_lock);
1956 }
1957 
1958 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
1959 {
1960 	u32 max_active = fs_info->thread_pool_size;
1961 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
1962 	unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;
1963 
1964 	fs_info->workers =
1965 		btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
1966 
1967 	fs_info->delalloc_workers =
1968 		btrfs_alloc_workqueue(fs_info, "delalloc",
1969 				      flags, max_active, 2);
1970 
1971 	fs_info->flush_workers =
1972 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
1973 				      flags, max_active, 0);
1974 
1975 	fs_info->caching_workers =
1976 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
1977 
1978 	fs_info->fixup_workers =
1979 		btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);
1980 
1981 	fs_info->endio_workers =
1982 		alloc_workqueue("btrfs-endio", flags, max_active);
1983 	fs_info->endio_meta_workers =
1984 		alloc_workqueue("btrfs-endio-meta", flags, max_active);
1985 	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
1986 	fs_info->endio_write_workers =
1987 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
1988 				      max_active, 2);
1989 	fs_info->compressed_write_workers =
1990 		alloc_workqueue("btrfs-compressed-write", flags, max_active);
1991 	fs_info->endio_freespace_worker =
1992 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
1993 				      max_active, 0);
1994 	fs_info->delayed_workers =
1995 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
1996 				      max_active, 0);
1997 	fs_info->qgroup_rescan_workers =
1998 		btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
1999 					      ordered_flags);
2000 	fs_info->discard_ctl.discard_workers =
2001 		alloc_ordered_workqueue("btrfs-discard", WQ_FREEZABLE);
2002 
2003 	if (!(fs_info->workers &&
2004 	      fs_info->delalloc_workers && fs_info->flush_workers &&
2005 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2006 	      fs_info->compressed_write_workers &&
2007 	      fs_info->endio_write_workers &&
2008 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2009 	      fs_info->caching_workers && fs_info->fixup_workers &&
2010 	      fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
2011 	      fs_info->discard_ctl.discard_workers)) {
2012 		return -ENOMEM;
2013 	}
2014 
2015 	return 0;
2016 }
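/*
 * A hypothetical sketch of how work reaches the plain workqueues allocated
 * above: endio_workers and friends are regular struct workqueue_struct
 * instances, so the core kernel workqueue API applies (ctx and the work
 * function below are illustrative names only):
 *
 *	INIT_WORK(&ctx->work, my_endio_work_fn);
 *	queue_work(fs_info->endio_workers, &ctx->work);
 *
 * The btrfs_workqueue wrappers (fs_info->workers etc.) are driven through
 * the helpers in async-thread.c instead.
 */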
2017 
2018 static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2019 {
2020 	struct crypto_shash *csum_shash;
2021 	const char *csum_driver = btrfs_super_csum_driver(csum_type);
2022 
2023 	csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
2024 
2025 	if (IS_ERR(csum_shash)) {
2026 		btrfs_err(fs_info, "error allocating %s hash for checksum",
2027 			  csum_driver);
2028 		return PTR_ERR(csum_shash);
2029 	}
2030 
2031 	fs_info->csum_shash = csum_shash;
2032 
2033 	/* Check if the checksum implementation is a fast accelerated one. */
2034 	switch (csum_type) {
2035 	case BTRFS_CSUM_TYPE_CRC32:
2036 		if (crc32_optimizations() & CRC32C_OPTIMIZATION)
2037 			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2038 		break;
2039 	case BTRFS_CSUM_TYPE_XXHASH:
2040 		set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2041 		break;
2042 	default:
2043 		break;
2044 	}
2045 
2046 	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
2047 			btrfs_super_csum_name(csum_type),
2048 			crypto_shash_driver_name(csum_shash));
2049 	return 0;
2050 }
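/*
 * For reference, the checksum sizes behind csum_type (as documented for
 * the on-disk format): crc32c produces 4-byte checksums, xxhash64 8-byte,
 * sha256 and blake2b 32-byte ones.  Metadata blocks always reserve the
 * full BTRFS_CSUM_SIZE (32 bytes), with any unused tail left zeroed.
 */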
2051 
2052 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2053 			    struct btrfs_fs_devices *fs_devices)
2054 {
2055 	int ret;
2056 	struct btrfs_tree_parent_check check = { 0 };
2057 	struct btrfs_root *log_tree_root;
2058 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2059 	u64 bytenr = btrfs_super_log_root(disk_super);
2060 	int level = btrfs_super_log_root_level(disk_super);
2061 
2062 	if (fs_devices->rw_devices == 0) {
2063 		btrfs_warn(fs_info, "log replay required on RO media");
2064 		return -EIO;
2065 	}
2066 
2067 	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2068 					 GFP_KERNEL);
2069 	if (!log_tree_root)
2070 		return -ENOMEM;
2071 
2072 	check.level = level;
2073 	check.transid = fs_info->generation + 1;
2074 	check.owner_root = BTRFS_TREE_LOG_OBJECTID;
2075 	log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
2076 	if (IS_ERR(log_tree_root->node)) {
2077 		btrfs_warn(fs_info, "failed to read log tree");
2078 		ret = PTR_ERR(log_tree_root->node);
2079 		log_tree_root->node = NULL;
2080 		btrfs_put_root(log_tree_root);
2081 		return ret;
2082 	}
2083 	if (!extent_buffer_uptodate(log_tree_root->node)) {
2084 		btrfs_err(fs_info, "failed to read log tree");
2085 		btrfs_put_root(log_tree_root);
2086 		return -EIO;
2087 	}
2088 
2089 	/* Returns with log_tree_root freed on success. */
2090 	ret = btrfs_recover_log_trees(log_tree_root);
2091 	if (ret) {
2092 		btrfs_handle_fs_error(fs_info, ret,
2093 				      "Failed to recover log tree");
2094 		btrfs_put_root(log_tree_root);
2095 		return ret;
2096 	}
2097 
2098 	if (sb_rdonly(fs_info->sb)) {
2099 		ret = btrfs_commit_super(fs_info);
2100 		if (ret)
2101 			return ret;
2102 	}
2103 
2104 	return 0;
2105 }
2106 
2107 static int load_global_roots_objectid(struct btrfs_root *tree_root,
2108 				      struct btrfs_path *path, u64 objectid,
2109 				      const char *name)
2110 {
2111 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
2112 	struct btrfs_root *root;
2113 	u64 max_global_id = 0;
2114 	int ret;
2115 	struct btrfs_key key = {
2116 		.objectid = objectid,
2117 		.type = BTRFS_ROOT_ITEM_KEY,
2118 		.offset = 0,
2119 	};
2120 	bool found = false;
2121 
2122 	/* If we have IGNOREDATACSUMS, skip loading these roots. */
2123 	if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
2124 	    btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
2125 		set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2126 		return 0;
2127 	}
2128 
2129 	while (1) {
2130 		ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2131 		if (ret < 0)
2132 			break;
2133 
2134 		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2135 			ret = btrfs_next_leaf(tree_root, path);
2136 			if (ret) {
2137 				if (ret > 0)
2138 					ret = 0;
2139 				break;
2140 			}
2141 		}
2142 		ret = 0;
2143 
2144 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2145 		if (key.objectid != objectid)
2146 			break;
2147 		btrfs_release_path(path);
2148 
2149 		/*
2150 		 * Just worry about this for the extent tree; it'll be the
2151 		 * same for everybody.
2152 		 */
2153 		if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2154 			max_global_id = max(max_global_id, key.offset);
2155 
2156 		found = true;
2157 		root = read_tree_root_path(tree_root, path, &key);
2158 		if (IS_ERR(root)) {
2159 			ret = PTR_ERR(root);
2160 			break;
2161 		}
2162 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2163 		ret = btrfs_global_root_insert(root);
2164 		if (ret) {
2165 			btrfs_put_root(root);
2166 			break;
2167 		}
2168 		key.offset++;
2169 	}
2170 	btrfs_release_path(path);
2171 
2172 	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2173 		fs_info->nr_global_roots = max_global_id + 1;
2174 
2175 	if (!found || ret) {
2176 		if (objectid == BTRFS_CSUM_TREE_OBJECTID)
2177 			set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2178 
2179 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2180 			ret = ret ? ret : -ENOENT;
2181 		else
2182 			ret = 0;
2183 		btrfs_err(fs_info, "failed to load root %s", name);
2184 	}
2185 	return ret;
2186 }
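/*
 * Illustrative key layout for the loop above: with two global root copies
 * (extent-tree-v2 style), the tree root contains, among other items,
 *
 *	(BTRFS_EXTENT_TREE_OBJECTID, BTRFS_ROOT_ITEM_KEY, 0)
 *	(BTRFS_EXTENT_TREE_OBJECTID, BTRFS_ROOT_ITEM_KEY, 1)
 *	(BTRFS_CSUM_TREE_OBJECTID,   BTRFS_ROOT_ITEM_KEY, 0)
 *	(BTRFS_CSUM_TREE_OBJECTID,   BTRFS_ROOT_ITEM_KEY, 1)
 *
 * which is why the walk advances by bumping key.offset until the objectid
 * changes.
 */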
2187 
2188 static int load_global_roots(struct btrfs_root *tree_root)
2189 {
2190 	BTRFS_PATH_AUTO_FREE(path);
2191 	int ret;
2192 
2193 	path = btrfs_alloc_path();
2194 	if (!path)
2195 		return -ENOMEM;
2196 
2197 	ret = load_global_roots_objectid(tree_root, path,
2198 					 BTRFS_EXTENT_TREE_OBJECTID, "extent");
2199 	if (ret)
2200 		return ret;
2201 	ret = load_global_roots_objectid(tree_root, path,
2202 					 BTRFS_CSUM_TREE_OBJECTID, "csum");
2203 	if (ret)
2204 		return ret;
2205 	if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
2206 		return ret;
2207 	ret = load_global_roots_objectid(tree_root, path,
2208 					 BTRFS_FREE_SPACE_TREE_OBJECTID,
2209 					 "free space");
2210 
2211 	return ret;
2212 }
2213 
2214 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2215 {
2216 	struct btrfs_root *tree_root = fs_info->tree_root;
2217 	struct btrfs_root *root;
2218 	struct btrfs_key location;
2219 	int ret;
2220 
2221 	ASSERT(fs_info->tree_root);
2222 
2223 	ret = load_global_roots(tree_root);
2224 	if (ret)
2225 		return ret;
2226 
2227 	location.type = BTRFS_ROOT_ITEM_KEY;
2228 	location.offset = 0;
2229 
2230 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2231 		location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
2232 		root = btrfs_read_tree_root(tree_root, &location);
2233 		if (IS_ERR(root)) {
2234 			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2235 				ret = PTR_ERR(root);
2236 				goto out;
2237 			}
2238 		} else {
2239 			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2240 			fs_info->block_group_root = root;
2241 		}
2242 	}
2243 
2244 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2245 	root = btrfs_read_tree_root(tree_root, &location);
2246 	if (IS_ERR(root)) {
2247 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2248 			ret = PTR_ERR(root);
2249 			goto out;
2250 		}
2251 	} else {
2252 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2253 		fs_info->dev_root = root;
2254 	}
2255 	/* Initialize fs_info for all devices in any case */
2256 	ret = btrfs_init_devices_late(fs_info);
2257 	if (ret)
2258 		goto out;
2259 
2260 	/*
2261 	 * This tree can share blocks with some other fs tree during relocation,
2262 	 * and we need a proper setup by btrfs_get_fs_root.
2263 	 */
2264 	root = btrfs_get_fs_root(tree_root->fs_info,
2265 				 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2266 	if (IS_ERR(root)) {
2267 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2268 			ret = PTR_ERR(root);
2269 			goto out;
2270 		}
2271 	} else {
2272 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2273 		fs_info->data_reloc_root = root;
2274 	}
2275 
2276 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2277 	root = btrfs_read_tree_root(tree_root, &location);
2278 	if (!IS_ERR(root)) {
2279 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2280 		fs_info->quota_root = root;
2281 	}
2282 
2283 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2284 	root = btrfs_read_tree_root(tree_root, &location);
2285 	if (IS_ERR(root)) {
2286 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2287 			ret = PTR_ERR(root);
2288 			if (ret != -ENOENT)
2289 				goto out;
2290 		}
2291 	} else {
2292 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2293 		fs_info->uuid_root = root;
2294 	}
2295 
2296 	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
2297 		location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
2298 		root = btrfs_read_tree_root(tree_root, &location);
2299 		if (IS_ERR(root)) {
2300 			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2301 				ret = PTR_ERR(root);
2302 				goto out;
2303 			}
2304 		} else {
2305 			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2306 			fs_info->stripe_root = root;
2307 		}
2308 	}
2309 
2310 	return 0;
2311 out:
2312 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2313 		   location.objectid, ret);
2314 	return ret;
2315 }
2316 
2317 static int validate_sys_chunk_array(const struct btrfs_fs_info *fs_info,
2318 				    const struct btrfs_super_block *sb)
2319 {
2320 	unsigned int cur = 0; /* Offset inside the sys chunk array */
2321 	/*
2322 	 * At sb read time, fs_info is not fully initialized. Thus we have
2323 	 * to use super block sectorsize, which should have been validated.
2324 	 */
2325 	const u32 sectorsize = btrfs_super_sectorsize(sb);
2326 	u32 sys_array_size = btrfs_super_sys_array_size(sb);
2327 
2328 	if (sys_array_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2329 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2330 			  sys_array_size, BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2331 		return -EUCLEAN;
2332 	}
2333 
2334 	while (cur < sys_array_size) {
2335 		struct btrfs_disk_key *disk_key;
2336 		struct btrfs_chunk *chunk;
2337 		struct btrfs_key key;
2338 		u64 type;
2339 		u16 num_stripes;
2340 		u32 len;
2341 		int ret;
2342 
2343 		disk_key = (struct btrfs_disk_key *)(sb->sys_chunk_array + cur);
2344 		len = sizeof(*disk_key);
2345 
2346 		if (cur + len > sys_array_size)
2347 			goto short_read;
2348 		cur += len;
2349 
2350 		btrfs_disk_key_to_cpu(&key, disk_key);
2351 		if (key.type != BTRFS_CHUNK_ITEM_KEY) {
2352 			btrfs_err(fs_info,
2353 			    "unexpected item type %u in sys_array at offset %u",
2354 				  key.type, cur);
2355 			return -EUCLEAN;
2356 		}
2357 		chunk = (struct btrfs_chunk *)(sb->sys_chunk_array + cur);
2358 		num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2359 		if (cur + btrfs_chunk_item_size(num_stripes) > sys_array_size)
2360 			goto short_read;
2361 		type = btrfs_stack_chunk_type(chunk);
2362 		if (!(type & BTRFS_BLOCK_GROUP_SYSTEM)) {
2363 			btrfs_err(fs_info,
2364 			"invalid chunk type %llu in sys_array at offset %u",
2365 				  type, cur);
2366 			return -EUCLEAN;
2367 		}
2368 		ret = btrfs_check_chunk_valid(fs_info, NULL, chunk, key.offset,
2369 					      sectorsize);
2370 		if (ret < 0)
2371 			return ret;
2372 		cur += btrfs_chunk_item_size(num_stripes);
2373 	}
2374 	return 0;
2375 short_read:
2376 	btrfs_err(fs_info,
2377 	"super block sys chunk array short read, cur=%u sys_array_size=%u",
2378 		  cur, sys_array_size);
2379 	return -EUCLEAN;
2380 }
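/*
 * The on-disk layout validated above: key/chunk pairs are packed back to
 * back inside sb->sys_chunk_array, sys_array_size bytes in total:
 *
 *	[btrfs_disk_key][btrfs_chunk + stripes][btrfs_disk_key][...]
 *
 * Each key must be a CHUNK_ITEM key and each chunk must describe a SYSTEM
 * block group, hence the two -EUCLEAN checks above.
 */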
2381 
2382 /*
2383  * Real super block validation
2384  * NOTE: super csum type and incompat features will not be checked here.
2385  *
2386  * @sb:		super block to check
2387  * @mirror_num:	the super block number to check its bytenr:
2388  * 		0	the primary (1st) sb
2389  * 		1, 2	2nd and 3rd backup copy
2390  * 	       -1	skip bytenr check
2391  */
2392 int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
2393 			 const struct btrfs_super_block *sb, int mirror_num)
2394 {
2395 	u64 nodesize = btrfs_super_nodesize(sb);
2396 	u64 sectorsize = btrfs_super_sectorsize(sb);
2397 	int ret = 0;
2398 	const bool ignore_flags = btrfs_test_opt(fs_info, IGNORESUPERFLAGS);
2399 
2400 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2401 		btrfs_err(fs_info, "no valid FS found");
2402 		ret = -EINVAL;
2403 	}
2404 	if ((btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)) {
2405 		if (!ignore_flags) {
2406 			btrfs_err(fs_info,
2407 			"unrecognized or unsupported super flag 0x%llx",
2408 				  btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2409 			ret = -EINVAL;
2410 		} else {
2411 			btrfs_info(fs_info,
2412 			"unrecognized or unsupported super flags: 0x%llx, ignored",
2413 				   btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2414 		}
2415 	}
2416 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2417 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2418 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2419 		ret = -EINVAL;
2420 	}
2421 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2422 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2423 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2424 		ret = -EINVAL;
2425 	}
2426 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2427 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2428 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2429 		ret = -EINVAL;
2430 	}
2431 
2432 	/*
2433 	 * Check sectorsize and nodesize first, other checks will need them.
2434 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2435 	 */
2436 	if (!is_power_of_2(sectorsize) || sectorsize < BTRFS_MIN_BLOCKSIZE ||
2437 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2438 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2439 		ret = -EINVAL;
2440 	}
2441 
2442 	/*
2443 	 * We only support at most 3 sectorsizes: 4K, PAGE_SIZE, MIN_BLOCKSIZE.
2444 	 *
2445 	 * For 4K page sized systems with non-debug builds, all 3 match (4K).
2446 	 * For 4K page sized systems with debug builds, two block sizes are
2447 	 * supported (4K and 2K).
2448 	 *
2449 	 * We can support a 16K sectorsize with a 64K page size without
2450 	 * problems, but such a sectorsize/pagesize combination doesn't make
2451 	 * much sense.  4K will be our future standard; PAGE_SIZE has been
2452 	 * supported from the very beginning.
2453 	 */
2454 	if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K &&
2455 				       sectorsize != PAGE_SIZE &&
2456 				       sectorsize != BTRFS_MIN_BLOCKSIZE)) {
2457 		btrfs_err(fs_info,
2458 			"sectorsize %llu not yet supported for page size %lu",
2459 			sectorsize, PAGE_SIZE);
2460 		ret = -EINVAL;
2461 	}
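
	/*
	 * Concrete examples of the rule above (assuming BTRFS_MIN_BLOCKSIZE
	 * is the 2K debug-only minimum mentioned earlier): a kernel with 4K
	 * pages accepts a 4K sectorsize (plus 2K on debug builds), while a
	 * kernel with 64K pages accepts 4K or 64K.
	 */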
2462 
2463 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2464 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2465 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2466 		ret = -EINVAL;
2467 	}
2468 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2469 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2470 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2471 		ret = -EINVAL;
2472 	}
2473 
2474 	/* Root alignment check */
2475 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2476 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2477 			   btrfs_super_root(sb));
2478 		ret = -EINVAL;
2479 	}
2480 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2481 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2482 			   btrfs_super_chunk_root(sb));
2483 		ret = -EINVAL;
2484 	}
2485 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2486 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2487 			   btrfs_super_log_root(sb));
2488 		ret = -EINVAL;
2489 	}
2490 
2491 	if (!fs_info->fs_devices->temp_fsid &&
2492 	    memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
2493 		btrfs_err(fs_info,
2494 		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2495 			  sb->fsid, fs_info->fs_devices->fsid);
2496 		ret = -EINVAL;
2497 	}
2498 
2499 	if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
2500 		   BTRFS_FSID_SIZE) != 0) {
2501 		btrfs_err(fs_info,
2502 "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2503 			  btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2504 		ret = -EINVAL;
2505 	}
2506 
2507 	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2508 		   BTRFS_FSID_SIZE) != 0) {
2509 		btrfs_err(fs_info,
2510 			"dev_item UUID does not match metadata fsid: %pU != %pU",
2511 			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2512 		ret = -EINVAL;
2513 	}
2514 
2515 	/*
2516 	 * Artificial requirement for block-group-tree to force newer features
2517 	 * (free-space-tree, no-holes) so the test matrix is smaller.
2518 	 */
2519 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
2520 	    (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2521 	     !btrfs_fs_incompat(fs_info, NO_HOLES))) {
2522 		btrfs_err(fs_info,
2523 		"block-group-tree feature requires free-space-tree and no-holes");
2524 		ret = -EINVAL;
2525 	}
2526 
2527 	/*
2528 	 * Hint to catch really bogus numbers, bitflips and the like; more
2529 	 * exact checks are done later.
2530 	 */
2531 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2532 		btrfs_err(fs_info, "bytes_used is too small %llu",
2533 			  btrfs_super_bytes_used(sb));
2534 		ret = -EINVAL;
2535 	}
2536 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2537 		btrfs_err(fs_info, "invalid stripesize %u",
2538 			  btrfs_super_stripesize(sb));
2539 		ret = -EINVAL;
2540 	}
2541 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2542 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2543 			   btrfs_super_num_devices(sb));
2544 	if (btrfs_super_num_devices(sb) == 0) {
2545 		btrfs_err(fs_info, "number of devices is 0");
2546 		ret = -EINVAL;
2547 	}
2548 
2549 	if (mirror_num >= 0 &&
2550 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2551 		btrfs_err(fs_info, "super offset mismatch %llu != %u",
2552 			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
2553 		ret = -EINVAL;
2554 	}
2555 
2556 	if (ret)
2557 		return ret;
2558 
2559 	ret = validate_sys_chunk_array(fs_info, sb);
2560 
2561 	/*
2562 	 * Catch obvious sys_chunk_array corruptions: it must hold at least
2563 	 * one key and one chunk.
2564 	 */
2565 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2566 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2567 			  btrfs_super_sys_array_size(sb),
2568 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2569 		ret = -EINVAL;
2570 	}
2571 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2572 			+ sizeof(struct btrfs_chunk)) {
2573 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2574 			  btrfs_super_sys_array_size(sb),
2575 			  sizeof(struct btrfs_disk_key)
2576 			  + sizeof(struct btrfs_chunk));
2577 		ret = -EINVAL;
2578 	}
2579 
2580 	/*
2581 	 * The generation is a global counter; we'll trust it more than the
2582 	 * others, but it's still possible that it's the one that's wrong.
2583 	 */
2584 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2585 		btrfs_warn(fs_info,
2586 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2587 			btrfs_super_generation(sb),
2588 			btrfs_super_chunk_root_generation(sb));
2589 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2590 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2591 		btrfs_warn(fs_info,
2592 			"suspicious: generation < cache_generation: %llu < %llu",
2593 			btrfs_super_generation(sb),
2594 			btrfs_super_cache_generation(sb));
2595 
2596 	return ret;
2597 }
2598 
2599 /*
2600  * Validation of super block at mount time.
2601  * Some checks already done early at mount time, like csum type and incompat
2602  * flags will be skipped.
2603  */
2604 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2605 {
2606 	return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
2607 }
2608 
2609 /*
2610  * Validation of super block at write time.
2611  * Some checks like bytenr check will be skipped as their values will be
2612  * overwritten soon.
2613  * Extra checks like csum type and incompat flags will be done here.
2614  */
2615 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2616 				      struct btrfs_super_block *sb)
2617 {
2618 	int ret;
2619 
2620 	ret = btrfs_validate_super(fs_info, sb, -1);
2621 	if (ret < 0)
2622 		goto out;
2623 	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2624 		ret = -EUCLEAN;
2625 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2626 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2627 		goto out;
2628 	}
2629 	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2630 		ret = -EUCLEAN;
2631 		btrfs_err(fs_info,
2632 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2633 			  btrfs_super_incompat_flags(sb),
2634 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2635 		goto out;
2636 	}
2637 out:
2638 	if (ret < 0)
2639 		btrfs_err(fs_info,
2640 		"super block corruption detected before writing it to disk");
2641 	return ret;
2642 }
2643 
2644 static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
2645 {
2646 	struct btrfs_tree_parent_check check = {
2647 		.level = level,
2648 		.transid = gen,
2649 		.owner_root = btrfs_root_id(root)
2650 	};
2651 	int ret = 0;
2652 
2653 	root->node = read_tree_block(root->fs_info, bytenr, &check);
2654 	if (IS_ERR(root->node)) {
2655 		ret = PTR_ERR(root->node);
2656 		root->node = NULL;
2657 		return ret;
2658 	}
2659 	if (!extent_buffer_uptodate(root->node)) {
2660 		free_extent_buffer(root->node);
2661 		root->node = NULL;
2662 		return -EIO;
2663 	}
2664 
2665 	btrfs_set_root_node(&root->root_item, root->node);
2666 	root->commit_root = btrfs_root_node(root);
2667 	btrfs_set_root_refs(&root->root_item, 1);
2668 	return ret;
2669 }
2670 
2671 static int load_important_roots(struct btrfs_fs_info *fs_info)
2672 {
2673 	struct btrfs_super_block *sb = fs_info->super_copy;
2674 	u64 gen, bytenr;
2675 	int level, ret;
2676 
2677 	bytenr = btrfs_super_root(sb);
2678 	gen = btrfs_super_generation(sb);
2679 	level = btrfs_super_root_level(sb);
2680 	ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
2681 	if (ret) {
2682 		btrfs_warn(fs_info, "couldn't read tree root");
2683 		return ret;
2684 	}
2685 	return 0;
2686 }
2687 
2688 static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2689 {
2690 	int backup_index = find_newest_super_backup(fs_info);
2691 	struct btrfs_super_block *sb = fs_info->super_copy;
2692 	struct btrfs_root *tree_root = fs_info->tree_root;
2693 	bool handle_error = false;
2694 	int ret = 0;
2695 	int i;
2696 
2697 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2698 		if (handle_error) {
2699 			if (!IS_ERR(tree_root->node))
2700 				free_extent_buffer(tree_root->node);
2701 			tree_root->node = NULL;
2702 
2703 			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2704 				break;
2705 
2706 			free_root_pointers(fs_info, false);
2707 
2708 			/*
2709 			 * Don't use the log in recovery mode; it won't be
2710 			 * valid.
2711 			 */
2712 			btrfs_set_super_log_root(sb, 0);
2713 
2714 			btrfs_warn(fs_info, "try to load backup roots slot %d", i);
2715 			ret = read_backup_root(fs_info, i);
2716 			backup_index = ret;
2717 			if (ret < 0)
2718 				return ret;
2719 		}
2720 
2721 		ret = load_important_roots(fs_info);
2722 		if (ret) {
2723 			handle_error = true;
2724 			continue;
2725 		}
2726 
2727 		/*
2728 		 * No need to hold btrfs_root::objectid_mutex since the fs
2729 		 * hasn't been fully initialised and we are the only user
2730 		 */
2731 		ret = btrfs_init_root_free_objectid(tree_root);
2732 		if (ret < 0) {
2733 			handle_error = true;
2734 			continue;
2735 		}
2736 
2737 		ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
2738 
2739 		ret = btrfs_read_roots(fs_info);
2740 		if (ret < 0) {
2741 			handle_error = true;
2742 			continue;
2743 		}
2744 
2745 		/* All successful */
2746 		fs_info->generation = btrfs_header_generation(tree_root->node);
2747 		btrfs_set_last_trans_committed(fs_info, fs_info->generation);
2748 		fs_info->last_reloc_trans = 0;
2749 
2750 		/* Always begin writing backup roots after the one being used */
2751 		if (backup_index < 0) {
2752 			fs_info->backup_root_index = 0;
2753 		} else {
2754 			fs_info->backup_root_index = backup_index + 1;
2755 			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2756 		}
2757 		break;
2758 	}
2759 
2760 	return ret;
2761 }
2762 
2763 /*
2764  * Lockdep gets confused between our buffer_tree which requires IRQ locking because
2765  * we modify marks in the IRQ context, and our delayed inode xarray which doesn't
2766  * have these requirements. Use a class key so lockdep doesn't get them mixed up.
2767  */
2768 static struct lock_class_key buffer_xa_class;
2769 
2770 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2771 {
2772 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2773 
2774 	/* Use the same flags as mapping->i_pages. */
2775 	xa_init_flags(&fs_info->buffer_tree, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
2776 	lockdep_set_class(&fs_info->buffer_tree.xa_lock, &buffer_xa_class);
2777 
2778 	INIT_LIST_HEAD(&fs_info->trans_list);
2779 	INIT_LIST_HEAD(&fs_info->dead_roots);
2780 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2781 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2782 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2783 	spin_lock_init(&fs_info->delalloc_root_lock);
2784 	spin_lock_init(&fs_info->trans_lock);
2785 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2786 	spin_lock_init(&fs_info->delayed_iput_lock);
2787 	spin_lock_init(&fs_info->defrag_inodes_lock);
2788 	spin_lock_init(&fs_info->super_lock);
2789 	spin_lock_init(&fs_info->unused_bgs_lock);
2790 	spin_lock_init(&fs_info->treelog_bg_lock);
2791 	spin_lock_init(&fs_info->zone_active_bgs_lock);
2792 	spin_lock_init(&fs_info->relocation_bg_lock);
2793 	rwlock_init(&fs_info->tree_mod_log_lock);
2794 	rwlock_init(&fs_info->global_root_lock);
2795 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2796 	mutex_init(&fs_info->reclaim_bgs_lock);
2797 	mutex_init(&fs_info->reloc_mutex);
2798 	mutex_init(&fs_info->delalloc_root_mutex);
2799 	mutex_init(&fs_info->zoned_meta_io_lock);
2800 	mutex_init(&fs_info->zoned_data_reloc_io_lock);
2801 	seqlock_init(&fs_info->profiles_lock);
2802 
2803 	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
2804 	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2805 	btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2806 	btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2807 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2808 				     BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2809 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2810 				     BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2811 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
2812 				     BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2813 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
2814 				     BTRFS_LOCKDEP_TRANS_COMPLETED);
2815 
2816 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2817 	INIT_LIST_HEAD(&fs_info->space_info);
2818 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2819 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2820 	INIT_LIST_HEAD(&fs_info->reclaim_bgs);
2821 	INIT_LIST_HEAD(&fs_info->zone_active_bgs);
2822 #ifdef CONFIG_BTRFS_DEBUG
2823 	INIT_LIST_HEAD(&fs_info->allocated_roots);
2824 	INIT_LIST_HEAD(&fs_info->allocated_ebs);
2825 	spin_lock_init(&fs_info->eb_leak_lock);
2826 #endif
2827 	fs_info->mapping_tree = RB_ROOT_CACHED;
2828 	rwlock_init(&fs_info->mapping_tree_lock);
2829 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2830 			     BTRFS_BLOCK_RSV_GLOBAL);
2831 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2832 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2833 	btrfs_init_block_rsv(&fs_info->treelog_rsv, BTRFS_BLOCK_RSV_TREELOG);
2834 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2835 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2836 			     BTRFS_BLOCK_RSV_DELOPS);
2837 	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2838 			     BTRFS_BLOCK_RSV_DELREFS);
2839 
2840 	atomic_set(&fs_info->async_delalloc_pages, 0);
2841 	atomic_set(&fs_info->defrag_running, 0);
2842 	atomic_set(&fs_info->nr_delayed_iputs, 0);
2843 	atomic64_set(&fs_info->tree_mod_seq, 0);
2844 	fs_info->global_root_tree = RB_ROOT;
2845 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2846 	fs_info->metadata_ratio = 0;
2847 	fs_info->defrag_inodes = RB_ROOT;
2848 	atomic64_set(&fs_info->free_chunk_space, 0);
2849 	fs_info->tree_mod_log = RB_ROOT;
2850 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2851 	btrfs_init_ref_verify(fs_info);
2852 
2853 	fs_info->thread_pool_size = min_t(unsigned long,
2854 					  num_online_cpus() + 2, 8);
2855 
2856 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2857 	spin_lock_init(&fs_info->ordered_root_lock);
2858 
2859 	btrfs_init_scrub(fs_info);
2860 	btrfs_init_balance(fs_info);
2861 	btrfs_init_async_reclaim_work(fs_info);
2862 	btrfs_init_extent_map_shrinker_work(fs_info);
2863 
2864 	rwlock_init(&fs_info->block_group_cache_lock);
2865 	fs_info->block_group_cache_tree = RB_ROOT_CACHED;
2866 
2867 	btrfs_extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2868 				  IO_TREE_FS_EXCLUDED_EXTENTS);
2869 
2870 	mutex_init(&fs_info->ordered_operations_mutex);
2871 	mutex_init(&fs_info->tree_log_mutex);
2872 	mutex_init(&fs_info->chunk_mutex);
2873 	mutex_init(&fs_info->transaction_kthread_mutex);
2874 	mutex_init(&fs_info->cleaner_mutex);
2875 	mutex_init(&fs_info->ro_block_group_mutex);
2876 	init_rwsem(&fs_info->commit_root_sem);
2877 	init_rwsem(&fs_info->cleanup_work_sem);
2878 	init_rwsem(&fs_info->subvol_sem);
2879 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2880 
2881 	btrfs_init_dev_replace_locks(fs_info);
2882 	btrfs_init_qgroup(fs_info);
2883 	btrfs_discard_init(fs_info);
2884 
2885 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2886 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2887 
2888 	init_waitqueue_head(&fs_info->transaction_throttle);
2889 	init_waitqueue_head(&fs_info->transaction_wait);
2890 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2891 	init_waitqueue_head(&fs_info->async_submit_wait);
2892 	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2893 
2894 	/* Usable values until the real ones are cached from the superblock */
2895 	fs_info->nodesize = 4096;
2896 	fs_info->sectorsize = 4096;
2897 	fs_info->sectorsize_bits = ilog2(4096);
2898 	fs_info->stripesize = 4096;
2899 
2900 	/* Default compress algorithm when user does -o compress */
2901 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2902 
2903 	fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
2904 
2905 	spin_lock_init(&fs_info->swapfile_pins_lock);
2906 	fs_info->swapfile_pins = RB_ROOT;
2907 
2908 	fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
2909 	INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
2910 }
2911 
2912 static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2913 {
2914 	int ret;
2915 
2916 	fs_info->sb = sb;
2917 	/* Temporary fixed values for block size until we read the superblock. */
2918 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2919 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2920 
2921 	ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
2922 	if (ret)
2923 		return ret;
2924 
2925 	ret = percpu_counter_init(&fs_info->evictable_extent_maps, 0, GFP_KERNEL);
2926 	if (ret)
2927 		return ret;
2928 
2929 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2930 	if (ret)
2931 		return ret;
2932 
2933 	ret = percpu_counter_init(&fs_info->stats_read_blocks, 0, GFP_KERNEL);
2934 	if (ret)
2935 		return ret;
2936 
2937 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2938 					(1 + ilog2(nr_cpu_ids));
2939 
2940 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2941 	if (ret)
2942 		return ret;
2943 
2944 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2945 			GFP_KERNEL);
2946 	if (ret)
2947 		return ret;
2948 
2949 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2950 					GFP_KERNEL);
2951 	if (!fs_info->delayed_root)
2952 		return -ENOMEM;
2953 	btrfs_init_delayed_root(fs_info->delayed_root);
2954 
2955 	if (sb_rdonly(sb))
2956 		set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2957 	if (btrfs_test_opt(fs_info, IGNOREMETACSUMS))
2958 		set_bit(BTRFS_FS_STATE_SKIP_META_CSUMS, &fs_info->fs_state);
2959 
2960 	return btrfs_alloc_stripe_hash_table(fs_info);
2961 }
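/*
 * A minimal sketch of the percpu counter pattern set up above (core
 * kernel API, not btrfs-specific; the counter chosen is just an example):
 *
 *	percpu_counter_add(&fs_info->delalloc_bytes, len);     (hot path)
 *	total = percpu_counter_sum(&fs_info->delalloc_bytes);  (exact read)
 *	percpu_counter_destroy(&fs_info->delalloc_bytes);      (teardown)
 */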
2962 
2963 static int btrfs_uuid_rescan_kthread(void *data)
2964 {
2965 	struct btrfs_fs_info *fs_info = data;
2966 	int ret;
2967 
2968 	/*
2969 	 * 1st step is to iterate through the existing UUID tree and
2970 	 * to delete all entries that contain outdated data.
2971 	 * 2nd step is to add all missing entries to the UUID tree.
2972 	 */
2973 	ret = btrfs_uuid_tree_iterate(fs_info);
2974 	if (ret < 0) {
2975 		if (ret != -EINTR)
2976 			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2977 				   ret);
2978 		up(&fs_info->uuid_tree_rescan_sem);
2979 		return ret;
2980 	}
2981 	return btrfs_uuid_scan_kthread(data);
2982 }
2983 
2984 static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2985 {
2986 	struct task_struct *task;
2987 
2988 	down(&fs_info->uuid_tree_rescan_sem);
2989 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2990 	if (IS_ERR(task)) {
2991 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2992 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
2993 		up(&fs_info->uuid_tree_rescan_sem);
2994 		return PTR_ERR(task);
2995 	}
2996 
2997 	return 0;
2998 }
2999 
3000 static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3001 {
3002 	u64 root_objectid = 0;
3003 	struct btrfs_root *gang[8];
3004 	int ret = 0;
3005 
3006 	while (1) {
3007 		unsigned int found;
3008 
3009 		spin_lock(&fs_info->fs_roots_radix_lock);
3010 		found = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3011 					     (void **)gang, root_objectid,
3012 					     ARRAY_SIZE(gang));
3013 		if (!found) {
3014 			spin_unlock(&fs_info->fs_roots_radix_lock);
3015 			break;
3016 		}
3017 		root_objectid = btrfs_root_id(gang[found - 1]) + 1;
3018 
3019 		for (int i = 0; i < found; i++) {
3020 			/* Avoid grabbing roots in dead_roots. */
3021 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3022 				gang[i] = NULL;
3023 				continue;
3024 			}
3025 			/* Grab all the search results for later use. */
3026 			gang[i] = btrfs_grab_root(gang[i]);
3027 		}
3028 		spin_unlock(&fs_info->fs_roots_radix_lock);
3029 
3030 		for (int i = 0; i < found; i++) {
3031 			if (!gang[i])
3032 				continue;
3033 			root_objectid = btrfs_root_id(gang[i]);
3034 			/*
3035 			 * After the first error, continue to release the
3036 			 * remaining roots without further cleanup and preserve
3037 			 * the first error for the return value.
3038 			 */
3039 			if (!ret)
3040 				ret = btrfs_orphan_cleanup(gang[i]);
3041 			btrfs_put_root(gang[i]);
3042 		}
3043 		if (ret)
3044 			break;
3045 
3046 		root_objectid++;
3047 	}
3048 	return ret;
3049 }
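/*
 * The loop above is the generic radix-tree "gang" pagination idiom;
 * roughly (process() and key_of() are placeholder names):
 *
 *	while ((n = radix_tree_gang_lookup(tree, (void **)gang, start,
 *					   ARRAY_SIZE(gang)))) {
 *		for (i = 0; i < n; i++)
 *			process(gang[i]);
 *		start = key_of(gang[n - 1]) + 1;
 *	}
 */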
3050 
3051 /*
3052  * Mounting logic specific to read-write file systems. Shared by open_ctree
3053  * and btrfs_remount when remounting from read-only to read-write.
3054  */
3055 int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
3056 {
3057 	int ret;
3058 	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
3059 	bool rebuild_free_space_tree = false;
3060 
3061 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3062 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3063 		if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
3064 			btrfs_warn(fs_info,
3065 				   "'clear_cache' option is ignored with extent tree v2");
3066 		else
3067 			rebuild_free_space_tree = true;
3068 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3069 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3070 		btrfs_warn(fs_info, "free space tree is invalid");
3071 		rebuild_free_space_tree = true;
3072 	}
3073 
3074 	if (rebuild_free_space_tree) {
3075 		btrfs_info(fs_info, "rebuilding free space tree");
3076 		ret = btrfs_rebuild_free_space_tree(fs_info);
3077 		if (ret) {
3078 			btrfs_warn(fs_info,
3079 				   "failed to rebuild free space tree: %d", ret);
3080 			goto out;
3081 		}
3082 	}
3083 
3084 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3085 	    !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
3086 		btrfs_info(fs_info, "disabling free space tree");
3087 		ret = btrfs_delete_free_space_tree(fs_info);
3088 		if (ret) {
3089 			btrfs_warn(fs_info,
3090 				   "failed to disable free space tree: %d", ret);
3091 			goto out;
3092 		}
3093 	}
3094 
3095 	/*
3096 	 * btrfs_find_orphan_roots() is responsible for finding all the dead
3097 	 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
3098 	 * them into the fs_info->fs_roots_radix tree. This must be done before
3099 	 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
3100 	 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
3101 	 * item before the root's tree is deleted - this means that if we unmount
3102 	 * or crash before the deletion completes, on the next mount we will not
3103 	 * delete what remains of the tree because the orphan item no longer
3104 	 * exists, which is what tells us we have a pending deletion.
3105 	 */
3106 	ret = btrfs_find_orphan_roots(fs_info);
3107 	if (ret)
3108 		goto out;
3109 
3110 	ret = btrfs_cleanup_fs_roots(fs_info);
3111 	if (ret)
3112 		goto out;
3113 
3114 	down_read(&fs_info->cleanup_work_sem);
3115 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3116 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3117 		up_read(&fs_info->cleanup_work_sem);
3118 		goto out;
3119 	}
3120 	up_read(&fs_info->cleanup_work_sem);
3121 
3122 	mutex_lock(&fs_info->cleaner_mutex);
3123 	ret = btrfs_recover_relocation(fs_info);
3124 	mutex_unlock(&fs_info->cleaner_mutex);
3125 	if (ret < 0) {
3126 		btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
3127 		goto out;
3128 	}
3129 
3130 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3131 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3132 		btrfs_info(fs_info, "creating free space tree");
3133 		ret = btrfs_create_free_space_tree(fs_info);
3134 		if (ret) {
3135 			btrfs_warn(fs_info,
3136 				"failed to create free space tree: %d", ret);
3137 			goto out;
3138 		}
3139 	}
3140 
3141 	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
3142 		ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
3143 		if (ret)
3144 			goto out;
3145 	}
3146 
3147 	ret = btrfs_resume_balance_async(fs_info);
3148 	if (ret)
3149 		goto out;
3150 
3151 	ret = btrfs_resume_dev_replace_async(fs_info);
3152 	if (ret) {
3153 		btrfs_warn(fs_info, "failed to resume dev_replace");
3154 		goto out;
3155 	}
3156 
3157 	btrfs_qgroup_rescan_resume(fs_info);
3158 
3159 	if (!fs_info->uuid_root) {
3160 		btrfs_info(fs_info, "creating UUID tree");
3161 		ret = btrfs_create_uuid_tree(fs_info);
3162 		if (ret) {
3163 			btrfs_warn(fs_info,
3164 				   "failed to create the UUID tree %d", ret);
3165 			goto out;
3166 		}
3167 	}
3168 
3169 out:
3170 	return ret;
3171 }
3172 
3173 /*
3174  * Do various sanity and dependency checks of different features.
3175  *
3176  * @is_rw_mount:	If the mount is read-write.
3177  *
3178  * This is the place for less strict checks (like for subpage or artificial
3179  * feature dependencies).
3180  *
3181  * For strict checks or possible corruption detection, see
3182  * btrfs_validate_super().
3183  *
3184  * This should be called after btrfs_parse_options(), as some mount options
3185  * (space cache related) can modify on-disk format like free space tree and
3186  * screw up certain feature dependencies.
3187  */
3188 int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
3189 {
3190 	struct btrfs_super_block *disk_super = fs_info->super_copy;
3191 	u64 incompat = btrfs_super_incompat_flags(disk_super);
3192 	const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
3193 	const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
3194 
3195 	if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
3196 		btrfs_err(fs_info,
3197 		"cannot mount because of unknown incompat features (0x%llx)",
3198 		    incompat);
3199 		return -EINVAL;
3200 	}
3201 
3202 	/* Runtime limitation for mixed block groups. */
3203 	if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3204 	    (fs_info->sectorsize != fs_info->nodesize)) {
3205 		btrfs_err(fs_info,
3206 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3207 			fs_info->nodesize, fs_info->sectorsize);
3208 		return -EINVAL;
3209 	}
3210 
3211 	/* Mixed backref is an always-enabled feature. */
3212 	incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3213 
3214 	/* Set compression related flags just in case. */
3215 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3216 		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3217 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3218 		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3219 
3220 	/*
3221 	 * An ancient flag, which should really be marked deprecated.
3222 	 * Such a runtime limitation doesn't really need an incompat flag.
3223 	 */
3224 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
3225 		incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3226 
3227 	if (compat_ro_unsupp && is_rw_mount) {
3228 		btrfs_err(fs_info,
3229 	"cannot mount read-write because of unknown compat_ro features (0x%llx)",
3230 		       compat_ro);
3231 		return -EINVAL;
3232 	}
3233 
3234 	/*
3235 	 * We have unsupported RO compat features, although RO mounted, we
3236 	 * should not cause any metadata writes, including log replay.
3237 	 * Or we could screw up whatever the new feature requires.
3238 	 */
3239 	if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
3240 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3241 		btrfs_err(fs_info,
3242 "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
3243 			  compat_ro);
3244 		return -EINVAL;
3245 	}
3246 
3247 	/*
3248 	 * Artificial limitations for block group tree, to force
3249 	 * block-group-tree to rely on no-holes and free-space-tree.
3250 	 */
3251 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
3252 	    (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
3253 	     !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
3254 		btrfs_err(fs_info,
3255 "block-group-tree feature requires no-holes and free-space-tree features");
3256 		return -EINVAL;
3257 	}
3258 
3259 	/*
3260 	 * Subpage runtime limitation on v1 cache.
3261 	 *
3262 	 * V1 space cache still has some hard-coded PAGE_SIZE usage.  Since
3263 	 * we're already defaulting to the v2 cache, there's no need to bother
3264 	 * with v1 as it's going to be deprecated anyway.
3265 	 */
3266 	if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3267 		btrfs_warn(fs_info,
3268 	"v1 space cache is not supported for page size %lu with sectorsize %u",
3269 			   PAGE_SIZE, fs_info->sectorsize);
3270 		return -EINVAL;
3271 	}
3272 
3273 	/* This can be called by remount, we need to protect the super block. */
3274 	spin_lock(&fs_info->super_lock);
3275 	btrfs_set_super_incompat_flags(disk_super, incompat);
3276 	spin_unlock(&fs_info->super_lock);
3277 
3278 	return 0;
3279 }
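/*
 * Elsewhere in btrfs, single incompat bits are raised at runtime via the
 * btrfs_set_fs_incompat() helper, e.g. (illustrative):
 *
 *	btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
 *
 * which serializes on fs_info->super_lock much like the bulk update above.
 */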
3280 
3281 int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices)
3282 {
3283 	u32 sectorsize;
3284 	u32 nodesize;
3285 	u32 stripesize;
3286 	u64 generation;
3287 	u16 csum_type;
3288 	struct btrfs_super_block *disk_super;
3289 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3290 	struct btrfs_root *tree_root;
3291 	struct btrfs_root *chunk_root;
3292 	int ret;
3293 	int level;
3294 
3295 	ret = init_mount_fs_info(fs_info, sb);
3296 	if (ret)
3297 		goto fail;
3298 
3299 	/* These need to be init'ed before we start creating inodes and such. */
3300 	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
3301 				     GFP_KERNEL);
3302 	fs_info->tree_root = tree_root;
3303 	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
3304 				      GFP_KERNEL);
3305 	fs_info->chunk_root = chunk_root;
3306 	if (!tree_root || !chunk_root) {
3307 		ret = -ENOMEM;
3308 		goto fail;
3309 	}
3310 
3311 	ret = btrfs_init_btree_inode(sb);
3312 	if (ret)
3313 		goto fail;
3314 
3315 	invalidate_bdev(fs_devices->latest_dev->bdev);
3316 
3317 	/*
3318 	 * Read super block and check the signature bytes only
3319 	 */
3320 	disk_super = btrfs_read_disk_super(fs_devices->latest_dev->bdev, 0, false);
3321 	if (IS_ERR(disk_super)) {
3322 		ret = PTR_ERR(disk_super);
3323 		goto fail_alloc;
3324 	}
3325 
3326 	btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
3327 	/*
3328 	 * Verify the type first; if that or the checksum value is
3329 	 * corrupted, we'll find out.
3330 	 */
3331 	csum_type = btrfs_super_csum_type(disk_super);
3332 	if (!btrfs_supported_super_csum(csum_type)) {
3333 		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
3334 			  csum_type);
3335 		ret = -EINVAL;
3336 		btrfs_release_disk_super(disk_super);
3337 		goto fail_alloc;
3338 	}
3339 
3340 	fs_info->csum_size = btrfs_super_csum_size(disk_super);
3341 
3342 	ret = btrfs_init_csum_hash(fs_info, csum_type);
3343 	if (ret) {
3344 		btrfs_release_disk_super(disk_super);
3345 		goto fail_alloc;
3346 	}
3347 
3348 	/*
3349 	 * We want to check the superblock checksum; the type is stored inside.
3350 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
3351 	 */
3352 	if (btrfs_check_super_csum(fs_info, disk_super)) {
3353 		btrfs_err(fs_info, "superblock checksum mismatch");
3354 		ret = -EINVAL;
3355 		btrfs_release_disk_super(disk_super);
3356 		goto fail_alloc;
3357 	}
3358 
3359 	/*
3360 	 * super_copy is zeroed at allocation time and we never touch the
3361 	 * following bytes up to INFO_SIZE; the checksum is calculated from
3362 	 * the whole block of INFO_SIZE.
3363 	 */
3364 	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
3365 	btrfs_release_disk_super(disk_super);
3366 
3367 	disk_super = fs_info->super_copy;
3368 
3369 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
3370 	       sizeof(*fs_info->super_for_commit));
3371 
3372 	ret = btrfs_validate_mount_super(fs_info);
3373 	if (ret) {
3374 		btrfs_err(fs_info, "superblock contains fatal errors");
3375 		ret = -EINVAL;
3376 		goto fail_alloc;
3377 	}
3378 
3379 	if (!btrfs_super_root(disk_super)) {
3380 		btrfs_err(fs_info, "invalid superblock tree root bytenr");
3381 		ret = -EINVAL;
3382 		goto fail_alloc;
3383 	}
3384 
3385 	/* Check FS state to see whether the FS is broken. */
3386 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3387 		WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
3388 
3389 	/* Set up fs_info before parsing mount options */
3390 	nodesize = btrfs_super_nodesize(disk_super);
3391 	sectorsize = btrfs_super_sectorsize(disk_super);
3392 	stripesize = sectorsize;
3393 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3394 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3395 
3396 	fs_info->nodesize = nodesize;
3397 	fs_info->nodesize_bits = ilog2(nodesize);
3398 	fs_info->sectorsize = sectorsize;
3399 	fs_info->sectorsize_bits = ilog2(sectorsize);
3400 	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
3401 	fs_info->stripesize = stripesize;
3402 	fs_info->fs_devices->fs_info = fs_info;
3403 
3404 	/*
3405 	 * Handle the space caching options appropriately now that we have the
3406 	 * super block loaded and validated.
3407 	 */
3408 	btrfs_set_free_space_cache_settings(fs_info);
3409 
3410 	if (!btrfs_check_options(fs_info, &fs_info->mount_opt, sb->s_flags)) {
3411 		ret = -EINVAL;
3412 		goto fail_alloc;
3413 	}
3414 
3415 	ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
3416 	if (ret < 0)
3417 		goto fail_alloc;
3418 
3419 	/*
3420 	 * At this point our mount options are validated; if ->max_inline was set
3421 	 * to something non-standard, make sure we truncate it to sectorsize.
3422 	 */
3423 	fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
3424 
3425 	ret = btrfs_init_workqueues(fs_info);
3426 	if (ret)
3427 		goto fail_sb_buffer;
3428 
3429 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3430 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3431 
3432 	/* Update the values for the current filesystem. */
3433 	sb->s_blocksize = sectorsize;
3434 	sb->s_blocksize_bits = blksize_bits(sectorsize);
3435 	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3436 
3437 	mutex_lock(&fs_info->chunk_mutex);
3438 	ret = btrfs_read_sys_array(fs_info);
3439 	mutex_unlock(&fs_info->chunk_mutex);
3440 	if (ret) {
3441 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
3442 		goto fail_sb_buffer;
3443 	}
3444 
3445 	generation = btrfs_super_chunk_root_generation(disk_super);
3446 	level = btrfs_super_chunk_root_level(disk_super);
3447 	ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
3448 			      generation, level);
3449 	if (ret) {
3450 		btrfs_err(fs_info, "failed to read chunk root");
3451 		goto fail_tree_roots;
3452 	}
3453 
3454 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3455 			   offsetof(struct btrfs_header, chunk_tree_uuid),
3456 			   BTRFS_UUID_SIZE);
3457 
3458 	ret = btrfs_read_chunk_tree(fs_info);
3459 	if (ret) {
3460 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3461 		goto fail_tree_roots;
3462 	}
3463 
3464 	/*
3465 	 * At this point we know all the devices that make this filesystem,
3466 	 * including the seed devices but we don't know yet if the replace
3467 	 * target is required. So free devices that are not part of this
3468 	 * filesystem but skip the replace target device which is checked
3469 	 * below in btrfs_init_dev_replace().
3470 	 */
3471 	btrfs_free_extra_devids(fs_devices);
3472 	if (!fs_devices->latest_dev->bdev) {
3473 		btrfs_err(fs_info, "failed to read devices");
3474 		ret = -EIO;
3475 		goto fail_tree_roots;
3476 	}
3477 
3478 	ret = init_tree_roots(fs_info);
3479 	if (ret)
3480 		goto fail_tree_roots;
3481 
3482 	/*
3483 	 * Get zone type information of zoned block devices. This will also
3484 	 * handle emulation of a zoned filesystem if a regular device has the
3485 	 * zoned incompat feature flag set.
3486 	 */
3487 	ret = btrfs_get_dev_zone_info_all_devices(fs_info);
3488 	if (ret) {
3489 		btrfs_err(fs_info,
3490 			  "zoned: failed to read device zone info: %d", ret);
3491 		goto fail_block_groups;
3492 	}
3493 
3494 	/*
3495 	 * If we have a uuid root and we're not being told to rescan we need to
3496 	 * check the generation here so we can set the
3497 	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit.  Otherwise we could commit the
3498 	 * transaction during a balance or the log replay without updating the
3499 	 * uuid generation, and then if we crash we would rescan the uuid tree,
3500 	 * even though it was perfectly fine.
3501 	 */
3502 	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3503 	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3504 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3505 
3506 	ret = btrfs_verify_dev_extents(fs_info);
3507 	if (ret) {
3508 		btrfs_err(fs_info,
3509 			  "failed to verify dev extents against chunks: %d",
3510 			  ret);
3511 		goto fail_block_groups;
3512 	}
3513 	ret = btrfs_recover_balance(fs_info);
3514 	if (ret) {
3515 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3516 		goto fail_block_groups;
3517 	}
3518 
3519 	ret = btrfs_init_dev_stats(fs_info);
3520 	if (ret) {
3521 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3522 		goto fail_block_groups;
3523 	}
3524 
3525 	ret = btrfs_init_dev_replace(fs_info);
3526 	if (ret) {
3527 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3528 		goto fail_block_groups;
3529 	}
3530 
3531 	ret = btrfs_check_zoned_mode(fs_info);
3532 	if (ret) {
3533 		btrfs_err(fs_info, "failed to initialize zoned mode: %d",
3534 			  ret);
3535 		goto fail_block_groups;
3536 	}
3537 
3538 	ret = btrfs_sysfs_add_fsid(fs_devices);
3539 	if (ret) {
3540 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3541 				ret);
3542 		goto fail_block_groups;
3543 	}
3544 
3545 	ret = btrfs_sysfs_add_mounted(fs_info);
3546 	if (ret) {
3547 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3548 		goto fail_fsdev_sysfs;
3549 	}
3550 
3551 	ret = btrfs_init_space_info(fs_info);
3552 	if (ret) {
3553 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3554 		goto fail_sysfs;
3555 	}
3556 
3557 	ret = btrfs_read_block_groups(fs_info);
3558 	if (ret) {
3559 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3560 		goto fail_sysfs;
3561 	}
3562 
3563 	btrfs_zoned_reserve_data_reloc_bg(fs_info);
3564 	btrfs_free_zone_cache(fs_info);
3565 
3566 	btrfs_check_active_zone_reservation(fs_info);
3567 
3568 	if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3569 	    !btrfs_check_rw_degradable(fs_info, NULL)) {
3570 		btrfs_warn(fs_info,
3571 		"writable mount is not allowed due to too many missing devices");
3572 		ret = -EINVAL;
3573 		goto fail_sysfs;
3574 	}
3575 
3576 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
3577 					       "btrfs-cleaner");
3578 	if (IS_ERR(fs_info->cleaner_kthread)) {
3579 		ret = PTR_ERR(fs_info->cleaner_kthread);
3580 		goto fail_sysfs;
3581 	}
3582 
3583 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3584 						   tree_root,
3585 						   "btrfs-transaction");
3586 	if (IS_ERR(fs_info->transaction_kthread)) {
3587 		ret = PTR_ERR(fs_info->transaction_kthread);
3588 		goto fail_cleaner;
3589 	}
3590 
3591 	ret = btrfs_read_qgroup_config(fs_info);
3592 	if (ret)
3593 		goto fail_trans_kthread;
3594 
3595 	if (btrfs_build_ref_tree(fs_info))
3596 		btrfs_err(fs_info, "couldn't build ref tree");
3597 
3598 	/* Do not make disk changes in a broken FS or when nologreplay is given. */
3599 	if (btrfs_super_log_root(disk_super) != 0 &&
3600 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3601 		btrfs_info(fs_info, "start tree-log replay");
3602 		ret = btrfs_replay_log(fs_info, fs_devices);
3603 		if (ret)
3604 			goto fail_qgroup;
3605 	}
3606 
3607 	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3608 	if (IS_ERR(fs_info->fs_root)) {
3609 		ret = PTR_ERR(fs_info->fs_root);
3610 		btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
3611 		fs_info->fs_root = NULL;
3612 		goto fail_qgroup;
3613 	}
3614 
3615 	if (sb_rdonly(sb))
3616 		return 0;
3617 
3618 	ret = btrfs_start_pre_rw_mount(fs_info);
3619 	if (ret) {
3620 		close_ctree(fs_info);
3621 		return ret;
3622 	}
3623 	btrfs_discard_resume(fs_info);
3624 
3625 	if (fs_info->uuid_root &&
3626 	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3627 	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
3628 		btrfs_info(fs_info, "checking UUID tree");
3629 		ret = btrfs_check_uuid_tree(fs_info);
3630 		if (ret) {
3631 			btrfs_warn(fs_info,
3632 				"failed to check the UUID tree: %d", ret);
3633 			close_ctree(fs_info);
3634 			return ret;
3635 		}
3636 	}
3637 
3638 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3639 
3640 	/* Kick the cleaner thread so it'll start deleting snapshots. */
3641 	if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
3642 		wake_up_process(fs_info->cleaner_kthread);
3643 
3644 	return 0;
3645 
3646 fail_qgroup:
3647 	btrfs_free_qgroup_config(fs_info);
3648 fail_trans_kthread:
3649 	kthread_stop(fs_info->transaction_kthread);
3650 	btrfs_cleanup_transaction(fs_info);
3651 	btrfs_free_fs_roots(fs_info);
3652 fail_cleaner:
3653 	kthread_stop(fs_info->cleaner_kthread);
3654 
3655 	/*
3656 	 * make sure we're done with the btree inode before we stop our
3657 	 * kthreads
3658 	 */
3659 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3660 
3661 fail_sysfs:
3662 	btrfs_sysfs_remove_mounted(fs_info);
3663 
3664 fail_fsdev_sysfs:
3665 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3666 
3667 fail_block_groups:
3668 	btrfs_put_block_group_cache(fs_info);
3669 
3670 fail_tree_roots:
3671 	if (fs_info->data_reloc_root)
3672 		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3673 	free_root_pointers(fs_info, true);
3674 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3675 
3676 fail_sb_buffer:
3677 	btrfs_stop_all_workers(fs_info);
3678 	btrfs_free_block_groups(fs_info);
3679 fail_alloc:
3680 	btrfs_mapping_tree_free(fs_info);
3681 
3682 	iput(fs_info->btree_inode);
3683 fail:
3684 	ASSERT(ret < 0);
3685 	return ret;
3686 }
3687 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3688 
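/*
 * Note: the ALLOW_ERROR_INJECTION() annotation above means that, on kernels
 * built with CONFIG_FUNCTION_ERROR_INJECTION, open_ctree() can be forced to
 * return an errno from the generic fault-injection framework, which is one
 * way the mount error paths above get exercised in testing (see
 * Documentation/fault-injection/ for the interface details).
 */
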
3689 static void btrfs_end_super_write(struct bio *bio)
3690 {
3691 	struct btrfs_device *device = bio->bi_private;
3692 	struct folio_iter fi;
3693 
3694 	bio_for_each_folio_all(fi, bio) {
3695 		if (bio->bi_status) {
3696 			btrfs_warn_rl(device->fs_info,
3697 				"lost super block write due to IO error on %s (%d)",
3698 				btrfs_dev_name(device),
3699 				blk_status_to_errno(bio->bi_status));
3700 			btrfs_dev_stat_inc_and_print(device,
3701 						     BTRFS_DEV_STAT_WRITE_ERRS);
3702 			/* Ensure failure if the primary sb fails. */
3703 			if (bio->bi_opf & REQ_FUA)
3704 				atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR,
3705 					   &device->sb_write_errors);
3706 			else
3707 				atomic_inc(&device->sb_write_errors);
3708 		}
3709 		folio_unlock(fi.folio);
3710 		folio_put(fi.folio);
3711 	}
3712 
3713 	bio_put(bio);
3714 }
3715 
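/*
 * Illustration of the error encoding used above: a failed REQ_FUA (primary)
 * super block write adds the large BTRFS_SUPER_PRIMARY_WRITE_ERROR sentinel
 * to the same atomic counter that ordinary mirror failures increment, so a
 * single counter carries both facts.  A minimal sketch of the decode side
 * (illustration only; the real check lives in wait_dev_supers() below):
 */
static inline bool example_sb_primary_write_failed(struct btrfs_device *device)
{
	return atomic_read(&device->sb_write_errors) >=
	       BTRFS_SUPER_PRIMARY_WRITE_ERROR;
}
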
3716 /*
3717  * Write superblock @sb to @device. Do not wait for completion; all the
3718  * folios we use for writing are locked.
3719  *
3720  * Write @max_mirrors copies of the superblock, where 0 means the default:
3721  * all copies that fit the expected device size at commit time. Note that
3722  * @max_mirrors must be the same for the write and wait phases.
3723  *
3724  * Return 0 if at least one copy was submitted successfully, -1 otherwise.
3725  */
3726 static int write_dev_supers(struct btrfs_device *device,
3727 			    struct btrfs_super_block *sb, int max_mirrors)
3728 {
3729 	struct btrfs_fs_info *fs_info = device->fs_info;
3730 	struct address_space *mapping = device->bdev->bd_mapping;
3731 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3732 	int i;
3733 	int ret;
3734 	u64 bytenr, bytenr_orig;
3735 
3736 	atomic_set(&device->sb_write_errors, 0);
3737 
3738 	if (max_mirrors == 0)
3739 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3740 
3741 	shash->tfm = fs_info->csum_shash;
3742 
3743 	for (i = 0; i < max_mirrors; i++) {
3744 		struct folio *folio;
3745 		struct bio *bio;
3746 		struct btrfs_super_block *disk_super;
3747 		size_t offset;
3748 
3749 		bytenr_orig = btrfs_sb_offset(i);
3750 		ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
3751 		if (ret == -ENOENT) {
3752 			continue;
3753 		} else if (ret < 0) {
3754 			btrfs_err(device->fs_info,
3755 			  "couldn't get super block location for mirror %d error %d",
3756 			  i, ret);
3757 			atomic_inc(&device->sb_write_errors);
3758 			continue;
3759 		}
3760 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3761 		    device->commit_total_bytes)
3762 			break;
3763 
3764 		btrfs_set_super_bytenr(sb, bytenr_orig);
3765 
3766 		crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3767 				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3768 				    sb->csum);
3769 
3770 		folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT,
3771 					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3772 					    GFP_NOFS);
3773 		if (IS_ERR(folio)) {
3774 			btrfs_err(device->fs_info,
3775 			  "couldn't get super block page for bytenr %llu error %ld",
3776 			  bytenr, PTR_ERR(folio));
3777 			atomic_inc(&device->sb_write_errors);
3778 			continue;
3779 		}
3780 
3781 		offset = offset_in_folio(folio, bytenr);
3782 		disk_super = folio_address(folio) + offset;
3783 		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3784 
3785 		/*
3786 		 * Directly use bios here instead of relying on the page cache
3787 		 * to do I/O, so we don't lose the ability to do integrity
3788 		 * checking.
3789 		 */
3790 		bio = bio_alloc(device->bdev, 1,
3791 				REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
3792 				GFP_NOFS);
3793 		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3794 		bio->bi_private = device;
3795 		bio->bi_end_io = btrfs_end_super_write;
3796 		bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset);
3797 
3798 		/*
3799 		 * We FUA only the first super block.  The others we allow to
3800 		 * go down lazily, and there's a short window where the on-disk
3801 		 * copies might still contain the older version.
3802 		 */
3803 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3804 			bio->bi_opf |= REQ_FUA;
3805 		submit_bio(bio);
3806 
3807 		if (btrfs_advance_sb_log(device, i))
3808 			atomic_inc(&device->sb_write_errors);
3809 	}
3810 	return atomic_read(&device->sb_write_errors) < i ? 0 : -1;
3811 }
3812 
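/*
 * For reference, btrfs_sb_offset() used above places the super block copies
 * at exponentially growing offsets: copy 0 at 64KiB, copy 1 at 64MiB and
 * copy 2 at 256GiB.  A sketch of the computation, assuming the usual
 * BTRFS_SUPER_MIRROR_SHIFT of 12 (illustration only, not the canonical
 * definition):
 */
static u64 example_sb_offset(int mirror)
{
	u64 start = SZ_16K;

	if (mirror)
		return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
	return BTRFS_SUPER_INFO_OFFSET;	/* 64KiB */
}
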
3813 /*
3814  * Wait for completion of the super block writes submitted by
3815  * write_dev_supers(); @max_mirrors must match between the write and wait phases.
3816  *
3817  * Return -1 if the primary super block write failed or no super block
3818  * copies were written, 0 otherwise.
3819  */
3820 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3821 {
3822 	int i;
3823 	int errors = 0;
3824 	bool primary_failed = false;
3825 	int ret;
3826 	u64 bytenr;
3827 
3828 	if (max_mirrors == 0)
3829 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3830 
3831 	for (i = 0; i < max_mirrors; i++) {
3832 		struct folio *folio;
3833 
3834 		ret = btrfs_sb_log_location(device, i, READ, &bytenr);
3835 		if (ret == -ENOENT) {
3836 			break;
3837 		} else if (ret < 0) {
3838 			errors++;
3839 			if (i == 0)
3840 				primary_failed = true;
3841 			continue;
3842 		}
3843 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3844 		    device->commit_total_bytes)
3845 			break;
3846 
3847 		folio = filemap_get_folio(device->bdev->bd_mapping,
3848 					  bytenr >> PAGE_SHIFT);
3849 		/* If the folio has been removed, then we know it completed. */
3850 		if (IS_ERR(folio))
3851 			continue;
3852 
3853 		/* Folio will be unlocked once the write completes. */
3854 		folio_wait_locked(folio);
3855 		folio_put(folio);
3856 	}
3857 
3858 	errors += atomic_read(&device->sb_write_errors);
3859 	if (errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR)
3860 		primary_failed = true;
3861 	if (primary_failed) {
3862 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3863 			  device->devid);
3864 		return -1;
3865 	}
3866 
3867 	return errors < i ? 0 : -1;
3868 }
3869 
3870 /*
3871  * Endio for write_dev_flush(); this will wake anyone waiting
3872  * for the barrier when it is done.
3873  */
3874 static void btrfs_end_empty_barrier(struct bio *bio)
3875 {
3876 	bio_uninit(bio);
3877 	complete(bio->bi_private);
3878 }
3879 
3880 /*
3881  * Submit a flush request to the device if it supports it. Error handling is
3882  * done in the waiting counterpart.
3883  */
3884 static void write_dev_flush(struct btrfs_device *device)
3885 {
3886 	struct bio *bio = &device->flush_bio;
3887 
3888 	device->last_flush_error = BLK_STS_OK;
3889 
3890 	bio_init(bio, device->bdev, NULL, 0,
3891 		 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
3892 	bio->bi_end_io = btrfs_end_empty_barrier;
3893 	init_completion(&device->flush_wait);
3894 	bio->bi_private = &device->flush_wait;
3895 	submit_bio(bio);
3896 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3897 }
3898 
3899 /*
3900  * If the flush bio has been submitted by write_dev_flush, wait for it.
3901  * Return true for any error, and false otherwise.
3902  */
3903 static bool wait_dev_flush(struct btrfs_device *device)
3904 {
3905 	struct bio *bio = &device->flush_bio;
3906 
3907 	if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3908 		return false;
3909 
3910 	wait_for_completion_io(&device->flush_wait);
3911 
3912 	if (bio->bi_status) {
3913 		device->last_flush_error = bio->bi_status;
3914 		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS);
3915 		return true;
3916 	}
3917 
3918 	return false;
3919 }
3920 
3921 /*
3922  * Send an empty flush down to each device in parallel,
3923  * then wait for all of them.
3924  */
3925 static int barrier_all_devices(struct btrfs_fs_info *info)
3926 {
3927 	struct list_head *head;
3928 	struct btrfs_device *dev;
3929 	int errors_wait = 0;
3930 
3931 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3932 	/* send down all the barriers */
3933 	head = &info->fs_devices->devices;
3934 	list_for_each_entry(dev, head, dev_list) {
3935 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3936 			continue;
3937 		if (!dev->bdev)
3938 			continue;
3939 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3940 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3941 			continue;
3942 
3943 		write_dev_flush(dev);
3944 	}
3945 
3946 	/* wait for all the barriers */
3947 	list_for_each_entry(dev, head, dev_list) {
3948 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3949 			continue;
3950 		if (!dev->bdev) {
3951 			errors_wait++;
3952 			continue;
3953 		}
3954 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3955 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3956 			continue;
3957 
3958 		if (wait_dev_flush(dev))
3959 			errors_wait++;
3960 	}
3961 
3962 	/*
3963 	 * Check last_flush_error of the disks to determine the device
3964 	 * state.
3965 	 */
3966 	if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
3967 		return -EIO;
3968 
3969 	return 0;
3970 }
3971 
3972 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3973 {
3974 	int raid_type;
3975 	int min_tolerated = INT_MAX;
3976 
3977 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3978 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3979 		min_tolerated = min_t(int, min_tolerated,
3980 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3981 				    tolerated_failures);
3982 
3983 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3984 		if (raid_type == BTRFS_RAID_SINGLE)
3985 			continue;
3986 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3987 			continue;
3988 		min_tolerated = min_t(int, min_tolerated,
3989 				    btrfs_raid_array[raid_type].
3990 				    tolerated_failures);
3991 	}
3992 
3993 	if (min_tolerated == INT_MAX) {
3994 		btrfs_warn(NULL, "unknown raid flag: %llu", flags);
3995 		min_tolerated = 0;
3996 	}
3997 
3998 	return min_tolerated;
3999 }
4000 
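/*
 * Worked example for the function above, using the tolerated_failures
 * values from btrfs_raid_array: a filesystem with RAID1 metadata
 * (tolerates 1 failure) and RAID0 data (tolerates 0) yields min(1, 0) == 0,
 * i.e. no barrier failures can be tolerated because a single RAID0 device
 * failing already puts data at risk.
 */
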
4001 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
4002 {
4003 	struct list_head *head;
4004 	struct btrfs_device *dev;
4005 	struct btrfs_super_block *sb;
4006 	struct btrfs_dev_item *dev_item;
4007 	int ret;
4008 	int do_barriers;
4009 	int max_errors;
4010 	int total_errors = 0;
4011 	u64 flags;
4012 
4013 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
4014 
4015 	/*
4016 	 * max_mirrors == 0 indicates we're called from commit_transaction,
4017 	 * not from fsync, where the tree roots in fs_info may not yet be
4018 	 * consistent on disk.
4019 	 */
4020 	if (max_mirrors == 0)
4021 		backup_super_roots(fs_info);
4022 
4023 	sb = fs_info->super_for_commit;
4024 	dev_item = &sb->dev_item;
4025 
4026 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4027 	head = &fs_info->fs_devices->devices;
4028 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
4029 
4030 	if (do_barriers) {
4031 		ret = barrier_all_devices(fs_info);
4032 		if (ret) {
4033 			mutex_unlock(
4034 				&fs_info->fs_devices->device_list_mutex);
4035 			btrfs_handle_fs_error(fs_info, ret,
4036 					      "errors while submitting device barriers.");
4037 			return ret;
4038 		}
4039 	}
4040 
4041 	list_for_each_entry(dev, head, dev_list) {
4042 		if (!dev->bdev) {
4043 			total_errors++;
4044 			continue;
4045 		}
4046 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4047 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4048 			continue;
4049 
4050 		btrfs_set_stack_device_generation(dev_item, 0);
4051 		btrfs_set_stack_device_type(dev_item, dev->type);
4052 		btrfs_set_stack_device_id(dev_item, dev->devid);
4053 		btrfs_set_stack_device_total_bytes(dev_item,
4054 						   dev->commit_total_bytes);
4055 		btrfs_set_stack_device_bytes_used(dev_item,
4056 						  dev->commit_bytes_used);
4057 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
4058 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
4059 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
4060 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
4061 		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
4062 		       BTRFS_FSID_SIZE);
4063 
4064 		flags = btrfs_super_flags(sb);
4065 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
4066 
4067 		ret = btrfs_validate_write_super(fs_info, sb);
4068 		if (ret < 0) {
4069 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4070 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
4071 				"unexpected superblock corruption detected");
4072 			return -EUCLEAN;
4073 		}
4074 
4075 		ret = write_dev_supers(dev, sb, max_mirrors);
4076 		if (ret)
4077 			total_errors++;
4078 	}
4079 	if (total_errors > max_errors) {
4080 		btrfs_err(fs_info, "%d errors while writing supers",
4081 			  total_errors);
4082 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4083 
4084 		/* FUA is masked off if unsupported and can't be the reason */
4085 		btrfs_handle_fs_error(fs_info, -EIO,
4086 				      "%d errors while writing supers",
4087 				      total_errors);
4088 		return -EIO;
4089 	}
4090 
4091 	total_errors = 0;
4092 	list_for_each_entry(dev, head, dev_list) {
4093 		if (!dev->bdev)
4094 			continue;
4095 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4096 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4097 			continue;
4098 
4099 		ret = wait_dev_supers(dev, max_mirrors);
4100 		if (ret)
4101 			total_errors++;
4102 	}
4103 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4104 	if (total_errors > max_errors) {
4105 		btrfs_handle_fs_error(fs_info, -EIO,
4106 				      "%d errors while writing supers",
4107 				      total_errors);
4108 		return -EIO;
4109 	}
4110 	return 0;
4111 }
4112 
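/*
 * Note on the max_errors computation above: with N devices it is N - 1, so
 * the super block commit is only aborted once every device failed to write
 * (or wait for) its copies - e.g. with three devices, up to two may error
 * out and the commit still succeeds on the remaining one.
 */
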
4113 /* Drop a fs root from the radix tree and free it. */
4114 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
4115 				  struct btrfs_root *root)
4116 {
4117 	bool drop_ref = false;
4118 
4119 	spin_lock(&fs_info->fs_roots_radix_lock);
4120 	radix_tree_delete(&fs_info->fs_roots_radix,
4121 			  (unsigned long)btrfs_root_id(root));
4122 	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
4123 		drop_ref = true;
4124 	spin_unlock(&fs_info->fs_roots_radix_lock);
4125 
4126 	if (BTRFS_FS_ERROR(fs_info)) {
4127 		ASSERT(root->log_root == NULL);
4128 		if (root->reloc_root) {
4129 			btrfs_put_root(root->reloc_root);
4130 			root->reloc_root = NULL;
4131 		}
4132 	}
4133 
4134 	if (drop_ref)
4135 		btrfs_put_root(root);
4136 }
4137 
4138 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4139 {
4140 	mutex_lock(&fs_info->cleaner_mutex);
4141 	btrfs_run_delayed_iputs(fs_info);
4142 	mutex_unlock(&fs_info->cleaner_mutex);
4143 	wake_up_process(fs_info->cleaner_kthread);
4144 
4145 	/* Wait until ongoing cleanup work is done. */
4146 	down_write(&fs_info->cleanup_work_sem);
4147 	up_write(&fs_info->cleanup_work_sem);
4148 
4149 	return btrfs_commit_current_transaction(fs_info->tree_root);
4150 }
4151 
4152 static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
4153 {
4154 	struct btrfs_transaction *trans;
4155 	struct btrfs_transaction *tmp;
4156 	bool found = false;
4157 
4158 	/*
4159 	 * This function is only called at the very end of close_ctree(), thus
4160 	 * there is no other running transaction and no need to take trans_lock.
4161 	 */
4162 	ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
4163 	list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
4164 		struct extent_state *cached = NULL;
4165 		u64 dirty_bytes = 0;
4166 		u64 cur = 0;
4167 		u64 found_start;
4168 		u64 found_end;
4169 
4170 		found = true;
4171 		while (btrfs_find_first_extent_bit(&trans->dirty_pages, cur,
4172 						   &found_start, &found_end,
4173 						   EXTENT_DIRTY, &cached)) {
4174 			dirty_bytes += found_end + 1 - found_start;
4175 			cur = found_end + 1;
4176 		}
4177 		btrfs_warn(fs_info,
4178 	"transaction %llu (with %llu dirty metadata bytes) is not committed",
4179 			   trans->transid, dirty_bytes);
4180 		btrfs_cleanup_one_transaction(trans);
4181 
4182 		if (trans == fs_info->running_transaction)
4183 			fs_info->running_transaction = NULL;
4184 		list_del_init(&trans->list);
4185 
4186 		btrfs_put_transaction(trans);
4187 		trace_btrfs_transaction_commit(fs_info);
4188 	}
4189 	ASSERT(!found);
4190 }
4191 
4192 void __cold close_ctree(struct btrfs_fs_info *fs_info)
4193 {
4194 	int ret;
4195 
4196 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4197 
4198 	/*
4199 	 * If we had UNFINISHED_DROPS we could still be processing them, so
4200 	 * clear that bit and wake up relocation so it can stop.
4201 	 * We must do this before stopping the block group reclaim task, because
4202 	 * at btrfs_relocate_block_group() we wait for this bit, and after the
4203 	 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
4204 	 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
4205 	 * return 1.
4206 	 */
4207 	btrfs_wake_unfinished_drop(fs_info);
4208 
4209 	/*
4210 	 * We may have the reclaim task running and relocating a data block group,
4211 	 * in which case it may create delayed iputs. So stop it before we park
4212 	 * the cleaner kthread, otherwise we can get new delayed iputs after
4213 	 * parking the cleaner, and that can make the async reclaim task hang
4214 	 * if it's waiting for delayed iputs to complete, since the cleaner is
4215 	 * parked and cannot run delayed iputs - this would make us hang when
4216 	 * trying to stop the async reclaim task.
4217 	 */
4218 	cancel_work_sync(&fs_info->reclaim_bgs_work);
4219 	/*
4220 	 * We don't want the cleaner to start new transactions, add more delayed
4221 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4222 	 * because that frees the task_struct, and the transaction kthread might
4223 	 * still try to wake up the cleaner.
4224 	 */
4225 	kthread_park(fs_info->cleaner_kthread);
4226 
4227 	/* wait for the qgroup rescan worker to stop */
4228 	btrfs_qgroup_wait_for_completion(fs_info, false);
4229 
4230 	/* wait for the uuid_scan task to finish */
4231 	down(&fs_info->uuid_tree_rescan_sem);
4232 	/* Avoid complaints from lockdep et al.; set the sem back to its initial state. */
4233 	up(&fs_info->uuid_tree_rescan_sem);
4234 
4235 	/* pause restriper - we want to resume on mount */
4236 	btrfs_pause_balance(fs_info);
4237 
4238 	btrfs_dev_replace_suspend_for_unmount(fs_info);
4239 
4240 	btrfs_scrub_cancel(fs_info);
4241 
4242 	/* wait for any defraggers to finish */
4243 	wait_event(fs_info->transaction_wait,
4244 		   (atomic_read(&fs_info->defrag_running) == 0));
4245 
4246 	/* clear out the rbtree of defraggable inodes */
4247 	btrfs_cleanup_defrag_inodes(fs_info);
4248 
4249 	/*
4250 	 * Handle the error fs first, as it will flush and wait for all ordered
4251 	 * extents.  This will generate delayed iputs, thus we want to handle
4252 	 * it first.
4253 	 */
4254 	if (unlikely(BTRFS_FS_ERROR(fs_info)))
4255 		btrfs_error_commit_super(fs_info);
4256 
4257 	/*
4258 	 * Wait for any fixup workers to complete.
4259 	 * If we don't wait for them here and they are still running by the time
4260 	 * we call kthread_stop() against the cleaner kthread further below, we
4261 	 * get a use-after-free on the cleaner because the fixup worker adds an
4262 	 * inode to the list of delayed iputs and then attempts to wake up the
4263 	 * cleaner kthread, which was already stopped and destroyed. We already
4264 	 * parked the cleaner, but below we run all pending delayed iputs.
4265 	 */
4266 	btrfs_flush_workqueue(fs_info->fixup_workers);
4267 	/*
4268 	 * Similar case here: we have to wait for delalloc workers before we
4269 	 * proceed below and stop the cleaner kthread, otherwise we trigger a
4270 	 * use-after-free on the cleaner kthread task_struct when a delalloc
4271 	 * worker running submit_compressed_extents() adds a delayed iput, which
4272 	 * does a wake up on the cleaner kthread, which will already have been
4273 	 * freed when we call kthread_stop() below.
4274 	 */
4275 	btrfs_flush_workqueue(fs_info->delalloc_workers);
4276 
4277 	/*
4278 	 * We can have ordered extents getting their last reference dropped from
4279 	 * the fs_info->workers queue because for async writes for data bios we
4280 	 * queue a work for that queue, at btrfs_wq_submit_bio(), that runs
4281 	 * run_one_async_done() which calls btrfs_bio_end_io() in case the bio
4282 	 * has an error, and that later function can do the final
4283 	 * btrfs_put_ordered_extent() on the ordered extent attached to the bio,
4284 	 * which adds a delayed iput for the inode. So we must flush the queue
4285 	 * so that we don't have delayed iputs after committing the current
4286 	 * transaction below and stopping the cleaner and transaction kthreads.
4287 	 */
4288 	btrfs_flush_workqueue(fs_info->workers);
4289 
4290 	/*
4291 	 * When finishing a compressed write bio we schedule a work queue item
4292 	 * to finish an ordered extent - btrfs_finish_compressed_write_work()
4293 	 * calls btrfs_finish_ordered_extent() which in turn does a call to
4294 	 * btrfs_queue_ordered_fn(), and that queues the ordered extent
4295 	 * completion either in the endio_write_workers work queue or in the
4296 	 * fs_info->endio_freespace_worker work queue. We flush those queues
4297 	 * below, so before we flush them we must flush this queue for the
4298 	 * workers of compressed writes.
4299 	 */
4300 	flush_workqueue(fs_info->compressed_write_workers);
4301 
4302 	/*
4303 	 * After we parked the cleaner kthread, ordered extents may have
4304 	 * completed and created new delayed iputs. If one of the async reclaim
4305 	 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
4306 	 * can hang forever trying to stop it, because if a delayed iput is
4307 	 * added after it ran btrfs_run_delayed_iputs() and before it called
4308 	 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
4309 	 * no one else to run iputs.
4310 	 *
4311 	 * So wait for all ongoing ordered extents to complete and then run
4312 	 * delayed iputs. This works because once we reach this point no one
4313 	 * can create new ordered extents, but delayed iputs can still be added
4314 	 * by a reclaim worker (see comments further below).
4315 	 *
4316 	 * Also note that btrfs_wait_ordered_roots() is not safe here, because
4317 	 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
4318 	 * but the delayed iput for the respective inode is made only when doing
4319 	 * the final btrfs_put_ordered_extent() (which must happen at
4320 	 * btrfs_finish_ordered_io() when we are unmounting).
4321 	 */
4322 	btrfs_flush_workqueue(fs_info->endio_write_workers);
4323 	/* Ordered extents for free space inodes. */
4324 	btrfs_flush_workqueue(fs_info->endio_freespace_worker);
4325 	/*
4326 	 * Run delayed iputs in case an async reclaim worker is waiting for them
4327 	 * to be run as mentioned above.
4328 	 */
4329 	btrfs_run_delayed_iputs(fs_info);
4330 
4331 	cancel_work_sync(&fs_info->async_reclaim_work);
4332 	cancel_work_sync(&fs_info->async_data_reclaim_work);
4333 	cancel_work_sync(&fs_info->preempt_reclaim_work);
4334 	cancel_work_sync(&fs_info->em_shrinker_work);
4335 
4336 	/*
4337 	 * Run delayed iputs again because an async reclaim worker may have
4338 	 * added new ones if it was flushing delalloc:
4339 	 *
4340 	 * shrink_delalloc() -> btrfs_start_delalloc_roots() ->
4341 	 *    start_delalloc_inodes() -> btrfs_add_delayed_iput()
4342 	 */
4343 	btrfs_run_delayed_iputs(fs_info);
4344 
4345 	/* There should be no more workload to generate new delayed iputs. */
4346 	set_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state);
4347 
4348 	/* Cancel or finish ongoing discard work */
4349 	btrfs_discard_cleanup(fs_info);
4350 
4351 	if (!sb_rdonly(fs_info->sb)) {
4352 		/*
4353 		 * The cleaner kthread is stopped, so do one final pass over
4354 		 * unused block groups.
4355 		 */
4356 		btrfs_delete_unused_bgs(fs_info);
4357 
4358 		/*
4359 		 * There might be existing delayed inode workers still running
4360 		 * and holding an empty delayed inode item. We must wait for
4361 		 * them to complete first because they can create a transaction.
4362 		 * This happens when someone calls btrfs_balance_delayed_items()
4363 		 * and then a transaction commit runs the same delayed nodes
4364 		 * before any delayed worker has done something with the nodes.
4365 		 * We must wait for any worker here and not at transaction
4366 		 * commit time since that could cause a deadlock.
4367 		 * This is a very rare case.
4368 		 */
4369 		btrfs_flush_workqueue(fs_info->delayed_workers);
4370 
4371 		ret = btrfs_commit_super(fs_info);
4372 		if (ret)
4373 			btrfs_err(fs_info, "commit super ret %d", ret);
4374 	}
4375 
4376 	kthread_stop(fs_info->transaction_kthread);
4377 	kthread_stop(fs_info->cleaner_kthread);
4378 
4379 	ASSERT(list_empty(&fs_info->delayed_iputs));
4380 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4381 
4382 	if (btrfs_check_quota_leak(fs_info)) {
4383 		DEBUG_WARN("qgroup reserved space leaked");
4384 		btrfs_err(fs_info, "qgroup reserved space leaked");
4385 	}
4386 
4387 	btrfs_free_qgroup_config(fs_info);
4388 	ASSERT(list_empty(&fs_info->delalloc_roots));
4389 
4390 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4391 		btrfs_info(fs_info, "at unmount delalloc count %lld",
4392 		       percpu_counter_sum(&fs_info->delalloc_bytes));
4393 	}
4394 
4395 	if (percpu_counter_sum(&fs_info->ordered_bytes))
4396 		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4397 			   percpu_counter_sum(&fs_info->ordered_bytes));
4398 
4399 	btrfs_sysfs_remove_mounted(fs_info);
4400 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4401 
4402 	btrfs_put_block_group_cache(fs_info);
4403 
4404 	/*
4405 	 * We must make sure no read requests are submitted
4406 	 * after we stop all the workers.
4407 	 */
4408 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4409 	btrfs_stop_all_workers(fs_info);
4410 
4411 	/* We shouldn't have any transaction open at this point */
4412 	warn_about_uncommitted_trans(fs_info);
4413 
4414 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4415 	free_root_pointers(fs_info, true);
4416 	btrfs_free_fs_roots(fs_info);
4417 
4418 	/*
4419 	 * We must free the block groups after dropping the fs_roots as we could
4420 	 * have had an IO error and have left over tree log blocks that aren't
4421 	 * cleaned up until the fs roots are freed.  This makes the block group
4422 	 * accounting appear to be wrong because there's pending reserved bytes,
4423 	 * so make sure we do the block group cleanup afterwards.
4424 	 */
4425 	btrfs_free_block_groups(fs_info);
4426 
4427 	iput(fs_info->btree_inode);
4428 
4429 	btrfs_mapping_tree_free(fs_info);
4430 }
4431 
4432 void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
4433 			     struct extent_buffer *buf)
4434 {
4435 	struct btrfs_fs_info *fs_info = buf->fs_info;
4436 	u64 transid = btrfs_header_generation(buf);
4437 
4438 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4439 	/*
4440 	 * This is a fast path so only do this check if we have sanity tests
4441 	 * enabled.  Normal people shouldn't be using unmapped buffers as dirty
4442 	 * outside of the sanity tests.
4443 	 */
4444 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4445 		return;
4446 #endif
4447 	/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
4448 	ASSERT(trans->transid == fs_info->generation);
4449 	btrfs_assert_tree_write_locked(buf);
4450 	if (unlikely(transid != fs_info->generation)) {
4451 		btrfs_abort_transaction(trans, -EUCLEAN);
4452 		btrfs_crit(fs_info,
4453 "dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
4454 			   buf->start, transid, fs_info->generation);
4455 	}
4456 	set_extent_buffer_dirty(buf);
4457 }
4458 
4459 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4460 					int flush_delayed)
4461 {
4462 	/*
4463 	 * Looks as though older kernels can get into trouble with
4464 	 * this code; they end up stuck in balance_dirty_pages() forever.
4465 	 */
4466 	int ret;
4467 
4468 	if (current->flags & PF_MEMALLOC)
4469 		return;
4470 
4471 	if (flush_delayed)
4472 		btrfs_balance_delayed_items(fs_info);
4473 
4474 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4475 				     BTRFS_DIRTY_METADATA_THRESH,
4476 				     fs_info->dirty_metadata_batch);
4477 	if (ret > 0) {
4478 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4479 	}
4480 }
4481 
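/*
 * A minimal sketch of the batched percpu counter pattern used above
 * (hypothetical counter and values, illustration only): per-CPU deltas make
 * updates cheap, and __percpu_counter_compare() only folds them into a
 * precise sum when the fuzzy value is close to the threshold.
 */
static bool example_over_dirty_threshold(struct percpu_counter *dirty,
					 s64 thresh, s32 batch)
{
	percpu_counter_add_batch(dirty, PAGE_SIZE, batch);
	return __percpu_counter_compare(dirty, thresh, batch) > 0;
}
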
4482 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4483 {
4484 	__btrfs_btree_balance_dirty(fs_info, 1);
4485 }
4486 
4487 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4488 {
4489 	__btrfs_btree_balance_dirty(fs_info, 0);
4490 }
4491 
4492 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4493 {
4494 	/* cleanup FS via transaction */
4495 	btrfs_cleanup_transaction(fs_info);
4496 
4497 	down_write(&fs_info->cleanup_work_sem);
4498 	up_write(&fs_info->cleanup_work_sem);
4499 }
4500 
4501 static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4502 {
4503 	struct btrfs_root *gang[8];
4504 	u64 root_objectid = 0;
4505 	int ret;
4506 
4507 	spin_lock(&fs_info->fs_roots_radix_lock);
4508 	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4509 					     (void **)gang, root_objectid,
4510 					     ARRAY_SIZE(gang))) != 0) {
4511 		int i;
4512 
4513 		for (i = 0; i < ret; i++)
4514 			gang[i] = btrfs_grab_root(gang[i]);
4515 		spin_unlock(&fs_info->fs_roots_radix_lock);
4516 
4517 		for (i = 0; i < ret; i++) {
4518 			if (!gang[i])
4519 				continue;
4520 			root_objectid = btrfs_root_id(gang[i]);
4521 			btrfs_free_log(NULL, gang[i]);
4522 			btrfs_put_root(gang[i]);
4523 		}
4524 		root_objectid++;
4525 		spin_lock(&fs_info->fs_roots_radix_lock);
4526 	}
4527 	spin_unlock(&fs_info->fs_roots_radix_lock);
4528 	btrfs_free_log_root_tree(NULL, fs_info);
4529 }
4530 
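/*
 * The loop above (and btrfs_free_all_qgroup_pertrans() below) uses a resume
 * cursor over the roots radix tree: look up a batch, do the real work, then
 * restart just past the last id processed.  A stripped-down sketch of the
 * pattern (hypothetical walker; unlike the real code it never drops the
 * lock mid-batch):
 */
static void example_walk_fs_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *gang[8];
	u64 next = 0;
	int found;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while ((found = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					       (void **)gang, next,
					       ARRAY_SIZE(gang))) != 0) {
		/* Resume after the highest id seen in this batch. */
		next = btrfs_root_id(gang[found - 1]) + 1;
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
}
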
4531 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4532 {
4533 	struct btrfs_ordered_extent *ordered;
4534 
4535 	spin_lock(&root->ordered_extent_lock);
4536 	/*
4537 	 * This will just short-circuit the ordered completion code, which will
4538 	 * make sure the ordered extent gets properly cleaned up.
4539 	 */
4540 	list_for_each_entry(ordered, &root->ordered_extents,
4541 			    root_extent_list)
4542 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4543 	spin_unlock(&root->ordered_extent_lock);
4544 }
4545 
4546 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4547 {
4548 	struct btrfs_root *root;
4549 	LIST_HEAD(splice);
4550 
4551 	spin_lock(&fs_info->ordered_root_lock);
4552 	list_splice_init(&fs_info->ordered_roots, &splice);
4553 	while (!list_empty(&splice)) {
4554 		root = list_first_entry(&splice, struct btrfs_root,
4555 					ordered_root);
4556 		list_move_tail(&root->ordered_root,
4557 			       &fs_info->ordered_roots);
4558 
4559 		spin_unlock(&fs_info->ordered_root_lock);
4560 		btrfs_destroy_ordered_extents(root);
4561 
4562 		cond_resched();
4563 		spin_lock(&fs_info->ordered_root_lock);
4564 	}
4565 	spin_unlock(&fs_info->ordered_root_lock);
4566 
4567 	/*
4568 	 * We need this here because if we've been flipped read-only we won't
4569 	 * get sync() from the umount, so we need to make sure any ordered
4570 	 * extents that haven't had their dirty pages IO start writeout yet
4571 	 * actually get run and error out properly.
4572 	 */
4573 	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
4574 }
4575 
4576 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4577 {
4578 	struct btrfs_inode *btrfs_inode;
4579 	LIST_HEAD(splice);
4580 
4581 	spin_lock(&root->delalloc_lock);
4582 	list_splice_init(&root->delalloc_inodes, &splice);
4583 
4584 	while (!list_empty(&splice)) {
4585 		struct inode *inode = NULL;
4586 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4587 					       delalloc_inodes);
4588 		btrfs_del_delalloc_inode(btrfs_inode);
4589 		spin_unlock(&root->delalloc_lock);
4590 
4591 		/*
4592 		 * Make sure we get a live inode and that it won't disappear
4593 		 * in the meantime.
4594 		 */
4595 		inode = igrab(&btrfs_inode->vfs_inode);
4596 		if (inode) {
4597 			unsigned int nofs_flag;
4598 
4599 			nofs_flag = memalloc_nofs_save();
4600 			invalidate_inode_pages2(inode->i_mapping);
4601 			memalloc_nofs_restore(nofs_flag);
4602 			iput(inode);
4603 		}
4604 		spin_lock(&root->delalloc_lock);
4605 	}
4606 	spin_unlock(&root->delalloc_lock);
4607 }
4608 
4609 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4610 {
4611 	struct btrfs_root *root;
4612 	LIST_HEAD(splice);
4613 
4614 	spin_lock(&fs_info->delalloc_root_lock);
4615 	list_splice_init(&fs_info->delalloc_roots, &splice);
4616 	while (!list_empty(&splice)) {
4617 		root = list_first_entry(&splice, struct btrfs_root,
4618 					 delalloc_root);
4619 		root = btrfs_grab_root(root);
4620 		BUG_ON(!root);
4621 		spin_unlock(&fs_info->delalloc_root_lock);
4622 
4623 		btrfs_destroy_delalloc_inodes(root);
4624 		btrfs_put_root(root);
4625 
4626 		spin_lock(&fs_info->delalloc_root_lock);
4627 	}
4628 	spin_unlock(&fs_info->delalloc_root_lock);
4629 }
4630 
4631 static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4632 					 struct extent_io_tree *dirty_pages,
4633 					 int mark)
4634 {
4635 	struct extent_buffer *eb;
4636 	u64 start = 0;
4637 	u64 end;
4638 
4639 	while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
4640 					   mark, NULL)) {
4641 		btrfs_clear_extent_bit(dirty_pages, start, end, mark, NULL);
4642 		while (start <= end) {
4643 			eb = find_extent_buffer(fs_info, start);
4644 			start += fs_info->nodesize;
4645 			if (!eb)
4646 				continue;
4647 
4648 			btrfs_tree_lock(eb);
4649 			wait_on_extent_buffer_writeback(eb);
4650 			btrfs_clear_buffer_dirty(NULL, eb);
4651 			btrfs_tree_unlock(eb);
4652 
4653 			free_extent_buffer_stale(eb);
4654 		}
4655 	}
4656 }
4657 
4658 static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4659 					struct extent_io_tree *unpin)
4660 {
4661 	u64 start;
4662 	u64 end;
4663 
4664 	while (1) {
4665 		struct extent_state *cached_state = NULL;
4666 
4667 		/*
4668 		 * btrfs_finish_extent_commit() may get the same range as ours
4669 		 * between btrfs_find_first_extent_bit() and btrfs_clear_extent_dirty().
4670 		 * Hence, hold the unused_bg_unpin_mutex to avoid double unpinning
4671 		 * the same extent range.
4672 		 */
4673 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4674 		if (!btrfs_find_first_extent_bit(unpin, 0, &start, &end,
4675 						 EXTENT_DIRTY, &cached_state)) {
4676 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4677 			break;
4678 		}
4679 
4680 		btrfs_clear_extent_dirty(unpin, start, end, &cached_state);
4681 		btrfs_free_extent_state(cached_state);
4682 		btrfs_error_unpin_extent_range(fs_info, start, end);
4683 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4684 		cond_resched();
4685 	}
4686 }
4687 
4688 static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4689 {
4690 	struct inode *inode;
4691 
4692 	inode = cache->io_ctl.inode;
4693 	if (inode) {
4694 		unsigned int nofs_flag;
4695 
4696 		nofs_flag = memalloc_nofs_save();
4697 		invalidate_inode_pages2(inode->i_mapping);
4698 		memalloc_nofs_restore(nofs_flag);
4699 
4700 		BTRFS_I(inode)->generation = 0;
4701 		cache->io_ctl.inode = NULL;
4702 		iput(inode);
4703 	}
4704 	ASSERT(cache->io_ctl.pages == NULL);
4705 	btrfs_put_block_group(cache);
4706 }
4707 
4708 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4709 			     struct btrfs_fs_info *fs_info)
4710 {
4711 	struct btrfs_block_group *cache;
4712 
4713 	spin_lock(&cur_trans->dirty_bgs_lock);
4714 	while (!list_empty(&cur_trans->dirty_bgs)) {
4715 		cache = list_first_entry(&cur_trans->dirty_bgs,
4716 					 struct btrfs_block_group,
4717 					 dirty_list);
4718 
4719 		if (!list_empty(&cache->io_list)) {
4720 			spin_unlock(&cur_trans->dirty_bgs_lock);
4721 			list_del_init(&cache->io_list);
4722 			btrfs_cleanup_bg_io(cache);
4723 			spin_lock(&cur_trans->dirty_bgs_lock);
4724 		}
4725 
4726 		list_del_init(&cache->dirty_list);
4727 		spin_lock(&cache->lock);
4728 		cache->disk_cache_state = BTRFS_DC_ERROR;
4729 		spin_unlock(&cache->lock);
4730 
4731 		spin_unlock(&cur_trans->dirty_bgs_lock);
4732 		btrfs_put_block_group(cache);
4733 		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
4734 		spin_lock(&cur_trans->dirty_bgs_lock);
4735 	}
4736 	spin_unlock(&cur_trans->dirty_bgs_lock);
4737 
4738 	/*
4739 	 * Refer to the definition of the io_bgs member for details on why it's
4740 	 * safe to use it without any locking.
4741 	 */
4742 	while (!list_empty(&cur_trans->io_bgs)) {
4743 		cache = list_first_entry(&cur_trans->io_bgs,
4744 					 struct btrfs_block_group,
4745 					 io_list);
4746 
4747 		list_del_init(&cache->io_list);
4748 		spin_lock(&cache->lock);
4749 		cache->disk_cache_state = BTRFS_DC_ERROR;
4750 		spin_unlock(&cache->lock);
4751 		btrfs_cleanup_bg_io(cache);
4752 	}
4753 }
4754 
4755 static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
4756 {
4757 	struct btrfs_root *gang[8];
4758 	int i;
4759 	int ret;
4760 
4761 	spin_lock(&fs_info->fs_roots_radix_lock);
4762 	while (1) {
4763 		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
4764 						 (void **)gang, 0,
4765 						 ARRAY_SIZE(gang),
4766 						 BTRFS_ROOT_TRANS_TAG);
4767 		if (ret == 0)
4768 			break;
4769 		for (i = 0; i < ret; i++) {
4770 			struct btrfs_root *root = gang[i];
4771 
4772 			btrfs_qgroup_free_meta_all_pertrans(root);
4773 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
4774 					(unsigned long)btrfs_root_id(root),
4775 					BTRFS_ROOT_TRANS_TAG);
4776 		}
4777 	}
4778 	spin_unlock(&fs_info->fs_roots_radix_lock);
4779 }
4780 
4781 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans)
4782 {
4783 	struct btrfs_fs_info *fs_info = cur_trans->fs_info;
4784 	struct btrfs_device *dev, *tmp;
4785 
4786 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4787 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4788 	ASSERT(list_empty(&cur_trans->io_bgs));
4789 
4790 	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4791 				 post_commit_list) {
4792 		list_del_init(&dev->post_commit_list);
4793 	}
4794 
4795 	btrfs_destroy_delayed_refs(cur_trans);
4796 
4797 	cur_trans->state = TRANS_STATE_COMMIT_START;
4798 	wake_up(&fs_info->transaction_blocked_wait);
4799 
4800 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4801 	wake_up(&fs_info->transaction_wait);
4802 
4803 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4804 				     EXTENT_DIRTY);
4805 	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
4806 
4807 	cur_trans->state = TRANS_STATE_COMPLETED;
4808 	wake_up(&cur_trans->commit_wait);
4809 }
4810 
4811 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4812 {
4813 	struct btrfs_transaction *t;
4814 
4815 	mutex_lock(&fs_info->transaction_kthread_mutex);
4816 
4817 	spin_lock(&fs_info->trans_lock);
4818 	while (!list_empty(&fs_info->trans_list)) {
4819 		t = list_first_entry(&fs_info->trans_list,
4820 				     struct btrfs_transaction, list);
4821 		if (t->state >= TRANS_STATE_COMMIT_PREP) {
4822 			refcount_inc(&t->use_count);
4823 			spin_unlock(&fs_info->trans_lock);
4824 			btrfs_wait_for_commit(fs_info, t->transid);
4825 			btrfs_put_transaction(t);
4826 			spin_lock(&fs_info->trans_lock);
4827 			continue;
4828 		}
4829 		if (t == fs_info->running_transaction) {
4830 			t->state = TRANS_STATE_COMMIT_DOING;
4831 			spin_unlock(&fs_info->trans_lock);
4832 			/*
4833 			 * We wait for 0 num_writers since we don't hold a trans
4834 			 * handle open currently for this transaction.
4835 			 */
4836 			wait_event(t->writer_wait,
4837 				   atomic_read(&t->num_writers) == 0);
4838 		} else {
4839 			spin_unlock(&fs_info->trans_lock);
4840 		}
4841 		btrfs_cleanup_one_transaction(t);
4842 
4843 		spin_lock(&fs_info->trans_lock);
4844 		if (t == fs_info->running_transaction)
4845 			fs_info->running_transaction = NULL;
4846 		list_del_init(&t->list);
4847 		spin_unlock(&fs_info->trans_lock);
4848 
4849 		btrfs_put_transaction(t);
4850 		trace_btrfs_transaction_commit(fs_info);
4851 		spin_lock(&fs_info->trans_lock);
4852 	}
4853 	spin_unlock(&fs_info->trans_lock);
4854 	btrfs_destroy_all_ordered_extents(fs_info);
4855 	btrfs_destroy_delayed_inodes(fs_info);
4856 	btrfs_assert_delayed_root_empty(fs_info);
4857 	btrfs_destroy_all_delalloc_inodes(fs_info);
4858 	btrfs_drop_all_logs(fs_info);
4859 	btrfs_free_all_qgroup_pertrans(fs_info);
4860 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4861 
4862 	return 0;
4863 }
4864 
4865 int btrfs_init_root_free_objectid(struct btrfs_root *root)
4866 {
4867 	BTRFS_PATH_AUTO_FREE(path);
4868 	int ret;
4869 	struct extent_buffer *l;
4870 	struct btrfs_key search_key;
4871 	struct btrfs_key found_key;
4872 	int slot;
4873 
4874 	path = btrfs_alloc_path();
4875 	if (!path)
4876 		return -ENOMEM;
4877 
4878 	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
4879 	search_key.type = -1;
4880 	search_key.offset = (u64)-1;
4881 	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
4882 	if (ret < 0)
4883 		return ret;
4884 	if (ret == 0) {
4885 		/*
4886 		 * Key with offset -1 found: there would have to exist a root
4887 		 * with such an id, but that is outside the valid range.
4888 		 */
4889 		return -EUCLEAN;
4890 	}
4891 	if (path->slots[0] > 0) {
4892 		slot = path->slots[0] - 1;
4893 		l = path->nodes[0];
4894 		btrfs_item_key_to_cpu(l, &found_key, slot);
4895 		root->free_objectid = max_t(u64, found_key.objectid + 1,
4896 					    BTRFS_FIRST_FREE_OBJECTID);
4897 	} else {
4898 		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
4899 	}
4900 
4901 	return 0;
4902 }
4903 
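/*
 * Worked example for the search above: keys sort by (objectid, type,
 * offset), so searching for (BTRFS_LAST_FREE_OBJECTID, 0xff, -1ULL) never
 * matches exactly; btrfs_search_slot() returns 1 with path->slots[0]
 * pointing just past the largest existing key.  Stepping back one slot
 * yields the highest used objectid, e.g. a tree whose last item has
 * objectid 260 results in free_objectid == 261.
 */
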
4904 int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
4905 {
4906 	int ret;

4907 	mutex_lock(&root->objectid_mutex);
4908 
4909 	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
4910 		btrfs_warn(root->fs_info,
4911 			   "the objectid of root %llu reaches its highest value",
4912 			   btrfs_root_id(root));
4913 		ret = -ENOSPC;
4914 		goto out;
4915 	}
4916 
4917 	*objectid = root->free_objectid++;
4918 	ret = 0;
4919 out:
4920 	mutex_unlock(&root->objectid_mutex);
4921 	return ret;
4922 }
4923
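/*
 * Usage sketch for btrfs_get_free_objectid() (hypothetical caller, error
 * handling trimmed): a new inode number is reserved under objectid_mutex,
 * so concurrent callers never see the same value.
 */
static int example_reserve_inode_number(struct btrfs_root *root, u64 *ino)
{
	int ret = btrfs_get_free_objectid(root, ino);

	if (ret)	/* -ENOSPC once the objectid space is exhausted */
		return ret;
	/* *ino is now unique within this root; go create the inode item. */
	return 0;
}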