xref: /linux/fs/btrfs/disk-io.c (revision c44db6c820140ffbc0e293a34c6a6de4b363422b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/blkdev.h>
8 #include <linux/radix-tree.h>
9 #include <linux/writeback.h>
10 #include <linux/workqueue.h>
11 #include <linux/kthread.h>
12 #include <linux/slab.h>
13 #include <linux/migrate.h>
14 #include <linux/ratelimit.h>
15 #include <linux/uuid.h>
16 #include <linux/semaphore.h>
17 #include <linux/error-injection.h>
18 #include <linux/crc32c.h>
19 #include <linux/sched/mm.h>
20 #include <linux/unaligned.h>
21 #include "ctree.h"
22 #include "disk-io.h"
23 #include "transaction.h"
24 #include "btrfs_inode.h"
25 #include "delayed-inode.h"
26 #include "bio.h"
27 #include "print-tree.h"
28 #include "locking.h"
29 #include "tree-log.h"
30 #include "free-space-cache.h"
31 #include "free-space-tree.h"
32 #include "dev-replace.h"
33 #include "raid56.h"
34 #include "sysfs.h"
35 #include "qgroup.h"
36 #include "compression.h"
37 #include "tree-checker.h"
38 #include "ref-verify.h"
39 #include "block-group.h"
40 #include "discard.h"
41 #include "space-info.h"
42 #include "zoned.h"
43 #include "subpage.h"
44 #include "fs.h"
45 #include "accessors.h"
46 #include "extent-tree.h"
47 #include "root-tree.h"
48 #include "defrag.h"
49 #include "uuid-tree.h"
50 #include "relocation.h"
51 #include "scrub.h"
52 #include "super.h"
53 #include "delayed-inode.h"
54 
55 #define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
56 				 BTRFS_HEADER_FLAG_RELOC |\
57 				 BTRFS_SUPER_FLAG_ERROR |\
58 				 BTRFS_SUPER_FLAG_SEEDING |\
59 				 BTRFS_SUPER_FLAG_METADUMP |\
60 				 BTRFS_SUPER_FLAG_METADUMP_V2)
61 
62 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
63 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
64 
65 /*
66  * Compute the csum of a btree block and store the result to provided buffer.
67  */
68 static void csum_tree_block(struct extent_buffer *buf, u8 *result)
69 {
70 	struct btrfs_fs_info *fs_info = buf->fs_info;
71 	int num_pages;
72 	u32 first_page_part;
73 	struct btrfs_csum_ctx csum;
74 	char *kaddr;
75 	int i;
76 
77 	btrfs_csum_init(&csum, fs_info->csum_type);
78 
79 	if (buf->addr) {
80 		/* Pages are contiguous, handle them as a big one. */
81 		kaddr = buf->addr;
82 		first_page_part = fs_info->nodesize;
83 		num_pages = 1;
84 	} else {
85 		kaddr = folio_address(buf->folios[0]);
86 		first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
87 		num_pages = num_extent_pages(buf);
88 	}
89 
90 	btrfs_csum_update(&csum, kaddr + BTRFS_CSUM_SIZE,
91 			  first_page_part - BTRFS_CSUM_SIZE);
92 
93 	/*
94 	 * Only the multiple single-page folios case reaches here.
95 	 *
96 	 * The nodesize <= PAGE_SIZE and large folio cases have already been
97 	 * fully handled by the btrfs_csum_update() call above.
98 	 */
99 	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
100 		kaddr = folio_address(buf->folios[i]);
101 		btrfs_csum_update(&csum, kaddr, PAGE_SIZE);
102 	}
103 	memset(result, 0, BTRFS_CSUM_SIZE);
104 	btrfs_csum_final(&csum, result);
105 }
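/*
 * Illustrative sketch (not part of the original file): with the default
 * crc32c, the first BTRFS_CSUM_SIZE bytes of the header hold the checksum of
 * everything after them. For a contiguous nodesize buffer the equivalent
 * computation, assuming the usual crc32c convention of inverted seed and
 * result, would be roughly:
 *
 *	u32 crc = crc32c(~0U, kaddr + BTRFS_CSUM_SIZE,
 *			 fs_info->nodesize - BTRFS_CSUM_SIZE);
 *	put_unaligned_le32(~crc, result);
 *
 * The btrfs_csum_*() helpers generalize the same flow to xxhash64, sha256
 * and blake2b.
 */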
106 
107 /*
108  * We can't consider a given block up to date unless the transid of the
109  * block matches the transid in the parent node's pointer.  This is how we
110  * detect blocks that either didn't get written at all or got written
111  * in the wrong place.
112  */
113 int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, bool atomic)
114 {
115 	if (!extent_buffer_uptodate(eb))
116 		return 0;
117 
118 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
119 		return 1;
120 
121 	if (atomic)
122 		return -EAGAIN;
123 
124 	if (!extent_buffer_uptodate(eb) ||
125 	    btrfs_header_generation(eb) != parent_transid) {
126 		btrfs_err_rl(eb->fs_info,
127 "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
128 			eb->start, eb->read_mirror,
129 			parent_transid, btrfs_header_generation(eb));
130 		clear_extent_buffer_uptodate(eb);
131 		return 0;
132 	}
133 	return 1;
134 }
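/*
 * Illustrative sketch (not part of the original file): a caller walking down
 * the tree typically derives @parent_transid from the parent node's pointer
 * to this block, e.g.:
 *
 *	u64 gen = btrfs_node_ptr_generation(parent, slot);
 *
 *	if (btrfs_buffer_uptodate(eb, gen, false) <= 0)
 *		... reread the block, possibly from another mirror ...
 *
 * A @parent_transid of zero skips the generation check, and atomic callers
 * get -EAGAIN so the verification can be redone in a blocking context.
 */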
135 
136 static bool btrfs_supported_super_csum(u16 csum_type)
137 {
138 	switch (csum_type) {
139 	case BTRFS_CSUM_TYPE_CRC32:
140 	case BTRFS_CSUM_TYPE_XXHASH:
141 	case BTRFS_CSUM_TYPE_SHA256:
142 	case BTRFS_CSUM_TYPE_BLAKE2:
143 		return true;
144 	default:
145 		return false;
146 	}
147 }
148 
149 /*
150  * Return 0 if the superblock checksum type matches the checksum value of that
151  * algorithm. Pass the raw disk superblock data.
152  */
153 int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
154 			   const struct btrfs_super_block *disk_sb)
155 {
156 	u8 result[BTRFS_CSUM_SIZE];
157 
158 	/*
159 	 * The super_block structure does not span the whole
160 	 * BTRFS_SUPER_INFO_SIZE range; we expect the unused space to be
161 	 * filled with zeros, and it is included in the checksum.
162 	 */
163 	btrfs_csum(fs_info->csum_type, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
164 		   BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
165 
166 	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
167 		return 1;
168 
169 	return 0;
170 }
171 
172 static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
173 				      int mirror_num)
174 {
175 	struct btrfs_fs_info *fs_info = eb->fs_info;
176 	const u32 step = min(fs_info->nodesize, PAGE_SIZE);
177 	const u32 nr_steps = eb->len / step;
178 	phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
179 
180 	if (sb_rdonly(fs_info->sb))
181 		return -EROFS;
182 
183 	for (int i = 0; i < num_extent_pages(eb); i++) {
184 		struct folio *folio = eb->folios[i];
185 
186 		/* No large folio support yet. */
187 		ASSERT(folio_order(folio) == 0);
188 		ASSERT(i < nr_steps);
189 
190 		/*
191 		 * For nodesize < page size, there is just one paddr, with some
192 		 * offset inside the page.
193 		 *
194 	 * For nodesize >= page size, there are one or more paddrs, and
195 	 * eb->start must be aligned to the page boundary.
196 		 */
197 		paddrs[i] = page_to_phys(&folio->page) + offset_in_page(eb->start);
198 	}
199 
200 	return btrfs_repair_io_failure(fs_info, 0, eb->start, eb->len,
201 				       eb->start, paddrs, step, mirror_num);
202 }
203 
204 /*
205  * Helper to read a given tree block, retrying as required when the
206  * checksums don't match and we have alternate mirrors to try.
207  *
208  * @check:		expected tree parentness check, see the comments of the
209  *			structure for details.
210  */
211 int btrfs_read_extent_buffer(struct extent_buffer *eb,
212 			     const struct btrfs_tree_parent_check *check)
213 {
214 	struct btrfs_fs_info *fs_info = eb->fs_info;
215 	int failed = 0;
216 	int ret;
217 	int num_copies = 0;
218 	int mirror_num = 0;
219 	int failed_mirror = 0;
220 
221 	ASSERT(check);
222 
223 	while (1) {
224 		ret = read_extent_buffer_pages(eb, mirror_num, check);
225 		if (!ret)
226 			break;
227 
228 		num_copies = btrfs_num_copies(fs_info,
229 					      eb->start, eb->len);
230 		if (num_copies == 1)
231 			break;
232 
233 		if (!failed_mirror) {
234 			failed = 1;
235 			failed_mirror = eb->read_mirror;
236 		}
237 
238 		mirror_num++;
239 		if (mirror_num == failed_mirror)
240 			mirror_num++;
241 
242 		if (mirror_num > num_copies)
243 			break;
244 	}
245 
246 	if (failed && !ret && failed_mirror)
247 		btrfs_repair_eb_io_failure(eb, failed_mirror);
248 
249 	return ret;
250 }
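/*
 * Worked example: suppose num_copies == 3 and the initial read (with
 * mirror_num == 0, i.e. "any copy") fails after being served by mirror 2.
 * failed_mirror is then 2, so the loop retries mirror 1, skips mirror 2,
 * retries mirror 3, and gives up once mirror_num exceeds num_copies. If a
 * retry succeeds, btrfs_repair_eb_io_failure() writes the good copy back
 * over the failed mirror.
 */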
251 
252 /*
253  * Checksum a dirty tree block before IO.
254  */
255 int btree_csum_one_bio(struct btrfs_bio *bbio)
256 {
257 	struct extent_buffer *eb = bbio->private;
258 	struct btrfs_fs_info *fs_info = eb->fs_info;
259 	u64 found_start = btrfs_header_bytenr(eb);
260 	u64 last_trans;
261 	u8 result[BTRFS_CSUM_SIZE];
262 	int ret;
263 
264 	/* Btree blocks are always contiguous on disk. */
265 	if (WARN_ON_ONCE(bbio->file_offset != eb->start))
266 		return -EIO;
267 	if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
268 		return -EIO;
269 
270 	/*
271 	 * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't
272 	 * checksum it but zero out its content. This is done to preserve
273 	 * ordering of I/O without unnecessarily writing out data.
274 	 */
275 	if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
276 		memzero_extent_buffer(eb, 0, eb->len);
277 		return 0;
278 	}
279 
280 	if (WARN_ON_ONCE(found_start != eb->start))
281 		return -EIO;
282 	if (WARN_ON(!btrfs_meta_folio_test_uptodate(eb->folios[0], eb)))
283 		return -EIO;
284 
285 	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
286 				    offsetof(struct btrfs_header, fsid),
287 				    BTRFS_FSID_SIZE) == 0);
288 	csum_tree_block(eb, result);
289 
290 	if (btrfs_header_level(eb))
291 		ret = btrfs_check_node(eb);
292 	else
293 		ret = btrfs_check_leaf(eb);
294 
295 	if (ret < 0)
296 		goto error;
297 
298 	/*
299 	 * Also check the generation: any eb reaching here must be newer than
300 	 * the last committed one, otherwise something is seriously wrong.
301 	 */
302 	last_trans = btrfs_get_last_trans_committed(fs_info);
303 	if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
304 		ret = -EUCLEAN;
305 		btrfs_err(fs_info,
306 			"block=%llu bad generation, have %llu expect > %llu",
307 			  eb->start, btrfs_header_generation(eb), last_trans);
308 		goto error;
309 	}
310 	write_extent_buffer(eb, result, 0, fs_info->csum_size);
311 	return 0;
312 
313 error:
314 	btrfs_print_tree(eb, 0);
315 	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
316 		  eb->start);
317 	/*
318 	 * Be noisy if this is an extent buffer from a log tree. We don't abort
319 	 * a transaction in case there's a bad log tree extent buffer; we just
320 	 * fall back to a transaction commit. Still we want to know when there is
321 	 * a bad log tree extent buffer, as that may signal a bug somewhere.
322 	 */
323 	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
324 		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
325 	return ret;
326 }
327 
328 static bool check_tree_block_fsid(struct extent_buffer *eb)
329 {
330 	struct btrfs_fs_info *fs_info = eb->fs_info;
331 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
332 	u8 fsid[BTRFS_FSID_SIZE];
333 
334 	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
335 			   BTRFS_FSID_SIZE);
336 
337 	/*
338 	 * alloc_fs_devices() copies the fsid into fs_devices::metadata_uuid.
339 	 * This is then overwritten by the metadata_uuid if it is present in
340 	 * device_list_add(). The same is true for a seed device as well. So use of
341 	 * fs_devices::metadata_uuid is appropriate here.
342 	 */
343 	if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
344 		return false;
345 
346 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
347 		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
348 			return false;
349 
350 	return true;
351 }
352 
353 /* Do basic extent buffer checks at read time */
354 int btrfs_validate_extent_buffer(struct extent_buffer *eb,
355 				 const struct btrfs_tree_parent_check *check)
356 {
357 	struct btrfs_fs_info *fs_info = eb->fs_info;
358 	u64 found_start;
359 	const u32 csum_size = fs_info->csum_size;
360 	u8 found_level;
361 	u8 result[BTRFS_CSUM_SIZE];
362 	const u8 *header_csum;
363 	int ret = 0;
364 	const bool ignore_csum = btrfs_test_opt(fs_info, IGNOREMETACSUMS);
365 
366 	ASSERT(check);
367 
368 	found_start = btrfs_header_bytenr(eb);
369 	if (unlikely(found_start != eb->start)) {
370 		btrfs_err_rl(fs_info,
371 			"bad tree block start, mirror %u want %llu have %llu",
372 			     eb->read_mirror, eb->start, found_start);
373 		return -EIO;
374 	}
375 	if (unlikely(check_tree_block_fsid(eb))) {
376 		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
377 			     eb->start, eb->read_mirror);
378 		return -EIO;
379 	}
380 	found_level = btrfs_header_level(eb);
381 	if (unlikely(found_level >= BTRFS_MAX_LEVEL)) {
382 		btrfs_err(fs_info,
383 			"bad tree block level, mirror %u level %d on logical %llu",
384 			eb->read_mirror, btrfs_header_level(eb), eb->start);
385 		return -EIO;
386 	}
387 
388 	csum_tree_block(eb, result);
389 	header_csum = folio_address(eb->folios[0]) +
390 		get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum));
391 
392 	if (memcmp(result, header_csum, csum_size) != 0) {
393 		btrfs_warn_rl(fs_info,
394 "checksum verify failed on logical %llu mirror %u wanted " BTRFS_CSUM_FMT " found " BTRFS_CSUM_FMT " level %d%s",
395 			      eb->start, eb->read_mirror,
396 			      BTRFS_CSUM_FMT_VALUE(csum_size, header_csum),
397 			      BTRFS_CSUM_FMT_VALUE(csum_size, result),
398 			      btrfs_header_level(eb),
399 			      ignore_csum ? ", ignored" : "");
400 		if (unlikely(!ignore_csum))
401 			return -EUCLEAN;
402 	}
403 
404 	if (unlikely(found_level != check->level)) {
405 		btrfs_err(fs_info,
406 		"level verify failed on logical %llu mirror %u wanted %u found %u",
407 			  eb->start, eb->read_mirror, check->level, found_level);
408 		return -EIO;
409 	}
410 	if (unlikely(check->transid &&
411 		     btrfs_header_generation(eb) != check->transid)) {
412 		btrfs_err_rl(eb->fs_info,
413 "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
414 				eb->start, eb->read_mirror, check->transid,
415 				btrfs_header_generation(eb));
416 		return -EIO;
417 	}
418 	if (check->has_first_key) {
419 		const struct btrfs_key *expect_key = &check->first_key;
420 		struct btrfs_key found_key;
421 
422 		if (found_level)
423 			btrfs_node_key_to_cpu(eb, &found_key, 0);
424 		else
425 			btrfs_item_key_to_cpu(eb, &found_key, 0);
426 		if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
427 			btrfs_err(fs_info,
428 "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
429 				  eb->start, check->transid,
430 				  expect_key->objectid,
431 				  expect_key->type, expect_key->offset,
432 				  found_key.objectid, found_key.type,
433 				  found_key.offset);
434 			return -EUCLEAN;
435 		}
436 	}
437 	if (check->owner_root) {
438 		ret = btrfs_check_eb_owner(eb, check->owner_root);
439 		if (ret < 0)
440 			return ret;
441 	}
442 
443 	/* If this is a leaf block and it is corrupt, just return -EIO. */
444 	if (found_level == 0 && btrfs_check_leaf(eb))
445 		ret = -EIO;
446 
447 	if (found_level > 0 && btrfs_check_node(eb))
448 		ret = -EIO;
449 
450 	if (ret)
451 		btrfs_err(fs_info,
452 		"read time tree block corruption detected on logical %llu mirror %u",
453 			  eb->start, eb->read_mirror);
454 	return ret;
455 }
456 
457 #ifdef CONFIG_MIGRATION
458 static int btree_migrate_folio(struct address_space *mapping,
459 		struct folio *dst, struct folio *src, enum migrate_mode mode)
460 {
461 	/*
462 	 * We can't safely write a btree page from here,
463 	 * as we haven't done the locking hook.
464 	 */
465 	if (folio_test_dirty(src))
466 		return -EAGAIN;
467 	/*
468 	 * Buffers may be managed in a filesystem-specific way.
469 	 * We must have no buffers or drop them.
470 	 */
471 	if (folio_get_private(src) &&
472 	    !filemap_release_folio(src, GFP_KERNEL))
473 		return -EAGAIN;
474 	return migrate_folio(mapping, dst, src, mode);
475 }
476 #else
477 #define btree_migrate_folio NULL
478 #endif
479 
480 static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
481 {
482 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
483 		return false;
484 
485 	return try_release_extent_buffer(folio);
486 }
487 
488 static void btree_invalidate_folio(struct folio *folio, size_t offset,
489 				 size_t length)
490 {
491 	struct extent_io_tree *tree;
492 
493 	tree = &folio_to_inode(folio)->io_tree;
494 	extent_invalidate_folio(tree, folio, offset);
495 	btree_release_folio(folio, GFP_NOFS);
496 	if (folio_get_private(folio)) {
497 		btrfs_warn(folio_to_fs_info(folio),
498 			   "folio private not zero on folio %llu",
499 			   (unsigned long long)folio_pos(folio));
500 		folio_detach_private(folio);
501 	}
502 }
503 
504 #ifdef DEBUG
505 static bool btree_dirty_folio(struct address_space *mapping,
506 		struct folio *folio)
507 {
508 	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
509 	struct btrfs_subpage_info *spi = fs_info->subpage_info;
510 	struct btrfs_subpage *subpage;
511 	struct extent_buffer *eb;
512 	int cur_bit = 0;
513 	u64 page_start = folio_pos(folio);
514 
515 	if (fs_info->sectorsize == PAGE_SIZE) {
516 		eb = folio_get_private(folio);
517 		BUG_ON(!eb);
518 		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
519 		BUG_ON(!atomic_read(&eb->refs));
520 		btrfs_assert_tree_write_locked(eb);
521 		return filemap_dirty_folio(mapping, folio);
522 	}
523 
524 	ASSERT(spi);
525 	subpage = folio_get_private(folio);
526 
527 	for (cur_bit = spi->dirty_offset;
528 	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
529 	     cur_bit++) {
530 		unsigned long flags;
531 		u64 cur;
532 
533 		spin_lock_irqsave(&subpage->lock, flags);
534 		if (!test_bit(cur_bit, subpage->bitmaps)) {
535 			spin_unlock_irqrestore(&subpage->lock, flags);
536 			continue;
537 		}
538 		spin_unlock_irqrestore(&subpage->lock, flags);
539 		cur = page_start + cur_bit * fs_info->sectorsize;
540 
541 		eb = find_extent_buffer(fs_info, cur);
542 		ASSERT(eb);
543 		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
544 		ASSERT(atomic_read(&eb->refs));
545 		btrfs_assert_tree_write_locked(eb);
546 		free_extent_buffer(eb);
547 
548 		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
549 	}
550 	return filemap_dirty_folio(mapping, folio);
551 }
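/*
 * Worked example: with sectorsize == 4K and nodesize == 16K, each extent
 * buffer covers four bits of the subpage dirty bitmap. After the eb owning
 * the current bit has been verified, cur_bit is advanced by
 * (nodesize >> sectorsize_bits) - 1 == 3, and the loop increment then moves
 * it past all four bits to the first bit of the next possible eb.
 */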
552 #else
553 #define btree_dirty_folio filemap_dirty_folio
554 #endif
555 
556 static const struct address_space_operations btree_aops = {
557 	.writepages	= btree_writepages,
558 	.release_folio	= btree_release_folio,
559 	.invalidate_folio = btree_invalidate_folio,
560 	.migrate_folio	= btree_migrate_folio,
561 	.dirty_folio	= btree_dirty_folio,
562 };
563 
564 struct extent_buffer *btrfs_find_create_tree_block(
565 						struct btrfs_fs_info *fs_info,
566 						u64 bytenr, u64 owner_root,
567 						int level)
568 {
569 	if (btrfs_is_testing(fs_info))
570 		return alloc_test_extent_buffer(fs_info, bytenr);
571 	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
572 }
573 
574 /*
575  * Read the tree block at logical address @bytenr and perform various basic
576  * but critical verifications.
577  *
578  * @check:		expected tree parentness check, see comments of the
579  *			structure for details.
580  */
581 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
582 				      struct btrfs_tree_parent_check *check)
583 {
584 	struct extent_buffer *buf = NULL;
585 	int ret;
586 
587 	ASSERT(check);
588 
589 	buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
590 					   check->level);
591 	if (IS_ERR(buf))
592 		return buf;
593 
594 	ret = btrfs_read_extent_buffer(buf, check);
595 	if (ret) {
596 		free_extent_buffer_stale(buf);
597 		return ERR_PTR(ret);
598 	}
599 	return buf;
600 
601 }
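/*
 * Illustrative sketch: callers normally fill the parent check from the root
 * item they are following, as read_tree_root_path() below does:
 *
 *	struct btrfs_tree_parent_check check = { 0 };
 *
 *	check.level = btrfs_root_level(&root->root_item);
 *	check.transid = btrfs_root_generation(&root->root_item);
 *	check.owner_root = key->objectid;
 *	eb = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
 *			     &check);
 */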
602 
603 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
604 					   u64 objectid, gfp_t flags)
605 {
606 	struct btrfs_root *root;
607 
608 	root = kzalloc_obj(*root, flags);
609 	if (!root)
610 		return NULL;
611 
612 	root->fs_info = fs_info;
613 	root->root_key.objectid = objectid;
614 	RB_CLEAR_NODE(&root->rb_node);
615 
616 	xa_init(&root->inodes);
617 	xa_init(&root->delayed_nodes);
618 
619 	btrfs_init_root_block_rsv(root);
620 
621 	INIT_LIST_HEAD(&root->dirty_list);
622 	INIT_LIST_HEAD(&root->root_list);
623 	INIT_LIST_HEAD(&root->delalloc_inodes);
624 	INIT_LIST_HEAD(&root->delalloc_root);
625 	INIT_LIST_HEAD(&root->ordered_extents);
626 	INIT_LIST_HEAD(&root->ordered_root);
627 	INIT_LIST_HEAD(&root->reloc_dirty_list);
628 	spin_lock_init(&root->delalloc_lock);
629 	spin_lock_init(&root->ordered_extent_lock);
630 	spin_lock_init(&root->accounting_lock);
631 	spin_lock_init(&root->qgroup_meta_rsv_lock);
632 	mutex_init(&root->objectid_mutex);
633 	mutex_init(&root->log_mutex);
634 	mutex_init(&root->ordered_extent_mutex);
635 	mutex_init(&root->delalloc_mutex);
636 	init_waitqueue_head(&root->qgroup_flush_wait);
637 	init_waitqueue_head(&root->log_writer_wait);
638 	init_waitqueue_head(&root->log_commit_wait[0]);
639 	init_waitqueue_head(&root->log_commit_wait[1]);
640 	INIT_LIST_HEAD(&root->log_ctxs[0]);
641 	INIT_LIST_HEAD(&root->log_ctxs[1]);
642 	atomic_set(&root->log_commit[0], 0);
643 	atomic_set(&root->log_commit[1], 0);
644 	atomic_set(&root->log_writers, 0);
645 	atomic_set(&root->log_batch, 0);
646 	refcount_set(&root->refs, 1);
647 	atomic_set(&root->snapshot_force_cow, 0);
648 	atomic_set(&root->nr_swapfiles, 0);
649 	root->log_transid_committed = -1;
650 	if (!btrfs_is_testing(fs_info)) {
651 		btrfs_extent_io_tree_init(fs_info, &root->dirty_log_pages,
652 					  IO_TREE_ROOT_DIRTY_LOG_PAGES);
653 		btrfs_extent_io_tree_init(fs_info, &root->log_csum_range,
654 					  IO_TREE_LOG_CSUM_RANGE);
655 	}
656 
657 	spin_lock_init(&root->root_item_lock);
658 	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
659 #ifdef CONFIG_BTRFS_DEBUG
660 	INIT_LIST_HEAD(&root->leak_list);
661 	spin_lock(&fs_info->fs_roots_radix_lock);
662 	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
663 	spin_unlock(&fs_info->fs_roots_radix_lock);
664 #endif
665 
666 	return root;
667 }
668 
669 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
670 /* Should only be used by the testing infrastructure */
671 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
672 {
673 	struct btrfs_root *root;
674 
675 	if (!fs_info)
676 		return ERR_PTR(-EINVAL);
677 
678 	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
679 	if (!root)
680 		return ERR_PTR(-ENOMEM);
681 
682 	/* Selftests hand out fake tree block bytenrs sequentially, starting at 0. */
683 	root->alloc_bytenr = 0;
684 
685 	return root;
686 }
687 #endif
688 
689 static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
690 {
691 	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
692 	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);
693 
694 	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
695 }
696 
697 static int global_root_key_cmp(const void *k, const struct rb_node *node)
698 {
699 	const struct btrfs_key *key = k;
700 	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);
701 
702 	return btrfs_comp_cpu_keys(key, &root->root_key);
703 }
704 
705 int btrfs_global_root_insert(struct btrfs_root *root)
706 {
707 	struct btrfs_fs_info *fs_info = root->fs_info;
708 	struct rb_node *tmp;
709 	int ret = 0;
710 
711 	write_lock(&fs_info->global_root_lock);
712 	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
713 	write_unlock(&fs_info->global_root_lock);
714 
715 	if (tmp) {
716 		ret = -EEXIST;
717 		btrfs_warn(fs_info, "global root %llu %llu already exists",
718 			   btrfs_root_id(root), root->root_key.offset);
719 	}
720 	return ret;
721 }
722 
723 void btrfs_global_root_delete(struct btrfs_root *root)
724 {
725 	struct btrfs_fs_info *fs_info = root->fs_info;
726 
727 	write_lock(&fs_info->global_root_lock);
728 	rb_erase(&root->rb_node, &fs_info->global_root_tree);
729 	write_unlock(&fs_info->global_root_lock);
730 }
731 
732 struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
733 				     struct btrfs_key *key)
734 {
735 	struct rb_node *node;
736 	struct btrfs_root *root = NULL;
737 
738 	read_lock(&fs_info->global_root_lock);
739 	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
740 	if (node)
741 		root = container_of(node, struct btrfs_root, rb_node);
742 	read_unlock(&fs_info->global_root_lock);
743 
744 	return root;
745 }
746 
747 static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
748 {
749 	struct btrfs_block_group *block_group;
750 	u64 ret;
751 
752 	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
753 		return 0;
754 
755 	if (bytenr)
756 		block_group = btrfs_lookup_block_group(fs_info, bytenr);
757 	else
758 		block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
759 	ASSERT(block_group);
760 	if (!block_group)
761 		return 0;
762 	ret = block_group->global_root_id;
763 	btrfs_put_block_group(block_group);
764 
765 	return ret;
766 }
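/*
 * Worked example: without EXTENT_TREE_V2 this always returns 0, so a single
 * global extent/csum/free-space tree (root_key.offset == 0) serves the whole
 * filesystem. With EXTENT_TREE_V2, the block group containing @bytenr
 * carries a global_root_id that selects which copy of those trees services
 * that range.
 */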
767 
768 struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
769 {
770 	struct btrfs_key key = {
771 		.objectid = BTRFS_CSUM_TREE_OBJECTID,
772 		.type = BTRFS_ROOT_ITEM_KEY,
773 		.offset = btrfs_global_root_id(fs_info, bytenr),
774 	};
775 
776 	return btrfs_global_root(fs_info, &key);
777 }
778 
779 struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
780 {
781 	struct btrfs_key key = {
782 		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
783 		.type = BTRFS_ROOT_ITEM_KEY,
784 		.offset = btrfs_global_root_id(fs_info, bytenr),
785 	};
786 
787 	return btrfs_global_root(fs_info, &key);
788 }
789 
790 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
791 				     u64 objectid)
792 {
793 	struct btrfs_fs_info *fs_info = trans->fs_info;
794 	struct extent_buffer *leaf;
795 	struct btrfs_root *tree_root = fs_info->tree_root;
796 	struct btrfs_root *root;
797 	unsigned int nofs_flag;
798 	int ret = 0;
799 
800 	/*
801 	 * We're holding a transaction handle, so use a NOFS memory allocation
802 	 * context to avoid deadlock if reclaim happens.
803 	 */
804 	nofs_flag = memalloc_nofs_save();
805 	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
806 	memalloc_nofs_restore(nofs_flag);
807 	if (!root)
808 		return ERR_PTR(-ENOMEM);
809 
810 	root->root_key.objectid = objectid;
811 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
812 	root->root_key.offset = 0;
813 
814 	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
815 				      0, BTRFS_NESTING_NORMAL);
816 	if (IS_ERR(leaf)) {
817 		ret = PTR_ERR(leaf);
818 		leaf = NULL;
819 		goto fail;
820 	}
821 
822 	root->node = leaf;
823 	btrfs_mark_buffer_dirty(trans, leaf);
824 
825 	root->commit_root = btrfs_root_node(root);
826 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
827 
828 	btrfs_set_root_flags(&root->root_item, 0);
829 	btrfs_set_root_limit(&root->root_item, 0);
830 	btrfs_set_root_bytenr(&root->root_item, leaf->start);
831 	btrfs_set_root_generation(&root->root_item, trans->transid);
832 	btrfs_set_root_level(&root->root_item, 0);
833 	btrfs_set_root_refs(&root->root_item, 1);
834 	btrfs_set_root_used(&root->root_item, leaf->len);
835 	btrfs_set_root_last_snapshot(&root->root_item, 0);
836 	btrfs_set_root_dirid(&root->root_item, 0);
837 	if (btrfs_is_fstree(objectid))
838 		generate_random_guid(root->root_item.uuid);
839 	else
840 		export_guid(root->root_item.uuid, &guid_null);
841 	btrfs_set_root_drop_level(&root->root_item, 0);
842 
843 	btrfs_tree_unlock(leaf);
844 
845 	ret = btrfs_insert_root(trans, tree_root, &root->root_key, &root->root_item);
846 	if (ret)
847 		goto fail;
848 
849 	return root;
850 
851 fail:
852 	btrfs_put_root(root);
853 
854 	return ERR_PTR(ret);
855 }
856 
857 static struct btrfs_root *alloc_log_tree(struct btrfs_fs_info *fs_info)
858 {
859 	struct btrfs_root *root;
860 
861 	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
862 	if (!root)
863 		return ERR_PTR(-ENOMEM);
864 
865 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
866 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
867 	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
868 
869 	return root;
870 }
871 
872 int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
873 			      struct btrfs_root *root)
874 {
875 	struct extent_buffer *leaf;
876 
877 	/*
878 	 * DON'T set SHAREABLE bit for log trees.
879 	 *
880 	 * Log trees are not exposed to user space thus can't be snapshotted,
881 	 * and they go away before a real commit is actually done.
882 	 *
883 	 * They do store pointers to file data extents, and those reference
884 	 * counts still get updated (along with back refs to the log tree).
885 	 */
886 
887 	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
888 			NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
889 	if (IS_ERR(leaf))
890 		return PTR_ERR(leaf);
891 
892 	root->node = leaf;
893 
894 	btrfs_mark_buffer_dirty(trans, root->node);
895 	btrfs_tree_unlock(root->node);
896 
897 	return 0;
898 }
899 
900 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
901 			     struct btrfs_fs_info *fs_info)
902 {
903 	struct btrfs_root *log_root;
904 
905 	log_root = alloc_log_tree(fs_info);
906 	if (IS_ERR(log_root))
907 		return PTR_ERR(log_root);
908 
909 	if (!btrfs_is_zoned(fs_info)) {
910 		int ret = btrfs_alloc_log_tree_node(trans, log_root);
911 
912 		if (ret) {
913 			btrfs_put_root(log_root);
914 			return ret;
915 		}
916 	}
917 
918 	WARN_ON(fs_info->log_root_tree);
919 	fs_info->log_root_tree = log_root;
920 	return 0;
921 }
922 
923 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
924 		       struct btrfs_root *root)
925 {
926 	struct btrfs_fs_info *fs_info = root->fs_info;
927 	struct btrfs_root *log_root;
928 	struct btrfs_inode_item *inode_item;
929 	int ret;
930 
931 	log_root = alloc_log_tree(fs_info);
932 	if (IS_ERR(log_root))
933 		return PTR_ERR(log_root);
934 
935 	ret = btrfs_alloc_log_tree_node(trans, log_root);
936 	if (ret) {
937 		btrfs_put_root(log_root);
938 		return ret;
939 	}
940 
941 	btrfs_set_root_last_trans(log_root, trans->transid);
942 	log_root->root_key.offset = btrfs_root_id(root);
943 
944 	inode_item = &log_root->root_item.inode;
945 	btrfs_set_stack_inode_generation(inode_item, 1);
946 	btrfs_set_stack_inode_size(inode_item, 3);
947 	btrfs_set_stack_inode_nlink(inode_item, 1);
948 	btrfs_set_stack_inode_nbytes(inode_item,
949 				     fs_info->nodesize);
950 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
951 
952 	btrfs_set_root_node(&log_root->root_item, log_root->node);
953 
954 	WARN_ON(root->log_root);
955 	root->log_root = log_root;
956 	btrfs_set_root_log_transid(root, 0);
957 	root->log_transid_committed = -1;
958 	btrfs_set_root_last_log_commit(root, 0);
959 	return 0;
960 }
961 
962 static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
963 					      struct btrfs_path *path,
964 					      const struct btrfs_key *key)
965 {
966 	struct btrfs_root *root;
967 	struct btrfs_tree_parent_check check = { 0 };
968 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
969 	u64 generation;
970 	int ret;
971 	int level;
972 
973 	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
974 	if (!root)
975 		return ERR_PTR(-ENOMEM);
976 
977 	ret = btrfs_find_root(tree_root, key, path,
978 			      &root->root_item, &root->root_key);
979 	if (ret) {
980 		if (ret > 0)
981 			ret = -ENOENT;
982 		goto fail;
983 	}
984 
985 	generation = btrfs_root_generation(&root->root_item);
986 	level = btrfs_root_level(&root->root_item);
987 	check.level = level;
988 	check.transid = generation;
989 	check.owner_root = key->objectid;
990 	root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
991 				     &check);
992 	if (IS_ERR(root->node)) {
993 		ret = PTR_ERR(root->node);
994 		root->node = NULL;
995 		goto fail;
996 	}
997 	if (unlikely(!btrfs_buffer_uptodate(root->node, generation, false))) {
998 		ret = -EIO;
999 		goto fail;
1000 	}
1001 
1002 	/*
1003 	 * For a real fs, and for trees other than log/reloc trees, the root
1004 	 * owner must match its root node's owner.
1005 	 */
1006 	if (unlikely(!btrfs_is_testing(fs_info) &&
1007 		     btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
1008 		     btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
1009 		     btrfs_root_id(root) != btrfs_header_owner(root->node))) {
1010 		btrfs_crit(fs_info,
1011 "root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
1012 			   btrfs_root_id(root), root->node->start,
1013 			   btrfs_header_owner(root->node),
1014 			   btrfs_root_id(root));
1015 		ret = -EUCLEAN;
1016 		goto fail;
1017 	}
1018 	root->commit_root = btrfs_root_node(root);
1019 	return root;
1020 fail:
1021 	btrfs_put_root(root);
1022 	return ERR_PTR(ret);
1023 }
1024 
1025 struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1026 					const struct btrfs_key *key)
1027 {
1028 	struct btrfs_root *root;
1029 	BTRFS_PATH_AUTO_FREE(path);
1030 
1031 	path = btrfs_alloc_path();
1032 	if (!path)
1033 		return ERR_PTR(-ENOMEM);
1034 	root = read_tree_root_path(tree_root, path, key);
1035 
1036 	return root;
1037 }
1038 
1039 /*
1040  * Initialize subvolume root in-memory structure.
1041  *
1042  * @anon_dev:	anonymous device to attach to the root; if zero, allocate a new one
1043  *
1044  * In case of failure the caller is responsible to call btrfs_free_fs_root()
1045  */
1046 static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
1047 {
1048 	int ret;
1049 
1050 	btrfs_drew_lock_init(&root->snapshot_lock);
1051 
1052 	if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
1053 	    !btrfs_is_data_reloc_root(root) &&
1054 	    btrfs_is_fstree(btrfs_root_id(root))) {
1055 		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
1056 		btrfs_check_and_init_root_item(&root->root_item);
1057 	}
1058 
1059 	/*
1060 	 * Don't assign an anonymous block device to roots that are not exposed
1061 	 * to userspace; the id pool is limited to 1M.
1062 	 */
1063 	if (btrfs_is_fstree(btrfs_root_id(root)) &&
1064 	    btrfs_root_refs(&root->root_item) > 0) {
1065 		if (!anon_dev) {
1066 			ret = get_anon_bdev(&root->anon_dev);
1067 			if (ret)
1068 				return ret;
1069 		} else {
1070 			root->anon_dev = anon_dev;
1071 		}
1072 	}
1073 
1074 	mutex_lock(&root->objectid_mutex);
1075 	ret = btrfs_init_root_free_objectid(root);
1076 	if (ret) {
1077 		mutex_unlock(&root->objectid_mutex);
1078 		return ret;
1079 	}
1080 
1081 	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
1082 
1083 	mutex_unlock(&root->objectid_mutex);
1084 
1085 	return 0;
1086 }
1087 
1088 static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1089 					       u64 root_id)
1090 {
1091 	struct btrfs_root *root;
1092 
1093 	spin_lock(&fs_info->fs_roots_radix_lock);
1094 	root = radix_tree_lookup(&fs_info->fs_roots_radix,
1095 				 (unsigned long)root_id);
1096 	root = btrfs_grab_root(root);
1097 	spin_unlock(&fs_info->fs_roots_radix_lock);
1098 	return root;
1099 }
1100 
1101 static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
1102 						u64 objectid)
1103 {
1104 	struct btrfs_key key = {
1105 		.objectid = objectid,
1106 		.type = BTRFS_ROOT_ITEM_KEY,
1107 		.offset = 0,
1108 	};
1109 
1110 	switch (objectid) {
1111 	case BTRFS_ROOT_TREE_OBJECTID:
1112 		return btrfs_grab_root(fs_info->tree_root);
1113 	case BTRFS_EXTENT_TREE_OBJECTID:
1114 		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1115 	case BTRFS_CHUNK_TREE_OBJECTID:
1116 		return btrfs_grab_root(fs_info->chunk_root);
1117 	case BTRFS_DEV_TREE_OBJECTID:
1118 		return btrfs_grab_root(fs_info->dev_root);
1119 	case BTRFS_CSUM_TREE_OBJECTID:
1120 		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1121 	case BTRFS_QUOTA_TREE_OBJECTID:
1122 		return btrfs_grab_root(fs_info->quota_root);
1123 	case BTRFS_UUID_TREE_OBJECTID:
1124 		return btrfs_grab_root(fs_info->uuid_root);
1125 	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
1126 		return btrfs_grab_root(fs_info->block_group_root);
1127 	case BTRFS_FREE_SPACE_TREE_OBJECTID:
1128 		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
1129 	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
1130 		return btrfs_grab_root(fs_info->stripe_root);
1131 	case BTRFS_REMAP_TREE_OBJECTID:
1132 		return btrfs_grab_root(fs_info->remap_root);
1133 	default:
1134 		return NULL;
1135 	}
1136 }
1137 
1138 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1139 			 struct btrfs_root *root)
1140 {
1141 	int ret;
1142 
1143 	ret = radix_tree_preload(GFP_NOFS);
1144 	if (ret)
1145 		return ret;
1146 
1147 	spin_lock(&fs_info->fs_roots_radix_lock);
1148 	ret = radix_tree_insert(&fs_info->fs_roots_radix,
1149 				(unsigned long)btrfs_root_id(root),
1150 				root);
1151 	if (ret == 0) {
1152 		btrfs_grab_root(root);
1153 		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
1154 	}
1155 	spin_unlock(&fs_info->fs_roots_radix_lock);
1156 	radix_tree_preload_end();
1157 
1158 	return ret;
1159 }
1160 
1161 void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info)
1162 {
1163 #ifdef CONFIG_BTRFS_DEBUG
1164 	struct btrfs_root *root;
1165 
1166 	while (!list_empty(&fs_info->allocated_roots)) {
1167 		char buf[BTRFS_ROOT_NAME_BUF_LEN];
1168 
1169 		root = list_first_entry(&fs_info->allocated_roots,
1170 					struct btrfs_root, leak_list);
1171 		btrfs_err(fs_info, "leaked root %s refcount %d",
1172 			  btrfs_root_name(&root->root_key, buf),
1173 			  refcount_read(&root->refs));
1174 		WARN_ON_ONCE(1);
1175 		while (refcount_read(&root->refs) > 1)
1176 			btrfs_put_root(root);
1177 		btrfs_put_root(root);
1178 	}
1179 #endif
1180 }
1181 
1182 static void free_global_roots(struct btrfs_fs_info *fs_info)
1183 {
1184 	struct btrfs_root *root;
1185 	struct rb_node *node;
1186 
1187 	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
1188 		root = rb_entry(node, struct btrfs_root, rb_node);
1189 		rb_erase(&root->rb_node, &fs_info->global_root_tree);
1190 		btrfs_put_root(root);
1191 	}
1192 }
1193 
1194 void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
1195 {
1196 	struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;
1197 
1198 	if (fs_info->fs_devices)
1199 		btrfs_close_devices(fs_info->fs_devices);
1200 	btrfs_free_compress_wsm(fs_info);
1201 	percpu_counter_destroy(&fs_info->stats_read_blocks);
1202 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
1203 	percpu_counter_destroy(&fs_info->delalloc_bytes);
1204 	percpu_counter_destroy(&fs_info->ordered_bytes);
1205 	if (percpu_counter_initialized(em_counter))
1206 		ASSERT(percpu_counter_sum_positive(em_counter) == 0);
1207 	percpu_counter_destroy(em_counter);
1208 	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
1209 	btrfs_free_stripe_hash_table(fs_info);
1210 	btrfs_free_ref_cache(fs_info);
1211 	kfree(fs_info->balance_ctl);
1212 	free_global_roots(fs_info);
1213 	btrfs_put_root(fs_info->tree_root);
1214 	btrfs_put_root(fs_info->chunk_root);
1215 	btrfs_put_root(fs_info->dev_root);
1216 	btrfs_put_root(fs_info->quota_root);
1217 	btrfs_put_root(fs_info->uuid_root);
1218 	btrfs_put_root(fs_info->fs_root);
1219 	btrfs_put_root(fs_info->data_reloc_root);
1220 	btrfs_put_root(fs_info->block_group_root);
1221 	btrfs_put_root(fs_info->stripe_root);
1222 	btrfs_put_root(fs_info->remap_root);
1223 	btrfs_check_leaked_roots(fs_info);
1224 	btrfs_extent_buffer_leak_debug_check(fs_info);
1225 	kfree(fs_info->super_copy);
1226 	kfree(fs_info->super_for_commit);
1227 	kvfree(fs_info);
1228 }
1229 
1230 
1231 /*
1232  * Get an in-memory reference of a root structure.
1233  *
1234  * For essential trees like root/extent tree, we grab it from fs_info directly.
1235  * For subvolume trees, we check the cached filesystem roots first. If not
1236  * found, then read it from disk and add it to cached fs roots.
1237  *
1238  * Caller should release the root by calling btrfs_put_root() after the usage.
1239  *
1240  * NOTE: Reloc and log trees can't be read by this function as they share the
1241  *	 same root objectid.
1242  *
1243  * @objectid:	root id
1244  * @anon_dev:	preallocated anonymous block device number for new roots,
1245  *		pass NULL for a new allocation.
1246  * @check_ref:	whether to check root item references; if true, return -ENOENT
1247  *		for orphan roots
1248  */
1249 static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
1250 					     u64 objectid, dev_t *anon_dev,
1251 					     bool check_ref)
1252 {
1253 	struct btrfs_root *root;
1254 	struct btrfs_path *path;
1255 	struct btrfs_key key;
1256 	int ret;
1257 
1258 	root = btrfs_get_global_root(fs_info, objectid);
1259 	if (root)
1260 		return root;
1261 
1262 	/*
1263 	 * If we're called for non-subvolume trees, and the above function didn't
1264 	 * find one, do not try to read it from disk.
1265 	 *
1266 	 * This is namely for free-space-tree and quota tree, which can change
1267 	 * at runtime and should only be grabbed from fs_info.
1268 	 */
1269 	if (!btrfs_is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
1270 		return ERR_PTR(-ENOENT);
1271 again:
1272 	root = btrfs_lookup_fs_root(fs_info, objectid);
1273 	if (root) {
1274 		/*
1275 		 * Some other caller may have read out the newly inserted
1276 		 * subvolume already (for things like backref walk etc).  Not
1277 		 * that common but still possible.  In that case, we just need
1278 		 * to free the anon_dev.
1279 		 */
1280 		if (unlikely(anon_dev && *anon_dev)) {
1281 			free_anon_bdev(*anon_dev);
1282 			*anon_dev = 0;
1283 		}
1284 
1285 		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1286 			btrfs_put_root(root);
1287 			return ERR_PTR(-ENOENT);
1288 		}
1289 		return root;
1290 	}
1291 
1292 	key.objectid = objectid;
1293 	key.type = BTRFS_ROOT_ITEM_KEY;
1294 	key.offset = (u64)-1;
1295 	root = btrfs_read_tree_root(fs_info->tree_root, &key);
1296 	if (IS_ERR(root))
1297 		return root;
1298 
1299 	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1300 		ret = -ENOENT;
1301 		goto fail;
1302 	}
1303 
1304 	ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
1305 	if (ret)
1306 		goto fail;
1307 
1308 	path = btrfs_alloc_path();
1309 	if (!path) {
1310 		ret = -ENOMEM;
1311 		goto fail;
1312 	}
1313 	key.objectid = BTRFS_ORPHAN_OBJECTID;
1314 	key.type = BTRFS_ORPHAN_ITEM_KEY;
1315 	key.offset = objectid;
1316 
1317 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
1318 	btrfs_free_path(path);
1319 	if (ret < 0)
1320 		goto fail;
1321 	if (ret == 0)
1322 		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1323 
1324 	ret = btrfs_insert_fs_root(fs_info, root);
1325 	if (ret) {
1326 		if (ret == -EEXIST) {
1327 			btrfs_put_root(root);
1328 			goto again;
1329 		}
1330 		goto fail;
1331 	}
1332 	return root;
1333 fail:
1334 	/*
1335 	 * If our caller provided us an anonymous device, then it's the caller's
1336 	 * responsibility to free it in case we fail. So we have to set our
1337 	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
1338 	 * and once again by our caller.
1339 	 */
1340 	if (anon_dev && *anon_dev)
1341 		root->anon_dev = 0;
1342 	btrfs_put_root(root);
1343 	return ERR_PTR(ret);
1344 }
1345 
1346 /*
1347  * Get an in-memory reference of a root structure
1348  *
1349  * @objectid:	tree objectid
1350  * @check_ref:	if set, verify that the tree exists and the item has at least
1351  *		one reference
1352  */
1353 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1354 				     u64 objectid, bool check_ref)
1355 {
1356 	return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
1357 }
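/*
 * Illustrative sketch: looking up a subvolume root by id and releasing the
 * reference when done (subvol_id stands for any subvolume objectid the
 * caller already knows):
 *
 *	struct btrfs_root *root;
 *
 *	root = btrfs_get_fs_root(fs_info, subvol_id, true);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *	... use the root ...
 *	btrfs_put_root(root);
 */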
1358 
1359 /*
1360  * Get an in-memory reference of a root structure created as new, optionally
1361  * passing the anonymous block device id
1362  *
1363  * @objectid:	tree objectid
1364  * @anon_dev:	if NULL, allocate a new anonymous block device; otherwise
1365  *		use the provided value
1366  */
1367 struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
1368 					 u64 objectid, dev_t *anon_dev)
1369 {
1370 	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
1371 }
1372 
1373 /*
1374  * Return a root for the given objectid.
1375  *
1376  * @fs_info:	the fs_info
1377  * @objectid:	the objectid we need to lookup
1378  *
1379  * This is exclusively used for backref walking, and exists specifically because
1380  * of how qgroups does lookups.  Qgroups will do a backref lookup at delayed ref
1381  * creation time, which means we may have to read the tree_root in order to look
1382  * up a fs root that is not in memory.  If the root is not in memory we will
1383  * read the tree root commit root and look up the fs root from there.  This is a
1384  * temporary root, it will not be inserted into the radix tree as it doesn't
1385  * have the most uptodate information, it'll simply be discarded once the
1386  * backref code is finished using the root.
1387  */
1388 struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
1389 						 struct btrfs_path *path,
1390 						 u64 objectid)
1391 {
1392 	struct btrfs_root *root;
1393 	struct btrfs_key key;
1394 
1395 	ASSERT(path->search_commit_root && path->skip_locking);
1396 
1397 	/*
1398 	 * This can return -ENOENT if we ask for a root that doesn't exist, but
1399 	 * since this is called via the backref walking code we won't be looking
1400 	 * up a root that doesn't exist, unless there's corruption.  So if root
1401 	 * != NULL just return it.
1402 	 */
1403 	root = btrfs_get_global_root(fs_info, objectid);
1404 	if (root)
1405 		return root;
1406 
1407 	root = btrfs_lookup_fs_root(fs_info, objectid);
1408 	if (root)
1409 		return root;
1410 
1411 	key.objectid = objectid;
1412 	key.type = BTRFS_ROOT_ITEM_KEY;
1413 	key.offset = (u64)-1;
1414 	root = read_tree_root_path(fs_info->tree_root, path, &key);
1415 	btrfs_release_path(path);
1416 
1417 	return root;
1418 }
1419 
1420 static int cleaner_kthread(void *arg)
1421 {
1422 	struct btrfs_fs_info *fs_info = arg;
1423 	int again;
1424 
1425 	while (1) {
1426 		again = 0;
1427 
1428 		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1429 
1430 		/* Make the cleaner go to sleep early. */
1431 		if (btrfs_need_cleaner_sleep(fs_info))
1432 			goto sleep;
1433 
1434 		/*
1435 		 * Do not do anything if we might cause open_ctree() to block
1436 		 * before we have finished mounting the filesystem.
1437 		 */
1438 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1439 			goto sleep;
1440 
1441 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1442 			goto sleep;
1443 
1444 		/*
1445 		 * Guard against the status of the fs having changed between
1446 		 * the above check and the trylock.
1447 		 */
1448 		if (btrfs_need_cleaner_sleep(fs_info)) {
1449 			mutex_unlock(&fs_info->cleaner_mutex);
1450 			goto sleep;
1451 		}
1452 
1453 		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
1454 			btrfs_sysfs_feature_update(fs_info);
1455 
1456 		btrfs_run_delayed_iputs(fs_info);
1457 
1458 		again = btrfs_clean_one_deleted_snapshot(fs_info);
1459 		mutex_unlock(&fs_info->cleaner_mutex);
1460 
1461 		/*
1462 		 * The defragger has dealt with the R/O remount and umount,
1463 		 * so we needn't do anything special here.
1464 		 */
1465 		btrfs_run_defrag_inodes(fs_info);
1466 
1467 		if (btrfs_fs_incompat(fs_info, REMAP_TREE) &&
1468 		    !btrfs_test_opt(fs_info, DISCARD_ASYNC))
1469 			btrfs_handle_fully_remapped_bgs(fs_info);
1470 
1471 		/*
1472 		 * btrfs_delete_unused_bgs() acquires fs_info->reclaim_bgs_lock
1473 		 * to avoid racing with relocation (btrfs_relocate_chunk), and
1474 		 * relocation acquires fs_info->cleaner_mutex
1475 		 * (btrfs_relocate_block_group) after taking reclaim_bgs_lock.
1476 		 * So we can't, and don't need to, hold fs_info->cleaner_mutex
1477 		 * when deleting unused block groups.
1478 		 */
1479 		btrfs_delete_unused_bgs(fs_info);
1480 
1481 		/*
1482 		 * Reclaim block groups in the reclaim_bgs list after we deleted
1483 		 * all unused block_groups. This possibly gives us some more free
1484 		 * space.
1485 		 */
1486 		btrfs_reclaim_bgs(fs_info);
1487 sleep:
1488 		clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1489 		if (kthread_should_park())
1490 			kthread_parkme();
1491 		if (kthread_should_stop())
1492 			return 0;
1493 		if (!again) {
1494 			set_current_state(TASK_INTERRUPTIBLE);
1495 			schedule();
1496 			__set_current_state(TASK_RUNNING);
1497 		}
1498 	}
1499 }
1500 
1501 static int transaction_kthread(void *arg)
1502 {
1503 	struct btrfs_root *root = arg;
1504 	struct btrfs_fs_info *fs_info = root->fs_info;
1505 	struct btrfs_trans_handle *trans;
1506 	struct btrfs_transaction *cur;
1507 	u64 transid;
1508 	time64_t delta;
1509 	unsigned long delay;
1510 	bool cannot_commit;
1511 
1512 	do {
1513 		cannot_commit = false;
1514 		delay = secs_to_jiffies(fs_info->commit_interval);
1515 		mutex_lock(&fs_info->transaction_kthread_mutex);
1516 
1517 		spin_lock(&fs_info->trans_lock);
1518 		cur = fs_info->running_transaction;
1519 		if (!cur) {
1520 			spin_unlock(&fs_info->trans_lock);
1521 			goto sleep;
1522 		}
1523 
1524 		delta = ktime_get_seconds() - cur->start_time;
1525 		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
1526 		    cur->state < TRANS_STATE_COMMIT_PREP &&
1527 		    delta < fs_info->commit_interval) {
1528 			spin_unlock(&fs_info->trans_lock);
1529 			delay -= secs_to_jiffies(delta - 1);
1530 			delay = min(delay,
1531 				    secs_to_jiffies(fs_info->commit_interval));
1532 			goto sleep;
1533 		}
1534 		transid = cur->transid;
1535 		spin_unlock(&fs_info->trans_lock);
1536 
1537 		/* If the file system is aborted, this will always fail. */
1538 		trans = btrfs_attach_transaction(root);
1539 		if (IS_ERR(trans)) {
1540 			if (PTR_ERR(trans) != -ENOENT)
1541 				cannot_commit = true;
1542 			goto sleep;
1543 		}
1544 		if (transid == trans->transid) {
1545 			btrfs_commit_transaction(trans);
1546 		} else {
1547 			btrfs_end_transaction(trans);
1548 		}
1549 sleep:
1550 		wake_up_process(fs_info->cleaner_kthread);
1551 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1552 
1553 		if (BTRFS_FS_ERROR(fs_info))
1554 			btrfs_cleanup_transaction(fs_info);
1555 		if (!kthread_should_stop() &&
1556 				(!btrfs_transaction_blocked(fs_info) ||
1557 				 cannot_commit))
1558 			schedule_timeout_interruptible(delay);
1559 	} while (!kthread_should_stop());
1560 	return 0;
1561 }
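/*
 * Worked example: with commit_interval == 30 and a running transaction that
 * is delta == 12 seconds old, the kthread computes
 * delay = 30s - (12 - 1)s = 19s, clamped to at most one commit interval, and
 * sleeps, waking up again about one second after the transaction turns 30
 * seconds old.
 */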
1562 
1563 /*
1564  * This will find the highest generation in the array of root backups.  The
1565  * index of the newest backup is returned, or -EINVAL if we can't find
1566  * anything.
1567  *
1568  * We check to make sure the array is valid by comparing the
1569  * generation of the latest root in the array with the generation
1570  * in the super block.  If they don't match we pitch it.
1571  */
1572 static int find_newest_super_backup(struct btrfs_fs_info *info)
1573 {
1574 	const u64 newest_gen = btrfs_super_generation(info->super_copy);
1575 	u64 cur;
1576 	struct btrfs_root_backup *root_backup;
1577 	int i;
1578 
1579 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1580 		root_backup = info->super_copy->super_roots + i;
1581 		cur = btrfs_backup_tree_root_gen(root_backup);
1582 		if (cur == newest_gen)
1583 			return i;
1584 	}
1585 
1586 	return -EINVAL;
1587 }
1588 
1589 /*
1590  * Copy all the root pointers into the super backup array.
1591  * This will bump the backup pointer by one when it is
1592  * done.
1593  */
1594 static void backup_super_roots(struct btrfs_fs_info *info)
1595 {
1596 	const int next_backup = info->backup_root_index;
1597 	struct btrfs_root_backup *root_backup;
1598 
1599 	root_backup = info->super_for_commit->super_roots + next_backup;
1600 
1601 	/*
1602 	 * Make sure all of our padding and empty slots get zero-filled,
1603 	 * regardless of which ones we use today.
1604 	 */
1605 	memset(root_backup, 0, sizeof(*root_backup));
1606 
1607 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1608 
1609 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1610 	btrfs_set_backup_tree_root_gen(root_backup,
1611 			       btrfs_header_generation(info->tree_root->node));
1612 
1613 	btrfs_set_backup_tree_root_level(root_backup,
1614 			       btrfs_header_level(info->tree_root->node));
1615 
1616 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1617 	btrfs_set_backup_chunk_root_gen(root_backup,
1618 			       btrfs_header_generation(info->chunk_root->node));
1619 	btrfs_set_backup_chunk_root_level(root_backup,
1620 			       btrfs_header_level(info->chunk_root->node));
1621 
1622 	if (!btrfs_fs_incompat(info, EXTENT_TREE_V2)) {
1623 		struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
1624 		struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
1625 
1626 		btrfs_set_backup_extent_root(root_backup,
1627 					     extent_root->node->start);
1628 		btrfs_set_backup_extent_root_gen(root_backup,
1629 				btrfs_header_generation(extent_root->node));
1630 		btrfs_set_backup_extent_root_level(root_backup,
1631 					btrfs_header_level(extent_root->node));
1632 
1633 		btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
1634 		btrfs_set_backup_csum_root_gen(root_backup,
1635 					       btrfs_header_generation(csum_root->node));
1636 		btrfs_set_backup_csum_root_level(root_backup,
1637 						 btrfs_header_level(csum_root->node));
1638 	}
1639 
1640 	/*
1641 	 * We might commit during log recovery, which happens before we set
1642 	 * the fs_root.  Make sure it is valid before we fill it in.
1643 	 */
1644 	if (info->fs_root && info->fs_root->node) {
1645 		btrfs_set_backup_fs_root(root_backup,
1646 					 info->fs_root->node->start);
1647 		btrfs_set_backup_fs_root_gen(root_backup,
1648 			       btrfs_header_generation(info->fs_root->node));
1649 		btrfs_set_backup_fs_root_level(root_backup,
1650 			       btrfs_header_level(info->fs_root->node));
1651 	}
1652 
1653 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1654 	btrfs_set_backup_dev_root_gen(root_backup,
1655 			       btrfs_header_generation(info->dev_root->node));
1656 	btrfs_set_backup_dev_root_level(root_backup,
1657 				       btrfs_header_level(info->dev_root->node));
1658 
1659 	btrfs_set_backup_total_bytes(root_backup,
1660 			     btrfs_super_total_bytes(info->super_copy));
1661 	btrfs_set_backup_bytes_used(root_backup,
1662 			     btrfs_super_bytes_used(info->super_copy));
1663 	btrfs_set_backup_num_devices(root_backup,
1664 			     btrfs_super_num_devices(info->super_copy));
1665 
1666 	/*
1667 	 * If we don't copy this out to the super_copy, it won't get remembered
1668 	 * for the next commit.
1669 	 */
1670 	memcpy(&info->super_copy->super_roots,
1671 	       &info->super_for_commit->super_roots,
1672 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1673 }
1674 
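/*
 * The slots above behave as a small ring buffer.  A hypothetical sequence,
 * assuming BTRFS_NUM_BACKUP_ROOTS == 4 and backup_root_index == 2 on entry:
 * this commit zeroes and fills slot 2, the next commit slot 3, then slot 0
 * again, so the array always holds the roots of the most recent commits,
 * with the oldest slot overwritten first.
 */
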
1675 /*
1676  * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
1677  * 1/2/3 are the 2nd newest, 3rd newest and 4th (oldest) backup roots.
1678  *
1679  * @fs_info:  filesystem whose backup roots need to be read
1680  * @priority: priority of backup root required
1681  *
1682  * Returns backup root index on success and -EINVAL otherwise.
1683  */
1684 static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
1685 {
1686 	int backup_index = find_newest_super_backup(fs_info);
1687 	struct btrfs_super_block *super = fs_info->super_copy;
1688 	struct btrfs_root_backup *root_backup;
1689 
1690 	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
1691 		if (priority == 0)
1692 			return backup_index;
1693 
1694 		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
1695 		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
1696 	} else {
1697 		return -EINVAL;
1698 	}
1699 
1700 	root_backup = super->super_roots + backup_index;
1701 
1702 	btrfs_set_super_generation(super,
1703 				   btrfs_backup_tree_root_gen(root_backup));
1704 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1705 	btrfs_set_super_root_level(super,
1706 				   btrfs_backup_tree_root_level(root_backup));
1707 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1708 
1709 	/*
1710 	 * FIXME: the total bytes and num_devices need to match or we
1711 	 * need a fsck.
1712 	 */
1713 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1714 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1715 
1716 	return backup_index;
1717 }
1718 
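/*
 * A worked example of the wrap-around math above, assuming
 * BTRFS_NUM_BACKUP_ROOTS == 4 and find_newest_super_backup() returned 1:
 *
 *	priority 0 -> index 1			(the newest backup)
 *	priority 1 -> (1 + 4 - 1) % 4 = 0	(one commit older)
 *	priority 2 -> (1 + 4 - 2) % 4 = 3	(two commits older)
 *	priority 3 -> (1 + 4 - 3) % 4 = 2	(the oldest backup)
 */
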
1719 /* Helper to clean up workers. */
1720 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1721 {
1722 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1723 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1724 	btrfs_destroy_workqueue(fs_info->workers);
1725 	if (fs_info->endio_workers)
1726 		destroy_workqueue(fs_info->endio_workers);
1727 	if (fs_info->rmw_workers)
1728 		destroy_workqueue(fs_info->rmw_workers);
1729 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1730 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1731 	btrfs_destroy_workqueue(fs_info->delayed_workers);
1732 	btrfs_destroy_workqueue(fs_info->caching_workers);
1733 	btrfs_destroy_workqueue(fs_info->flush_workers);
1734 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1735 	if (fs_info->discard_ctl.discard_workers)
1736 		destroy_workqueue(fs_info->discard_ctl.discard_workers);
1737 	/*
1738 	 * Now that all other work queues are destroyed, we can safely destroy
1739 	 * the queues used for metadata I/O, since tasks from those other work
1740 	 * queues can do metadata I/O operations.
1741 	 */
1742 	if (fs_info->endio_meta_workers)
1743 		destroy_workqueue(fs_info->endio_meta_workers);
1744 }
1745 
1746 static void free_root_extent_buffers(struct btrfs_root *root)
1747 {
1748 	if (root) {
1749 		free_extent_buffer(root->node);
1750 		free_extent_buffer(root->commit_root);
1751 		root->node = NULL;
1752 		root->commit_root = NULL;
1753 	}
1754 }
1755 
1756 static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
1757 {
1758 	struct btrfs_root *root, *tmp;
1759 
1760 	rbtree_postorder_for_each_entry_safe(root, tmp,
1761 					     &fs_info->global_root_tree,
1762 					     rb_node)
1763 		free_root_extent_buffers(root);
1764 }
1765 
1766 /* Helper to clean up tree roots. */
1767 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
1768 {
1769 	free_root_extent_buffers(info->tree_root);
1770 
1771 	free_global_root_pointers(info);
1772 	free_root_extent_buffers(info->dev_root);
1773 	free_root_extent_buffers(info->quota_root);
1774 	free_root_extent_buffers(info->uuid_root);
1775 	free_root_extent_buffers(info->fs_root);
1776 	free_root_extent_buffers(info->data_reloc_root);
1777 	free_root_extent_buffers(info->block_group_root);
1778 	free_root_extent_buffers(info->stripe_root);
1779 	free_root_extent_buffers(info->remap_root);
1780 	if (free_chunk_root)
1781 		free_root_extent_buffers(info->chunk_root);
1782 }
1783 
1784 void btrfs_put_root(struct btrfs_root *root)
1785 {
1786 	if (!root)
1787 		return;
1788 
1789 	if (refcount_dec_and_test(&root->refs)) {
1790 		if (WARN_ON(!xa_empty(&root->inodes)))
1791 			xa_destroy(&root->inodes);
1792 		if (WARN_ON(!xa_empty(&root->delayed_nodes)))
1793 			xa_destroy(&root->delayed_nodes);
1794 		WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
1795 		if (root->anon_dev)
1796 			free_anon_bdev(root->anon_dev);
1797 		free_root_extent_buffers(root);
1798 #ifdef CONFIG_BTRFS_DEBUG
1799 		spin_lock(&root->fs_info->fs_roots_radix_lock);
1800 		list_del_init(&root->leak_list);
1801 		spin_unlock(&root->fs_info->fs_roots_radix_lock);
1802 #endif
1803 		kfree(root);
1804 	}
1805 }
1806 
1807 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
1808 {
1809 	int ret;
1810 	struct btrfs_root *gang[8];
1811 	int i;
1812 
1813 	while (!list_empty(&fs_info->dead_roots)) {
1814 		gang[0] = list_first_entry(&fs_info->dead_roots,
1815 					   struct btrfs_root, root_list);
1816 		list_del(&gang[0]->root_list);
1817 
1818 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
1819 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
1820 		btrfs_put_root(gang[0]);
1821 	}
1822 
1823 	while (1) {
1824 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1825 					     (void **)gang, 0,
1826 					     ARRAY_SIZE(gang));
1827 		if (!ret)
1828 			break;
1829 		for (i = 0; i < ret; i++)
1830 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
1831 	}
1832 }
1833 
1834 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
1835 {
1836 	mutex_init(&fs_info->scrub_lock);
1837 	atomic_set(&fs_info->scrubs_running, 0);
1838 	atomic_set(&fs_info->scrub_pause_req, 0);
1839 	atomic_set(&fs_info->scrubs_paused, 0);
1840 	atomic_set(&fs_info->scrub_cancel_req, 0);
1841 	init_waitqueue_head(&fs_info->scrub_pause_wait);
1842 	refcount_set(&fs_info->scrub_workers_refcnt, 0);
1843 }
1844 
1845 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
1846 {
1847 	spin_lock_init(&fs_info->balance_lock);
1848 	mutex_init(&fs_info->balance_mutex);
1849 	atomic_set(&fs_info->balance_pause_req, 0);
1850 	atomic_set(&fs_info->balance_cancel_req, 0);
1851 	fs_info->balance_ctl = NULL;
1852 	init_waitqueue_head(&fs_info->balance_wait_q);
1853 	atomic_set(&fs_info->reloc_cancel_req, 0);
1854 }
1855 
1856 static int btrfs_init_btree_inode(struct super_block *sb)
1857 {
1858 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1859 	unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
1860 					      fs_info->tree_root);
1861 	struct inode *inode;
1862 
1863 	inode = new_inode(sb);
1864 	if (!inode)
1865 		return -ENOMEM;
1866 
1867 	btrfs_set_inode_number(BTRFS_I(inode), BTRFS_BTREE_INODE_OBJECTID);
1868 	set_nlink(inode, 1);
1869 	/*
1870 	 * We set the i_size on the btree inode to the max possible offset.
1871 	 * The real end of the address space is determined by all of
1872 	 * the devices in the system.
1873 	 */
1874 	inode->i_size = OFFSET_MAX;
1875 	inode->i_mapping->a_ops = &btree_aops;
1876 	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
1877 
1878 	btrfs_extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
1879 				  IO_TREE_BTREE_INODE_IO);
1880 	btrfs_extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
1881 
1882 	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
1883 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
1884 	__insert_inode_hash(inode, hash);
1885 	set_bit(AS_KERNEL_FILE, &inode->i_mapping->flags);
1886 	fs_info->btree_inode = inode;
1887 
1888 	return 0;
1889 }
1890 
1891 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
1892 {
1893 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
1894 	init_rwsem(&fs_info->dev_replace.rwsem);
1895 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
1896 }
1897 
1898 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
1899 {
1900 	spin_lock_init(&fs_info->qgroup_lock);
1901 	mutex_init(&fs_info->qgroup_ioctl_lock);
1902 	fs_info->qgroup_tree = RB_ROOT;
1903 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
1904 	fs_info->qgroup_seq = 1;
1905 	fs_info->qgroup_rescan_running = false;
1906 	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1907 	mutex_init(&fs_info->qgroup_rescan_lock);
1908 }
1909 
1910 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
1911 {
1912 	u32 max_active = fs_info->thread_pool_size;
1913 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
1914 	unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_PERCPU;
1915 
1916 	fs_info->workers =
1917 		btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
1918 
1919 	fs_info->delalloc_workers =
1920 		btrfs_alloc_workqueue(fs_info, "delalloc",
1921 				      flags, max_active, 2);
1922 
1923 	fs_info->flush_workers =
1924 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
1925 				      flags, max_active, 0);
1926 
1927 	fs_info->caching_workers =
1928 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
1929 
1930 	fs_info->fixup_workers =
1931 		btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);
1932 
1933 	fs_info->endio_workers =
1934 		alloc_workqueue("btrfs-endio", flags, max_active);
1935 	fs_info->endio_meta_workers =
1936 		alloc_workqueue("btrfs-endio-meta", flags, max_active);
1937 	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
1938 	fs_info->endio_write_workers =
1939 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
1940 				      max_active, 2);
1941 	fs_info->endio_freespace_worker =
1942 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
1943 				      max_active, 0);
1944 	fs_info->delayed_workers =
1945 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
1946 				      max_active, 0);
1947 	fs_info->qgroup_rescan_workers =
1948 		btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
1949 					      ordered_flags);
1950 	fs_info->discard_ctl.discard_workers =
1951 		alloc_ordered_workqueue("btrfs-discard", WQ_FREEZABLE);
1952 
1953 	if (!(fs_info->workers &&
1954 	      fs_info->delalloc_workers && fs_info->flush_workers &&
1955 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
1956 	      fs_info->endio_write_workers &&
1957 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
1958 	      fs_info->caching_workers && fs_info->fixup_workers &&
1959 	      fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
1960 	      fs_info->discard_ctl.discard_workers)) {
1961 		return -ENOMEM;
1962 	}
1963 
1964 	return 0;
1965 }
1966 
1967 static void btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
1968 {
1969 	/* Check if the checksum implementation is a fast accelerated one. */
1970 	switch (csum_type) {
1971 	case BTRFS_CSUM_TYPE_CRC32:
1972 		if (crc32_optimizations() & CRC32C_OPTIMIZATION)
1973 			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
1974 		break;
1975 	case BTRFS_CSUM_TYPE_XXHASH:
1976 		set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
1977 		break;
1978 	default:
1979 		break;
1980 	}
1981 
1982 	btrfs_info(fs_info, "using %s checksum algorithm",
1983 		   btrfs_super_csum_name(csum_type));
1984 }
1985 
1986 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
1987 			    struct btrfs_fs_devices *fs_devices)
1988 {
1989 	int ret;
1990 	struct btrfs_tree_parent_check check = { 0 };
1991 	struct btrfs_root *log_tree_root;
1992 	struct btrfs_super_block *disk_super = fs_info->super_copy;
1993 	u64 bytenr = btrfs_super_log_root(disk_super);
1994 	int level = btrfs_super_log_root_level(disk_super);
1995 
1996 	if (unlikely(fs_devices->rw_devices == 0)) {
1997 		btrfs_err(fs_info, "log replay required on RO media");
1998 		return -EIO;
1999 	}
2000 
2001 	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2002 					 GFP_KERNEL);
2003 	if (!log_tree_root)
2004 		return -ENOMEM;
2005 
2006 	check.level = level;
2007 	check.transid = fs_info->generation + 1;
2008 	check.owner_root = BTRFS_TREE_LOG_OBJECTID;
2009 	log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
2010 	if (IS_ERR(log_tree_root->node)) {
2011 		ret = PTR_ERR(log_tree_root->node);
2012 		log_tree_root->node = NULL;
2013 		btrfs_err(fs_info, "failed to read log tree with error: %d", ret);
2014 		btrfs_put_root(log_tree_root);
2015 		return ret;
2016 	}
2017 	if (unlikely(!extent_buffer_uptodate(log_tree_root->node))) {
2018 		btrfs_err(fs_info, "failed to read log tree");
2019 		btrfs_put_root(log_tree_root);
2020 		return -EIO;
2021 	}
2022 
2023 	/* log_tree_root is always put below, whether recovery succeeds or not. */
2024 	ret = btrfs_recover_log_trees(log_tree_root);
2025 	btrfs_put_root(log_tree_root);
2026 	if (unlikely(ret)) {
2027 		ASSERT(BTRFS_FS_ERROR(fs_info) != 0);
2028 		btrfs_err(fs_info, "failed to recover log trees with error: %d", ret);
2029 		return ret;
2030 	}
2031 
2032 	if (sb_rdonly(fs_info->sb)) {
2033 		ret = btrfs_commit_super(fs_info);
2034 		if (ret)
2035 			return ret;
2036 	}
2037 
2038 	return 0;
2039 }
2040 
2041 static int load_global_roots_objectid(struct btrfs_root *tree_root,
2042 				      struct btrfs_path *path, u64 objectid,
2043 				      const char *name)
2044 {
2045 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
2046 	struct btrfs_root *root;
2047 	u64 max_global_id = 0;
2048 	int ret;
2049 	struct btrfs_key key = {
2050 		.objectid = objectid,
2051 		.type = BTRFS_ROOT_ITEM_KEY,
2052 		.offset = 0,
2053 	};
2054 	bool found = false;
2055 
2056 	/* If we have IGNOREDATACSUMS skip loading these roots. */
2057 	if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
2058 	    btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
2059 		set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2060 		return 0;
2061 	}
2062 
2063 	while (1) {
2064 		ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2065 		if (ret < 0)
2066 			break;
2067 
2068 		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2069 			ret = btrfs_next_leaf(tree_root, path);
2070 			if (ret) {
2071 				if (ret > 0)
2072 					ret = 0;
2073 				break;
2074 			}
2075 		}
2076 		ret = 0;
2077 
2078 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2079 		if (key.objectid != objectid)
2080 			break;
2081 		btrfs_release_path(path);
2082 
2083 		/*
2084 		 * Just worry about this for the extent tree; it'll be the same
2085 		 * for everybody.
2086 		 */
2087 		if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2088 			max_global_id = max(max_global_id, key.offset);
2089 
2090 		found = true;
2091 		root = read_tree_root_path(tree_root, path, &key);
2092 		if (IS_ERR(root)) {
2093 			ret = PTR_ERR(root);
2094 			break;
2095 		}
2096 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2097 		ret = btrfs_global_root_insert(root);
2098 		if (ret) {
2099 			btrfs_put_root(root);
2100 			break;
2101 		}
2102 		key.offset++;
2103 	}
2104 	btrfs_release_path(path);
2105 
2106 	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2107 		fs_info->nr_global_roots = max_global_id + 1;
2108 
2109 	if (!found || ret) {
2110 		if (objectid == BTRFS_CSUM_TREE_OBJECTID)
2111 			set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2112 
2113 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2114 			ret = ret ? ret : -ENOENT;
2115 		else
2116 			ret = 0;
2117 		btrfs_err(fs_info, "failed to load root %s", name);
2118 	}
2119 	return ret;
2120 }
2121 
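/*
 * A sketch of the keys the loop above visits for the extent tree, assuming an
 * extent-tree-v2 filesystem carrying two global root copies:
 *
 *	(BTRFS_EXTENT_TREE_OBJECTID, BTRFS_ROOT_ITEM_KEY, 0)
 *	(BTRFS_EXTENT_TREE_OBJECTID, BTRFS_ROOT_ITEM_KEY, 1)
 *
 * key.offset is bumped after each hit, and nr_global_roots ends up as
 * max_global_id + 1 (here 2).  On a pre-v2 filesystem only offset 0 exists.
 */
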
2122 static int load_global_roots(struct btrfs_root *tree_root)
2123 {
2124 	BTRFS_PATH_AUTO_FREE(path);
2125 	int ret;
2126 
2127 	path = btrfs_alloc_path();
2128 	if (!path)
2129 		return -ENOMEM;
2130 
2131 	ret = load_global_roots_objectid(tree_root, path,
2132 					 BTRFS_EXTENT_TREE_OBJECTID, "extent");
2133 	if (ret)
2134 		return ret;
2135 	ret = load_global_roots_objectid(tree_root, path,
2136 					 BTRFS_CSUM_TREE_OBJECTID, "csum");
2137 	if (ret)
2138 		return ret;
2139 	if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
2140 		return ret;
2141 
2142 	return load_global_roots_objectid(tree_root, path,
2143 					  BTRFS_FREE_SPACE_TREE_OBJECTID,
2144 					  "free space");
2145 }
2146 
2147 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2148 {
2149 	struct btrfs_root *tree_root = fs_info->tree_root;
2150 	struct btrfs_root *root;
2151 	struct btrfs_key location;
2152 	int ret;
2153 
2154 	ASSERT(fs_info->tree_root);
2155 
2156 	ret = load_global_roots(tree_root);
2157 	if (ret)
2158 		return ret;
2159 
2160 	location.type = BTRFS_ROOT_ITEM_KEY;
2161 	location.offset = 0;
2162 
2163 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2164 		location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
2165 		root = btrfs_read_tree_root(tree_root, &location);
2166 		if (IS_ERR(root)) {
2167 			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2168 				ret = PTR_ERR(root);
2169 				goto out;
2170 			}
2171 		} else {
2172 			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2173 			fs_info->block_group_root = root;
2174 		}
2175 	}
2176 
2177 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2178 	root = btrfs_read_tree_root(tree_root, &location);
2179 	if (IS_ERR(root)) {
2180 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2181 			ret = PTR_ERR(root);
2182 			goto out;
2183 		}
2184 	} else {
2185 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2186 		fs_info->dev_root = root;
2187 	}
2188 	/* Initialize fs_info for all devices in any case */
2189 	ret = btrfs_init_devices_late(fs_info);
2190 	if (ret)
2191 		goto out;
2192 
2193 	if (btrfs_fs_incompat(fs_info, REMAP_TREE)) {
2194 		/* The remap_root has already been loaded in load_important_roots(). */
2195 		root = fs_info->remap_root;
2196 
2197 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2198 
2199 		root->root_key.objectid = BTRFS_REMAP_TREE_OBJECTID;
2200 		root->root_key.type = BTRFS_ROOT_ITEM_KEY;
2201 		root->root_key.offset = 0;
2202 
2203 		/* Check that the data reloc tree doesn't also exist. */
2204 		location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
2205 		root = btrfs_read_tree_root(fs_info->tree_root, &location);
2206 		if (!IS_ERR(root)) {
2207 			btrfs_err(fs_info, "data reloc tree exists when remap-tree enabled");
2208 			btrfs_put_root(root);
2209 			return -EIO;
2210 		} else if (PTR_ERR(root) != -ENOENT) {
2211 			btrfs_warn(fs_info, "error %ld when checking for data reloc tree",
2212 				   PTR_ERR(root));
2213 		}
2214 	} else {
2215 		/*
2216 		 * This tree can share blocks with some other fs tree during
2217 		 * relocation and we need a proper setup by btrfs_get_fs_root().
2218 		 */
2219 		root = btrfs_get_fs_root(tree_root->fs_info,
2220 					 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2221 		if (IS_ERR(root)) {
2222 			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2223 				location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
2224 				ret = PTR_ERR(root);
2225 				goto out;
2226 			}
2227 		} else {
2228 			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2229 			fs_info->data_reloc_root = root;
2230 		}
2231 	}
2232 
2233 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2234 	root = btrfs_read_tree_root(tree_root, &location);
2235 	if (!IS_ERR(root)) {
2236 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2237 		fs_info->quota_root = root;
2238 	}
2239 
2240 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2241 	root = btrfs_read_tree_root(tree_root, &location);
2242 	if (IS_ERR(root)) {
2243 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2244 			ret = PTR_ERR(root);
2245 			if (ret != -ENOENT)
2246 				goto out;
2247 		}
2248 	} else {
2249 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2250 		fs_info->uuid_root = root;
2251 	}
2252 
2253 	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
2254 		location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
2255 		root = btrfs_read_tree_root(tree_root, &location);
2256 		if (IS_ERR(root)) {
2257 			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2258 				ret = PTR_ERR(root);
2259 				goto out;
2260 			}
2261 		} else {
2262 			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2263 			fs_info->stripe_root = root;
2264 		}
2265 	}
2266 
2267 	return 0;
2268 out:
2269 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2270 		   location.objectid, ret);
2271 	return ret;
2272 }
2273 
2274 static int validate_sys_chunk_array(const struct btrfs_fs_info *fs_info,
2275 				    const struct btrfs_super_block *sb)
2276 {
2277 	unsigned int cur = 0; /* Offset inside the sys chunk array */
2278 	/*
2279 	 * At sb read time, fs_info is not fully initialized. Thus we have
2280 	 * to use super block sectorsize, which should have been validated.
2281 	 */
2282 	const u32 sectorsize = btrfs_super_sectorsize(sb);
2283 	u32 sys_array_size = btrfs_super_sys_array_size(sb);
2284 
2285 	if (unlikely(sys_array_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)) {
2286 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2287 			  sys_array_size, BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2288 		return -EUCLEAN;
2289 	}
2290 
2291 	while (cur < sys_array_size) {
2292 		struct btrfs_disk_key *disk_key;
2293 		struct btrfs_chunk *chunk;
2294 		struct btrfs_key key;
2295 		u64 type;
2296 		u16 num_stripes;
2297 		u32 len;
2298 		int ret;
2299 
2300 		disk_key = (struct btrfs_disk_key *)(sb->sys_chunk_array + cur);
2301 		len = sizeof(*disk_key);
2302 
2303 		if (unlikely(cur + len > sys_array_size))
2304 			goto short_read;
2305 		cur += len;
2306 
2307 		btrfs_disk_key_to_cpu(&key, disk_key);
2308 		if (unlikely(key.type != BTRFS_CHUNK_ITEM_KEY)) {
2309 			btrfs_err(fs_info,
2310 			    "unexpected item type %u in sys_array at offset %u",
2311 				  key.type, cur);
2312 			return -EUCLEAN;
2313 		}
2314 		chunk = (struct btrfs_chunk *)(sb->sys_chunk_array + cur);
2315 		num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2316 		if (unlikely(cur + btrfs_chunk_item_size(num_stripes) > sys_array_size))
2317 			goto short_read;
2318 		type = btrfs_stack_chunk_type(chunk);
2319 		if (unlikely(!(type & BTRFS_BLOCK_GROUP_SYSTEM))) {
2320 			btrfs_err(fs_info,
2321 			"invalid chunk type %llu in sys_array at offset %u",
2322 				  type, cur);
2323 			return -EUCLEAN;
2324 		}
2325 		ret = btrfs_check_chunk_valid(fs_info, NULL, chunk, key.offset,
2326 					      sectorsize);
2327 		if (ret < 0)
2328 			return ret;
2329 		cur += btrfs_chunk_item_size(num_stripes);
2330 	}
2331 	return 0;
2332 short_read:
2333 	btrfs_err(fs_info,
2334 	"super block sys chunk array short read, cur=%u sys_array_size=%u",
2335 		  cur, sys_array_size);
2336 	return -EUCLEAN;
2337 }
2338 
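/*
 * A sketch of the layout the loop above walks, assuming the usual chunk item
 * encoding where struct btrfs_chunk embeds the first struct btrfs_stripe:
 *
 *	sys_chunk_array:
 *	[ disk_key 0 ][ chunk 0 + (num_stripes - 1) extra stripes ][ disk_key 1 ] ...
 *
 * Each iteration advances cur by sizeof(*disk_key) and then by
 * btrfs_chunk_item_size(num_stripes), bailing out if either step would run
 * past sys_array_size.
 */
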
2339 /*
2340  * Real super block validation
2341  * NOTE: super csum type and incompat features will not be checked here.
2342  *
2343  * @sb:		super block to check
2344  * @mirror_num:	which super block copy to check the bytenr of:
2345  * 		0	the primary (1st) sb
2346  * 		1, 2	2nd and 3rd backup copy
2347  * 	       -1	skip bytenr check
2348  */
2349 int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
2350 			 const struct btrfs_super_block *sb, int mirror_num)
2351 {
2352 	u64 nodesize = btrfs_super_nodesize(sb);
2353 	u64 sectorsize = btrfs_super_sectorsize(sb);
2354 	int ret = 0;
2355 	const bool ignore_flags = btrfs_test_opt(fs_info, IGNORESUPERFLAGS);
2356 
2357 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2358 		btrfs_err(fs_info, "no valid FS found");
2359 		ret = -EINVAL;
2360 	}
2361 	if ((btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)) {
2362 		if (!ignore_flags) {
2363 			btrfs_err(fs_info,
2364 			"unrecognized or unsupported super flag 0x%llx",
2365 				  btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2366 			ret = -EINVAL;
2367 		} else {
2368 			btrfs_info(fs_info,
2369 			"unrecognized or unsupported super flags: 0x%llx, ignored",
2370 				   btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2371 		}
2372 	}
2373 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2374 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2375 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2376 		ret = -EINVAL;
2377 	}
2378 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2379 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2380 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2381 		ret = -EINVAL;
2382 	}
2383 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2384 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2385 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2386 		ret = -EINVAL;
2387 	}
2388 
2389 	/*
2390 	 * Check sectorsize and nodesize first, other checks will need them.
2391 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2392 	 */
2393 	if (!is_power_of_2(sectorsize) || sectorsize < BTRFS_MIN_BLOCKSIZE ||
2394 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2395 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2396 		ret = -EINVAL;
2397 	}
2398 
2399 	if (!btrfs_supported_blocksize(sectorsize)) {
2400 		btrfs_err(fs_info,
2401 			"sectorsize %llu not yet supported for page size %lu",
2402 			sectorsize, PAGE_SIZE);
2403 		ret = -EINVAL;
2404 	}
2405 
2406 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2407 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2408 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2409 		ret = -EINVAL;
2410 	}
2411 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2412 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2413 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2414 		ret = -EINVAL;
2415 	}
2416 
2417 	/* Root alignment check */
2418 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2419 		btrfs_err(fs_info, "tree_root block unaligned: %llu",
2420 			  btrfs_super_root(sb));
2421 		ret = -EINVAL;
2422 	}
2423 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2424 		btrfs_err(fs_info, "chunk_root block unaligned: %llu",
2425 			   btrfs_super_chunk_root(sb));
2426 		ret = -EINVAL;
2427 	}
2428 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2429 		btrfs_err(fs_info, "log_root block unaligned: %llu",
2430 			  btrfs_super_log_root(sb));
2431 		ret = -EINVAL;
2432 	}
2433 
2434 	if (!fs_info->fs_devices->temp_fsid &&
2435 	    memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
2436 		btrfs_err(fs_info,
2437 		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2438 			  sb->fsid, fs_info->fs_devices->fsid);
2439 		ret = -EINVAL;
2440 	}
2441 
2442 	if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
2443 		   BTRFS_FSID_SIZE) != 0) {
2444 		btrfs_err(fs_info,
2445 "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2446 			  btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2447 		ret = -EINVAL;
2448 	}
2449 
2450 	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2451 		   BTRFS_FSID_SIZE) != 0) {
2452 		btrfs_err(fs_info,
2453 			"dev_item UUID does not match metadata fsid: %pU != %pU",
2454 			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2455 		ret = -EINVAL;
2456 	}
2457 
2458 	/*
2459 	 * Artificial requirement for block-group-tree to force newer features
2460 	 * (free-space-tree, no-holes) so the test matrix is smaller.
2461 	 */
2462 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
2463 	    (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2464 	     !btrfs_fs_incompat(fs_info, NO_HOLES))) {
2465 		btrfs_err(fs_info,
2466 		"block-group-tree feature requires free-space-tree and no-holes");
2467 		ret = -EINVAL;
2468 	}
2469 
2470 	if (btrfs_fs_incompat(fs_info, REMAP_TREE)) {
2471 		/*
2472 		 * Reduce test matrix for remap tree by requiring block-group-tree
2473 		 * and no-holes. Free-space-tree is a hard requirement.
2474 		 */
2475 		if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2476 		    !btrfs_fs_incompat(fs_info, NO_HOLES) ||
2477 		    !btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2478 			btrfs_err(fs_info,
2479 "remap-tree feature requires free-space-tree, no-holes, and block-group-tree");
2480 			ret = -EINVAL;
2481 		}
2482 
2483 		if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
2484 			btrfs_err(fs_info, "remap-tree not supported with mixed-bg");
2485 			ret = -EINVAL;
2486 		}
2487 
2488 		if (btrfs_fs_incompat(fs_info, ZONED)) {
2489 			btrfs_err(fs_info, "remap-tree not supported with zoned devices");
2490 			ret = -EINVAL;
2491 		}
2492 
2493 		if (sectorsize > PAGE_SIZE) {
2494 			btrfs_err(fs_info, "remap-tree not supported when block size > page size");
2495 			ret = -EINVAL;
2496 		}
2497 	}
2498 
2499 	/*
2500 	 * Hint to catch really bogus numbers, bit flips and the like; more exact
2501 	 * checks are done later.
2502 	 */
2503 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2504 		btrfs_err(fs_info, "bytes_used is too small %llu",
2505 			  btrfs_super_bytes_used(sb));
2506 		ret = -EINVAL;
2507 	}
2508 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2509 		btrfs_err(fs_info, "invalid stripesize %u",
2510 			  btrfs_super_stripesize(sb));
2511 		ret = -EINVAL;
2512 	}
2513 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2514 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2515 			   btrfs_super_num_devices(sb));
2516 	if (btrfs_super_num_devices(sb) == 0) {
2517 		btrfs_err(fs_info, "number of devices is 0");
2518 		ret = -EINVAL;
2519 	}
2520 
2521 	if (mirror_num >= 0 &&
2522 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2523 		btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2524 			  btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2525 		ret = -EINVAL;
2526 	}
2527 
2528 	if (ret)
2529 		return ret;
2530 
2531 	ret = validate_sys_chunk_array(fs_info, sb);
2532 
2533 	/*
2534 	 * Catch obvious sys_chunk_array corruptions: it must hold at least one
2535 	 * key and one chunk.
2536 	 */
2537 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2538 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2539 			  btrfs_super_sys_array_size(sb),
2540 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2541 		ret = -EINVAL;
2542 	}
2543 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2544 			+ sizeof(struct btrfs_chunk)) {
2545 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2546 			  btrfs_super_sys_array_size(sb),
2547 			  sizeof(struct btrfs_disk_key)
2548 			  + sizeof(struct btrfs_chunk));
2549 		ret = -EINVAL;
2550 	}
2551 
2552 	/*
2553 	 * The generation is a global counter; we'll trust it more than the others,
2554 	 * but it's still possible that it's the one that's wrong.
2555 	 */
2556 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2557 		btrfs_warn(fs_info,
2558 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2559 			btrfs_super_generation(sb),
2560 			btrfs_super_chunk_root_generation(sb));
2561 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2562 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2563 		btrfs_warn(fs_info,
2564 			"suspicious: generation < cache_generation: %llu < %llu",
2565 			btrfs_super_generation(sb),
2566 			btrfs_super_cache_generation(sb));
2567 
2568 	return ret;
2569 }
2570 
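/*
 * For reference on the mirror_num bytenr check above: assuming the usual
 * btrfs super block mirror layout, btrfs_sb_offset() yields 64KiB for the
 * primary copy (mirror 0) and 64MiB / 256GiB for the two backup copies.
 */
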
2571 /*
2572  * Validation of super block at mount time.
2573  * Checks already done early at mount time, like csum type and incompat
2574  * flags, will be skipped.
2575  */
2576 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2577 {
2578 	return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
2579 }
2580 
2581 /*
2582  * Validation of super block at write time.
2583  * Some checks, like the bytenr check, will be skipped as their values will
2584  * be overwritten soon.
2585  * Extra checks like csum type and incompat flags will be done here.
2586  */
2587 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2588 				      struct btrfs_super_block *sb)
2589 {
2590 	int ret;
2591 
2592 	ret = btrfs_validate_super(fs_info, sb, -1);
2593 	if (ret < 0)
2594 		goto out;
2595 	if (unlikely(!btrfs_supported_super_csum(btrfs_super_csum_type(sb)))) {
2596 		ret = -EUCLEAN;
2597 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2598 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2599 		goto out;
2600 	}
2601 	if (unlikely(btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP)) {
2602 		ret = -EUCLEAN;
2603 		btrfs_err(fs_info,
2604 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2605 			  btrfs_super_incompat_flags(sb),
2606 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2607 		goto out;
2608 	}
2609 out:
2610 	if (ret < 0)
2611 		btrfs_err(fs_info,
2612 		"super block corruption detected before writing it to disk");
2613 	return ret;
2614 }
2615 
2616 static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
2617 {
2618 	struct btrfs_tree_parent_check check = {
2619 		.level = level,
2620 		.transid = gen,
2621 		.owner_root = btrfs_root_id(root)
2622 	};
2623 	int ret = 0;
2624 
2625 	root->node = read_tree_block(root->fs_info, bytenr, &check);
2626 	if (IS_ERR(root->node)) {
2627 		ret = PTR_ERR(root->node);
2628 		root->node = NULL;
2629 		return ret;
2630 	}
2631 	if (unlikely(!extent_buffer_uptodate(root->node))) {
2632 		free_extent_buffer(root->node);
2633 		root->node = NULL;
2634 		return -EIO;
2635 	}
2636 
2637 	btrfs_set_root_node(&root->root_item, root->node);
2638 	root->commit_root = btrfs_root_node(root);
2639 	btrfs_set_root_refs(&root->root_item, 1);
2640 	return ret;
2641 }
2642 
2643 static int load_important_roots(struct btrfs_fs_info *fs_info)
2644 {
2645 	struct btrfs_super_block *sb = fs_info->super_copy;
2646 	u64 gen, bytenr;
2647 	int level, ret;
2648 
2649 	bytenr = btrfs_super_root(sb);
2650 	gen = btrfs_super_generation(sb);
2651 	level = btrfs_super_root_level(sb);
2652 	ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
2653 	if (ret) {
2654 		btrfs_warn(fs_info, "couldn't read tree root");
2655 		return ret;
2656 	}
2657 
2658 	if (btrfs_fs_incompat(fs_info, REMAP_TREE)) {
2659 		bytenr = btrfs_super_remap_root(sb);
2660 		gen = btrfs_super_remap_root_generation(sb);
2661 		level = btrfs_super_remap_root_level(sb);
2662 		ret = load_super_root(fs_info->remap_root, bytenr, gen, level);
2663 		if (ret) {
2664 			btrfs_warn(fs_info, "couldn't read remap root");
2665 			return ret;
2666 		}
2667 	}
2668 
2669 	return 0;
2670 }
2671 
2672 static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2673 {
2674 	int backup_index = find_newest_super_backup(fs_info);
2675 	struct btrfs_super_block *sb = fs_info->super_copy;
2676 	struct btrfs_root *tree_root = fs_info->tree_root;
2677 	bool handle_error = false;
2678 	int ret = 0;
2679 	int i;
2680 
2681 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2682 		if (handle_error) {
2683 			if (!IS_ERR(tree_root->node))
2684 				free_extent_buffer(tree_root->node);
2685 			tree_root->node = NULL;
2686 
2687 			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2688 				break;
2689 
2690 			free_root_pointers(fs_info, 0);
2691 
2692 			/*
2693 			 * Don't use the log in recovery mode, it won't be
2694 			 * valid
2695 			 */
2696 			btrfs_set_super_log_root(sb, 0);
2697 
2698 			btrfs_warn(fs_info, "try to load backup roots slot %d", i);
2699 			ret = read_backup_root(fs_info, i);
2700 			backup_index = ret;
2701 			if (ret < 0)
2702 				return ret;
2703 		}
2704 
2705 		ret = load_important_roots(fs_info);
2706 		if (ret) {
2707 			handle_error = true;
2708 			continue;
2709 		}
2710 
2711 		/*
2712 		 * No need to hold btrfs_root::objectid_mutex since the fs
2713 		 * hasn't been fully initialised and we are the only user
2714 		 */
2715 		ret = btrfs_init_root_free_objectid(tree_root);
2716 		if (ret < 0) {
2717 			handle_error = true;
2718 			continue;
2719 		}
2720 
2721 		ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
2722 
2723 		ret = btrfs_read_roots(fs_info);
2724 		if (ret < 0) {
2725 			handle_error = true;
2726 			continue;
2727 		}
2728 
2729 		/* All successful */
2730 		fs_info->generation = btrfs_header_generation(tree_root->node);
2731 		btrfs_set_last_trans_committed(fs_info, fs_info->generation);
2732 		fs_info->last_reloc_trans = 0;
2733 
2734 		/* Always begin writing backup roots after the one being used */
2735 		if (backup_index < 0) {
2736 			fs_info->backup_root_index = 0;
2737 		} else {
2738 			fs_info->backup_root_index = backup_index + 1;
2739 			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2740 		}
2741 		break;
2742 	}
2743 
2744 	return ret;
2745 }
2746 
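/*
 * A hypothetical trace of the retry loop above, with -o usebackuproot set and
 * a corrupted tree root: pass 0 fails in load_important_roots(); pass 1 frees
 * the stale root pointers, zeroes the log root (it must not be replayed in
 * recovery), rewrites the super copy from backup priority 1 via
 * read_backup_root() and retries (priority 0 would normally match the
 * just-failed primary root); and so on, until one set of roots loads cleanly
 * or all BTRFS_NUM_BACKUP_ROOTS passes are exhausted.
 */
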
2747 /*
2748  * Lockdep gets confused between our buffer_tree, which requires IRQ locking
2749  * because we modify marks in the IRQ context, and our delayed inode xarray,
2750  * which doesn't. Use a class key so lockdep doesn't get them mixed up.
2751  */
2752 static struct lock_class_key buffer_xa_class;
2753 
2754 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2755 {
2756 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2757 
2758 	/* Use the same flags as mapping->i_pages. */
2759 	xa_init_flags(&fs_info->buffer_tree, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
2760 	lockdep_set_class(&fs_info->buffer_tree.xa_lock, &buffer_xa_class);
2761 
2762 	INIT_LIST_HEAD(&fs_info->trans_list);
2763 	INIT_LIST_HEAD(&fs_info->dead_roots);
2764 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2765 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2766 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2767 	spin_lock_init(&fs_info->delalloc_root_lock);
2768 	spin_lock_init(&fs_info->trans_lock);
2769 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2770 	spin_lock_init(&fs_info->delayed_iput_lock);
2771 	spin_lock_init(&fs_info->defrag_inodes_lock);
2772 	spin_lock_init(&fs_info->super_lock);
2773 	spin_lock_init(&fs_info->unused_bgs_lock);
2774 	spin_lock_init(&fs_info->treelog_bg_lock);
2775 	spin_lock_init(&fs_info->zone_active_bgs_lock);
2776 	spin_lock_init(&fs_info->relocation_bg_lock);
2777 	rwlock_init(&fs_info->tree_mod_log_lock);
2778 	rwlock_init(&fs_info->global_root_lock);
2779 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2780 	mutex_init(&fs_info->reclaim_bgs_lock);
2781 	mutex_init(&fs_info->reloc_mutex);
2782 	mutex_init(&fs_info->delalloc_root_mutex);
2783 	mutex_init(&fs_info->zoned_meta_io_lock);
2784 	mutex_init(&fs_info->zoned_data_reloc_io_lock);
2785 	seqlock_init(&fs_info->profiles_lock);
2786 
2787 	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
2788 	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2789 	btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2790 	btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2791 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2792 				     BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2793 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2794 				     BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2795 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
2796 				     BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2797 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
2798 				     BTRFS_LOCKDEP_TRANS_COMPLETED);
2799 
2800 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2801 	INIT_LIST_HEAD(&fs_info->space_info);
2802 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2803 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2804 	INIT_LIST_HEAD(&fs_info->reclaim_bgs);
2805 	INIT_LIST_HEAD(&fs_info->fully_remapped_bgs);
2806 	INIT_LIST_HEAD(&fs_info->zone_active_bgs);
2807 #ifdef CONFIG_BTRFS_DEBUG
2808 	INIT_LIST_HEAD(&fs_info->allocated_roots);
2809 	INIT_LIST_HEAD(&fs_info->allocated_ebs);
2810 	spin_lock_init(&fs_info->eb_leak_lock);
2811 #endif
2812 	fs_info->mapping_tree = RB_ROOT_CACHED;
2813 	rwlock_init(&fs_info->mapping_tree_lock);
2814 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2815 			     BTRFS_BLOCK_RSV_GLOBAL);
2816 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2817 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2818 	btrfs_init_block_rsv(&fs_info->remap_block_rsv, BTRFS_BLOCK_RSV_REMAP);
2819 	btrfs_init_block_rsv(&fs_info->treelog_rsv, BTRFS_BLOCK_RSV_TREELOG);
2820 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2821 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2822 			     BTRFS_BLOCK_RSV_DELOPS);
2823 	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2824 			     BTRFS_BLOCK_RSV_DELREFS);
2825 
2826 	atomic_set(&fs_info->async_delalloc_pages, 0);
2827 	atomic_set(&fs_info->defrag_running, 0);
2828 	atomic_set(&fs_info->nr_delayed_iputs, 0);
2829 	atomic64_set(&fs_info->tree_mod_seq, 0);
2830 	fs_info->global_root_tree = RB_ROOT;
2831 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2832 	fs_info->metadata_ratio = 0;
2833 	fs_info->defrag_inodes = RB_ROOT;
2834 	atomic64_set(&fs_info->free_chunk_space, 0);
2835 	fs_info->tree_mod_log = RB_ROOT;
2836 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2837 	btrfs_init_ref_verify(fs_info);
2838 
2839 	fs_info->thread_pool_size = min_t(unsigned long,
2840 					  num_online_cpus() + 2, 8);
2841 
2842 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2843 	spin_lock_init(&fs_info->ordered_root_lock);
2844 
2845 	btrfs_init_scrub(fs_info);
2846 	btrfs_init_balance(fs_info);
2847 	btrfs_init_async_reclaim_work(fs_info);
2848 	btrfs_init_extent_map_shrinker_work(fs_info);
2849 
2850 	rwlock_init(&fs_info->block_group_cache_lock);
2851 	fs_info->block_group_cache_tree = RB_ROOT_CACHED;
2852 
2853 	btrfs_extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2854 				  IO_TREE_FS_EXCLUDED_EXTENTS);
2855 
2856 	mutex_init(&fs_info->ordered_operations_mutex);
2857 	mutex_init(&fs_info->tree_log_mutex);
2858 	mutex_init(&fs_info->chunk_mutex);
2859 	mutex_init(&fs_info->transaction_kthread_mutex);
2860 	mutex_init(&fs_info->cleaner_mutex);
2861 	mutex_init(&fs_info->remap_mutex);
2862 	mutex_init(&fs_info->ro_block_group_mutex);
2863 	init_rwsem(&fs_info->commit_root_sem);
2864 	init_rwsem(&fs_info->cleanup_work_sem);
2865 	init_rwsem(&fs_info->subvol_sem);
2866 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2867 
2868 	btrfs_init_dev_replace_locks(fs_info);
2869 	btrfs_init_qgroup(fs_info);
2870 	btrfs_discard_init(fs_info);
2871 
2872 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2873 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2874 
2875 	init_waitqueue_head(&fs_info->transaction_throttle);
2876 	init_waitqueue_head(&fs_info->transaction_wait);
2877 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2878 	init_waitqueue_head(&fs_info->async_submit_wait);
2879 	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2880 
2881 	/* Usable values until the real ones are cached from the superblock */
2882 	fs_info->nodesize = 4096;
2883 	fs_info->sectorsize = 4096;
2884 	fs_info->sectorsize_bits = ilog2(4096);
2885 	fs_info->stripesize = 4096;
2886 
2887 	/* Default compress algorithm when user does -o compress */
2888 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2889 
2890 	fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
2891 
2892 	spin_lock_init(&fs_info->swapfile_pins_lock);
2893 	fs_info->swapfile_pins = RB_ROOT;
2894 
2895 	fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
2896 	INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
2897 }
2898 
2899 static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2900 {
2901 	int ret;
2902 
2903 	fs_info->sb = sb;
2904 	/* Temporary fixed values for block size until we read the superblock. */
2905 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2906 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2907 
2908 	ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
2909 	if (ret)
2910 		return ret;
2911 
2912 	ret = percpu_counter_init(&fs_info->evictable_extent_maps, 0, GFP_KERNEL);
2913 	if (ret)
2914 		return ret;
2915 
2916 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2917 	if (ret)
2918 		return ret;
2919 
2920 	ret = percpu_counter_init(&fs_info->stats_read_blocks, 0, GFP_KERNEL);
2921 	if (ret)
2922 		return ret;
2923 
2924 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2925 					(1 + ilog2(nr_cpu_ids));
2926 
2927 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2928 	if (ret)
2929 		return ret;
2930 
2931 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2932 			GFP_KERNEL);
2933 	if (ret)
2934 		return ret;
2935 
2936 	btrfs_init_delayed_root(&fs_info->delayed_root);
2937 
2938 	if (sb_rdonly(sb))
2939 		set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2940 	if (btrfs_test_opt(fs_info, IGNOREMETACSUMS))
2941 		set_bit(BTRFS_FS_STATE_SKIP_META_CSUMS, &fs_info->fs_state);
2942 
2943 	return btrfs_alloc_stripe_hash_table(fs_info);
2944 }
2945 
2946 static int btrfs_uuid_rescan_kthread(void *data)
2947 {
2948 	struct btrfs_fs_info *fs_info = data;
2949 	int ret;
2950 
2951 	/*
2952 	 * The 1st step is to iterate through the existing UUID tree and
2953 	 * delete all entries that contain outdated data.
2954 	 * The 2nd step is to add all missing entries to the UUID tree.
2955 	 */
2956 	ret = btrfs_uuid_tree_iterate(fs_info);
2957 	if (ret < 0) {
2958 		if (ret != -EINTR)
2959 			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2960 				   ret);
2961 		up(&fs_info->uuid_tree_rescan_sem);
2962 		return ret;
2963 	}
2964 	return btrfs_uuid_scan_kthread(data);
2965 }
2966 
2967 static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2968 {
2969 	struct task_struct *task;
2970 
2971 	down(&fs_info->uuid_tree_rescan_sem);
2972 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2973 	if (IS_ERR(task)) {
2974 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2975 		up(&fs_info->uuid_tree_rescan_sem);
2976 		return PTR_ERR(task);
2977 	}
2978 
2979 	return 0;
2980 }
2981 
2982 static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2983 {
2984 	u64 root_objectid = 0;
2985 	struct btrfs_root *gang[8];
2986 	int ret = 0;
2987 
2988 	while (1) {
2989 		unsigned int found;
2990 
2991 		spin_lock(&fs_info->fs_roots_radix_lock);
2992 		found = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2993 					     (void **)gang, root_objectid,
2994 					     ARRAY_SIZE(gang));
2995 		if (!found) {
2996 			spin_unlock(&fs_info->fs_roots_radix_lock);
2997 			break;
2998 		}
2999 		root_objectid = btrfs_root_id(gang[found - 1]) + 1;
3000 
3001 		for (int i = 0; i < found; i++) {
3002 			/* Avoid grabbing roots in dead_roots. */
3003 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3004 				gang[i] = NULL;
3005 				continue;
3006 			}
3007 			/* Grab all the search results for later use. */
3008 			gang[i] = btrfs_grab_root(gang[i]);
3009 		}
3010 		spin_unlock(&fs_info->fs_roots_radix_lock);
3011 
3012 		for (int i = 0; i < found; i++) {
3013 			if (!gang[i])
3014 				continue;
3015 			root_objectid = btrfs_root_id(gang[i]);
3016 			/*
3017 			 * Continue to release the remaining roots after the first
3018 			 * error without cleanup and preserve the first error
3019 			 * for the return.
3020 			 */
3021 			if (!ret)
3022 				ret = btrfs_orphan_cleanup(gang[i]);
3023 			btrfs_put_root(gang[i]);
3024 		}
3025 		if (ret)
3026 			break;
3027 
3028 		root_objectid++;
3029 	}
3030 	return ret;
3031 }
3032 
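/*
 * The loop above is the usual radix tree gang lookup pagination idiom; a
 * minimal generic sketch of the pattern, with hypothetical process() and
 * id_of() helpers:
 *
 *	unsigned int n;
 *	unsigned long next = 0;
 *
 *	while ((n = radix_tree_gang_lookup(&tree, (void **)gang, next,
 *					   ARRAY_SIZE(gang)))) {
 *		for (i = 0; i < n; i++)
 *			process(gang[i]);
 *		next = id_of(gang[n - 1]) + 1;	(resume past the last hit)
 *	}
 *
 * The grab/put dance is what allows fs_roots_radix_lock to be dropped while
 * btrfs_orphan_cleanup() runs on each root.
 */
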
3033 /*
3034  * Mounting logic specific to read-write file systems. Shared by open_ctree
3035  * and btrfs_remount when remounting from read-only to read-write.
3036  */
3037 int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
3038 {
3039 	int ret;
3040 	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
3041 	bool rebuild_free_space_tree = false;
3042 
3043 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3044 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3045 		if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
3046 			btrfs_warn(fs_info,
3047 				   "'clear_cache' option is ignored with extent tree v2");
3048 		else if (btrfs_fs_incompat(fs_info, REMAP_TREE))
3049 			btrfs_warn(fs_info, "'clear_cache' option is ignored with remap tree");
3050 		else
3051 			rebuild_free_space_tree = true;
3052 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3053 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3054 		btrfs_warn(fs_info, "free space tree is invalid");
3055 		rebuild_free_space_tree = true;
3056 	}
3057 
3058 	if (rebuild_free_space_tree) {
3059 		btrfs_info(fs_info, "rebuilding free space tree");
3060 		ret = btrfs_rebuild_free_space_tree(fs_info);
3061 		if (ret) {
3062 			btrfs_warn(fs_info,
3063 				   "failed to rebuild free space tree: %d", ret);
3064 			return ret;
3065 		}
3066 	}
3067 
3068 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3069 	    !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
3070 		btrfs_info(fs_info, "disabling free space tree");
3071 		ret = btrfs_delete_free_space_tree(fs_info);
3072 		if (ret) {
3073 			btrfs_warn(fs_info,
3074 				   "failed to disable free space tree: %d", ret);
3075 			return ret;
3076 		}
3077 	}
3078 
3079 	/*
3080 	 * Before btrfs-progs v6.16.1, mkfs.btrfs could leave free space entries
3081 	 * for deleted temporary chunks. Delete them if they exist.
3082 	 */
3083 	ret = btrfs_delete_orphan_free_space_entries(fs_info);
3084 	if (ret < 0) {
3085 		btrfs_err(fs_info, "failed to delete orphan free space tree entries: %d", ret);
3086 		return ret;
3087 	}
3088 	/*
3089 	 * btrfs_find_orphan_roots() is responsible for finding all the dead
3090 	 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
3091 	 * them into the fs_info->fs_roots_radix tree. This must be done before
3092 	 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
3093 	 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
3094 	 * item before the root's tree is deleted - this means that if we unmount
3095 	 * or crash before the deletion completes, on the next mount we will not
3096 	 * delete what remains of the tree because the orphan item does not
3097 	 * exist anymore, which is what tells us we have a pending deletion.
3098 	 */
3099 	ret = btrfs_find_orphan_roots(fs_info);
3100 	if (ret)
3101 		return ret;
3102 
3103 	ret = btrfs_cleanup_fs_roots(fs_info);
3104 	if (ret)
3105 		return ret;
3106 
3107 	down_read(&fs_info->cleanup_work_sem);
3108 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3109 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3110 		up_read(&fs_info->cleanup_work_sem);
3111 		return ret;
3112 	}
3113 	up_read(&fs_info->cleanup_work_sem);
3114 
3115 	mutex_lock(&fs_info->cleaner_mutex);
3116 	ret = btrfs_recover_relocation(fs_info);
3117 	mutex_unlock(&fs_info->cleaner_mutex);
3118 	if (ret < 0) {
3119 		btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
3120 		return ret;
3121 	}
3122 
3123 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3124 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3125 		btrfs_info(fs_info, "creating free space tree");
3126 		ret = btrfs_create_free_space_tree(fs_info);
3127 		if (ret) {
3128 			btrfs_warn(fs_info,
3129 				"failed to create free space tree: %d", ret);
3130 			return ret;
3131 		}
3132 	}
3133 
3134 	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
3135 		ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
3136 		if (ret)
3137 			return ret;
3138 	}
3139 
3140 	ret = btrfs_resume_balance_async(fs_info);
3141 	if (ret)
3142 		return ret;
3143 
3144 	ret = btrfs_resume_dev_replace_async(fs_info);
3145 	if (ret) {
3146 		btrfs_warn(fs_info, "failed to resume dev_replace");
3147 		return ret;
3148 	}
3149 
3150 	btrfs_qgroup_rescan_resume(fs_info);
3151 
3152 	if (!fs_info->uuid_root) {
3153 		btrfs_info(fs_info, "creating UUID tree");
3154 		ret = btrfs_create_uuid_tree(fs_info);
3155 		if (ret) {
3156 			btrfs_warn(fs_info,
3157 				   "failed to create the UUID tree %d", ret);
3158 			return ret;
3159 		}
3160 	}
3161 
3162 	return 0;
3163 }
3164 
3165 /*
3166  * Do various sanity and dependency checks of different features.
3167  *
3168  * @is_rw_mount:	If the mount is read-write.
3169  *
3170  * This is the place for less strict checks (like for subpage or artificial
3171  * feature dependencies).
3172  *
3173  * For strict checks or possible corruption detection, see
3174  * btrfs_validate_super().
3175  *
3176  * This should be called after btrfs_parse_options(), as some mount options
3177  * (space cache related) can modify on-disk format like free space tree and
3178  * screw up certain feature dependencies.
3179  */
3180 int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
3181 {
3182 	struct btrfs_super_block *disk_super = fs_info->super_copy;
3183 	u64 incompat = btrfs_super_incompat_flags(disk_super);
3184 	const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
3185 	const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
3186 
3187 	if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
3188 		btrfs_err(fs_info,
3189 		"cannot mount because of unknown incompat features (0x%llx)",
3190 		    incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP);
3191 		return -EINVAL;
3192 	}
3193 
3194 	/* Runtime limitation for mixed block groups. */
3195 	if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3196 	    (fs_info->sectorsize != fs_info->nodesize)) {
3197 		btrfs_err(fs_info,
3198 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3199 			fs_info->nodesize, fs_info->sectorsize);
3200 		return -EINVAL;
3201 	}
3202 
3203 	/* Mixed backref is an always-enabled feature. */
3204 	incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3205 
3206 	/* Set compression related flags just in case. */
3207 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3208 		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3209 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3210 		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3211 
3212 	/*
3213 	 * An ancient flag, which should really be marked deprecated.
3214 	 * Such a runtime limitation doesn't really need an incompat flag.
3215 	 */
3216 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
3217 		incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3218 
3219 	if (compat_ro_unsupp && is_rw_mount) {
3220 		btrfs_err(fs_info,
3221 	"cannot mount read-write because of unknown compat_ro features (0x%llx)",
3222 		       compat_ro_unsupp);
3223 		return -EINVAL;
3224 	}
3225 
3226 	/*
3227 	 * If we have unsupported RO compat features, then even though we are
3228 	 * mounted read-only we must not cause any metadata writes, including
3229 	 * log replay, or we could corrupt whatever the new feature requires.
3230 	 */
3231 	if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
3232 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3233 		btrfs_err(fs_info,
3234 "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
3235 			  compat_ro_unsupp);
3236 		return -EINVAL;
3237 	}
3238 
3239 	/*
3240 	 * Artificial limitations for block group tree, to force
3241 	 * block-group-tree to rely on no-holes and free-space-tree.
3242 	 */
3243 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
3244 	    (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
3245 	     !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
3246 		btrfs_err(fs_info,
3247 "block-group-tree feature requires no-holes and free-space-tree features");
3248 		return -EINVAL;
3249 	}
3250 
3251 	/*
3252 	 * Subpage/bs > ps runtime limitation on v1 cache.
3253 	 *
3254 	 * V1 space cache still has some hard-coded PAGE_SIZE usage. Since
3255 	 * we're already defaulting to the v2 cache, there is no need to
3256 	 * bother with v1 as it's going to be deprecated anyway.
3257 	 */
3258 	if (fs_info->sectorsize != PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3259 		btrfs_warn(fs_info,
3260 	"v1 space cache is not supported for page size %lu with sectorsize %u",
3261 			   PAGE_SIZE, fs_info->sectorsize);
3262 		return -EINVAL;
3263 	}
3264 
3265 	/* This can be called by remount, we need to protect the super block. */
3266 	spin_lock(&fs_info->super_lock);
3267 	btrfs_set_super_incompat_flags(disk_super, incompat);
3268 	spin_unlock(&fs_info->super_lock);
3269 
3270 	return 0;
3271 }
3272 
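/*
 * A minimal usage sketch for btrfs_check_features() (mirroring the call
 * made from open_ctree() below, not additional functionality):
 *
 *	ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
 *	if (ret < 0)
 *		goto fail_alloc;
 *
 * Unknown incompat bits always fail the mount, while unknown compat_ro
 * bits only fail a read-write mount (or a read-only mount that would
 * replay a dirty log).
 */
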
3273 static bool fs_is_full_ro(const struct btrfs_fs_info *fs_info)
3274 {
3275 	if (!sb_rdonly(fs_info->sb))
3276 		return false;
3277 	if (unlikely(fs_info->mount_opt & BTRFS_MOUNT_FULL_RO_MASK))
3278 		return true;
3279 	return false;
3280 }
3281 
3282 int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices)
3283 {
3284 	u32 sectorsize;
3285 	u32 nodesize;
3286 	u32 stripesize;
3287 	u64 generation;
3288 	u16 csum_type;
3289 	struct btrfs_super_block *disk_super;
3290 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3291 	struct btrfs_root *tree_root;
3292 	struct btrfs_root *chunk_root;
3293 	struct btrfs_root *remap_root;
3294 	int ret;
3295 	int level;
3296 
3297 	ret = init_mount_fs_info(fs_info, sb);
3298 	if (ret)
3299 		goto fail;
3300 
3301 	/* These need to be init'ed before we start creating inodes and such. */
3302 	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
3303 				     GFP_KERNEL);
3304 	fs_info->tree_root = tree_root;
3305 	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
3306 				      GFP_KERNEL);
3307 	fs_info->chunk_root = chunk_root;
3308 	if (!tree_root || !chunk_root) {
3309 		ret = -ENOMEM;
3310 		goto fail;
3311 	}
3312 
3313 	ret = btrfs_init_btree_inode(sb);
3314 	if (ret)
3315 		goto fail;
3316 
3317 	invalidate_bdev(fs_devices->latest_dev->bdev);
3318 
3319 	/*
3320 	 * Read super block and check the signature bytes only
3321 	 */
3322 	disk_super = btrfs_read_disk_super(fs_devices->latest_dev->bdev, 0, false);
3323 	if (IS_ERR(disk_super)) {
3324 		ret = PTR_ERR(disk_super);
3325 		goto fail_alloc;
3326 	}
3327 
3328 	btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
3329 	/*
3330 	 * Verify the checksum type first; if that or the checksum value is
3331 	 * corrupted, we'll find out here.
3332 	 */
3333 	csum_type = btrfs_super_csum_type(disk_super);
3334 	if (!btrfs_supported_super_csum(csum_type)) {
3335 		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
3336 			  csum_type);
3337 		ret = -EINVAL;
3338 		btrfs_release_disk_super(disk_super);
3339 		goto fail_alloc;
3340 	}
3341 
3342 	fs_info->csum_size = btrfs_super_csum_size(disk_super);
3343 	fs_info->csum_type = csum_type;
3344 
3345 	btrfs_init_csum_hash(fs_info, csum_type);
3346 
3347 	/*
3348 	 * We want to check the superblock checksum; the csum type is stored inside.
3349 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
3350 	 */
3351 	if (btrfs_check_super_csum(fs_info, disk_super)) {
3352 		btrfs_err(fs_info, "superblock checksum mismatch");
3353 		ret = -EINVAL;
3354 		btrfs_release_disk_super(disk_super);
3355 		goto fail_alloc;
3356 	}
3357 
3358 	/*
3359 	 * super_copy is zeroed at allocation time and we never touch the
3360 	 * following bytes up to INFO_SIZE; the checksum is calculated from
3361 	 * the whole block of INFO_SIZE.
3362 	 */
3363 	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
3364 	btrfs_release_disk_super(disk_super);
3365 
3366 	disk_super = fs_info->super_copy;
3367 
3368 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
3369 	       sizeof(*fs_info->super_for_commit));
3370 
3371 	ret = btrfs_validate_mount_super(fs_info);
3372 	if (ret) {
3373 		btrfs_err(fs_info, "superblock contains fatal errors");
3374 		ret = -EINVAL;
3375 		goto fail_alloc;
3376 	}
3377 
3378 	if (!btrfs_super_root(disk_super)) {
3379 		btrfs_err(fs_info, "invalid superblock tree root bytenr");
3380 		ret = -EINVAL;
3381 		goto fail_alloc;
3382 	}
3383 
3384 	/* Check the FS state to see whether the FS is broken. */
3385 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3386 		WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
3387 
3388 	/* If the fs has any rescue options, no transaction is allowed. */
3389 	if (fs_is_full_ro(fs_info))
3390 		WRITE_ONCE(fs_info->fs_error, -EROFS);
3391 
3392 	/* Set up fs_info before parsing mount options */
3393 	nodesize = btrfs_super_nodesize(disk_super);
3394 	sectorsize = btrfs_super_sectorsize(disk_super);
3395 	stripesize = sectorsize;
3396 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3397 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3398 
3399 	fs_info->nodesize = nodesize;
3400 	fs_info->nodesize_bits = ilog2(nodesize);
3401 	fs_info->sectorsize = sectorsize;
3402 	fs_info->sectorsize_bits = ilog2(sectorsize);
3403 	fs_info->block_min_order = ilog2(round_up(sectorsize, PAGE_SIZE) >> PAGE_SHIFT);
3404 	fs_info->block_max_order = ilog2((BITS_PER_LONG << fs_info->sectorsize_bits) >> PAGE_SHIFT);
3405 	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
3406 	fs_info->stripesize = stripesize;
3407 	fs_info->fs_devices->fs_info = fs_info;
3408 
3409 	if (fs_info->sectorsize > PAGE_SIZE)
3410 		btrfs_warn(fs_info,
3411 			   "support for block size %u with page size %lu is experimental, some features may be missing",
3412 			   fs_info->sectorsize, PAGE_SIZE);
3413 	/*
3414 	 * Handle the space caching options appropriately now that we have the
3415 	 * super block loaded and validated.
3416 	 */
3417 	btrfs_set_free_space_cache_settings(fs_info);
3418 
3419 	if (!btrfs_check_options(fs_info, &fs_info->mount_opt, sb->s_flags)) {
3420 		ret = -EINVAL;
3421 		goto fail_alloc;
3422 	}
3423 
3424 	ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
3425 	if (ret < 0)
3426 		goto fail_alloc;
3427 
3428 	if (btrfs_super_incompat_flags(disk_super) & BTRFS_FEATURE_INCOMPAT_REMAP_TREE) {
3429 		remap_root = btrfs_alloc_root(fs_info, BTRFS_REMAP_TREE_OBJECTID,
3430 					      GFP_KERNEL);
3431 		fs_info->remap_root = remap_root;
3432 		if (!remap_root) {
3433 			ret = -ENOMEM;
3434 			goto fail_alloc;
3435 		}
3436 	}
3437 
3438 	/*
3439 	 * At this point our mount options are validated; if we set ->max_inline
3440 	 * to something non-standard, make sure we truncate it to sectorsize.
3441 	 */
3442 	fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
3443 
3444 	ret = btrfs_alloc_compress_wsm(fs_info);
3445 	if (ret)
3446 		goto fail_sb_buffer;
3447 	ret = btrfs_init_workqueues(fs_info);
3448 	if (ret)
3449 		goto fail_sb_buffer;
3450 
3451 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3452 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3453 
3454 	/* Update the values for the current filesystem. */
3455 	sb->s_blocksize = sectorsize;
3456 	sb->s_blocksize_bits = blksize_bits(sectorsize);
3457 	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3458 
3459 	mutex_lock(&fs_info->chunk_mutex);
3460 	ret = btrfs_read_sys_array(fs_info);
3461 	mutex_unlock(&fs_info->chunk_mutex);
3462 	if (ret) {
3463 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
3464 		goto fail_sb_buffer;
3465 	}
3466 
3467 	generation = btrfs_super_chunk_root_generation(disk_super);
3468 	level = btrfs_super_chunk_root_level(disk_super);
3469 	ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
3470 			      generation, level);
3471 	if (ret) {
3472 		btrfs_err(fs_info, "failed to read chunk root");
3473 		goto fail_tree_roots;
3474 	}
3475 
3476 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3477 			   offsetof(struct btrfs_header, chunk_tree_uuid),
3478 			   BTRFS_UUID_SIZE);
3479 
3480 	ret = btrfs_read_chunk_tree(fs_info);
3481 	if (ret) {
3482 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3483 		goto fail_tree_roots;
3484 	}
3485 
3486 	/*
3487 	 * At this point we know all the devices that make this filesystem,
3488 	 * including the seed devices but we don't know yet if the replace
3489 	 * target is required. So free devices that are not part of this
3490 	 * filesystem but skip the replace target device which is checked
3491 	 * below in btrfs_init_dev_replace().
3492 	 */
3493 	btrfs_free_extra_devids(fs_devices);
3494 	if (unlikely(!fs_devices->latest_dev->bdev)) {
3495 		btrfs_err(fs_info, "failed to read devices");
3496 		ret = -EIO;
3497 		goto fail_tree_roots;
3498 	}
3499 
3500 	ret = init_tree_roots(fs_info);
3501 	if (ret)
3502 		goto fail_tree_roots;
3503 
3504 	/*
3505 	 * Get zone type information of zoned block devices. This will also
3506 	 * handle emulation of a zoned filesystem if a regular device has the
3507 	 * zoned incompat feature flag set.
3508 	 */
3509 	ret = btrfs_get_dev_zone_info_all_devices(fs_info);
3510 	if (ret) {
3511 		btrfs_err(fs_info,
3512 			  "zoned: failed to read device zone info: %d", ret);
3513 		goto fail_block_groups;
3514 	}
3515 
3516 	/*
3517 	 * If we have a uuid root and we're not being told to rescan we need to
3518 	 * check the generation here so we can set the
3519 	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit.  Otherwise we could commit the
3520 	 * transaction during a balance or the log replay without updating the
3521 	 * uuid generation, and then if we crash we would rescan the uuid tree,
3522 	 * even though it was perfectly fine.
3523 	 */
3524 	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3525 	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3526 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3527 
3528 	if (unlikely(btrfs_verify_dev_items(fs_info))) {
3529 		ret = -EUCLEAN;
3530 		goto fail_block_groups;
3531 	}
3532 	ret = btrfs_verify_dev_extents(fs_info);
3533 	if (ret) {
3534 		btrfs_err(fs_info,
3535 			  "failed to verify dev extents against chunks: %d",
3536 			  ret);
3537 		goto fail_block_groups;
3538 	}
3539 	ret = btrfs_recover_balance(fs_info);
3540 	if (ret) {
3541 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3542 		goto fail_block_groups;
3543 	}
3544 
3545 	ret = btrfs_init_dev_stats(fs_info);
3546 	if (ret) {
3547 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3548 		goto fail_block_groups;
3549 	}
3550 
3551 	ret = btrfs_init_dev_replace(fs_info);
3552 	if (ret) {
3553 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3554 		goto fail_block_groups;
3555 	}
3556 
3557 	ret = btrfs_check_zoned_mode(fs_info);
3558 	if (ret) {
3559 		btrfs_err(fs_info, "failed to initialize zoned mode: %d",
3560 			  ret);
3561 		goto fail_block_groups;
3562 	}
3563 
3564 	ret = btrfs_sysfs_add_fsid(fs_devices);
3565 	if (ret) {
3566 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3567 				ret);
3568 		goto fail_block_groups;
3569 	}
3570 
3571 	ret = btrfs_sysfs_add_mounted(fs_info);
3572 	if (ret) {
3573 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3574 		goto fail_fsdev_sysfs;
3575 	}
3576 
3577 	ret = btrfs_init_space_info(fs_info);
3578 	if (ret) {
3579 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3580 		goto fail_sysfs;
3581 	}
3582 
3583 	ret = btrfs_read_block_groups(fs_info);
3584 	if (ret) {
3585 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3586 		goto fail_sysfs;
3587 	}
3588 
3589 	if (btrfs_fs_incompat(fs_info, REMAP_TREE)) {
3590 		ret = btrfs_populate_fully_remapped_bgs_list(fs_info);
3591 		if (ret) {
3592 			btrfs_err(fs_info, "failed to populate fully_remapped_bgs list: %d", ret);
3593 			goto fail_sysfs;
3594 		}
3595 	}
3596 
3597 	btrfs_zoned_reserve_data_reloc_bg(fs_info);
3598 	btrfs_free_zone_cache(fs_info);
3599 
3600 	btrfs_check_active_zone_reservation(fs_info);
3601 
3602 	if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3603 	    !btrfs_check_rw_degradable(fs_info, NULL)) {
3604 		btrfs_warn(fs_info,
3605 		"writable mount is not allowed due to too many missing devices");
3606 		ret = -EINVAL;
3607 		goto fail_sysfs;
3608 	}
3609 
3610 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
3611 					       "btrfs-cleaner");
3612 	if (IS_ERR(fs_info->cleaner_kthread)) {
3613 		ret = PTR_ERR(fs_info->cleaner_kthread);
3614 		goto fail_sysfs;
3615 	}
3616 
3617 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3618 						   tree_root,
3619 						   "btrfs-transaction");
3620 	if (IS_ERR(fs_info->transaction_kthread)) {
3621 		ret = PTR_ERR(fs_info->transaction_kthread);
3622 		goto fail_cleaner;
3623 	}
3624 
3625 	ret = btrfs_read_qgroup_config(fs_info);
3626 	if (ret)
3627 		goto fail_trans_kthread;
3628 
3629 	if (btrfs_build_ref_tree(fs_info))
3630 		btrfs_err(fs_info, "couldn't build ref tree");
3631 
3632 	/* Do not make disk changes if the FS is broken or nologreplay is given. */
3633 	if (btrfs_super_log_root(disk_super) != 0 &&
3634 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3635 		btrfs_info(fs_info, "start tree-log replay");
3636 		ret = btrfs_replay_log(fs_info, fs_devices);
3637 		if (ret)
3638 			goto fail_qgroup;
3639 	}
3640 
3641 	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3642 	if (IS_ERR(fs_info->fs_root)) {
3643 		ret = PTR_ERR(fs_info->fs_root);
3644 		btrfs_err(fs_info, "failed to read fs tree: %d", ret);
3645 		fs_info->fs_root = NULL;
3646 		goto fail_qgroup;
3647 	}
3648 
3649 	if (sb_rdonly(sb))
3650 		return 0;
3651 
3652 	ret = btrfs_start_pre_rw_mount(fs_info);
3653 	if (ret) {
3654 		close_ctree(fs_info);
3655 		return ret;
3656 	}
3657 	btrfs_discard_resume(fs_info);
3658 
3659 	if (fs_info->uuid_root &&
3660 	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3661 	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
3662 		btrfs_info(fs_info, "checking UUID tree");
3663 		ret = btrfs_check_uuid_tree(fs_info);
3664 		if (ret) {
3665 			btrfs_err(fs_info, "failed to check the UUID tree: %d", ret);
3666 			close_ctree(fs_info);
3667 			return ret;
3668 		}
3669 	}
3670 
3671 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3672 
3673 	/* Kick the cleaner thread so it'll start deleting snapshots. */
3674 	if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
3675 		wake_up_process(fs_info->cleaner_kthread);
3676 
3677 	return 0;
3678 
3679 fail_qgroup:
3680 	btrfs_free_qgroup_config(fs_info);
3681 fail_trans_kthread:
3682 	kthread_stop(fs_info->transaction_kthread);
3683 	btrfs_cleanup_transaction(fs_info);
3684 	btrfs_free_fs_roots(fs_info);
3685 fail_cleaner:
3686 	kthread_stop(fs_info->cleaner_kthread);
3687 
3688 	/*
3689 	 * make sure we're done with the btree inode before we stop our
3690 	 * kthreads
3691 	 */
3692 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3693 
3694 fail_sysfs:
3695 	btrfs_sysfs_remove_mounted(fs_info);
3696 
3697 fail_fsdev_sysfs:
3698 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3699 
3700 fail_block_groups:
3701 	btrfs_put_block_group_cache(fs_info);
3702 
3703 fail_tree_roots:
3704 	if (fs_info->data_reloc_root)
3705 		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3706 	free_root_pointers(fs_info, true);
3707 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3708 
3709 fail_sb_buffer:
3710 	btrfs_stop_all_workers(fs_info);
3711 	btrfs_free_block_groups(fs_info);
3712 fail_alloc:
3713 	btrfs_mapping_tree_free(fs_info);
3714 
3715 	iput(fs_info->btree_inode);
3716 fail:
3717 	ASSERT(ret < 0);
3718 	return ret;
3719 }
3720 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3721 
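/*
 * Note on the annotation above (an assumption about the generic error
 * injection framework, not btrfs-specific behavior): ALLOW_ERROR_INJECTION()
 * whitelists open_ctree() for fault injection, so debugging setups can force
 * it to return an errno and exercise the fail_* unwind labels without
 * needing an actually corrupted device.
 */
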
3722 static void btrfs_end_super_write(struct bio *bio)
3723 {
3724 	struct btrfs_device *device = bio->bi_private;
3725 	struct folio_iter fi;
3726 
3727 	bio_for_each_folio_all(fi, bio) {
3728 		if (bio->bi_status) {
3729 			btrfs_warn_rl(device->fs_info,
3730 				"lost super block write due to IO error on %s (%d)",
3731 				btrfs_dev_name(device),
3732 				blk_status_to_errno(bio->bi_status));
3733 			btrfs_dev_stat_inc_and_print(device,
3734 						     BTRFS_DEV_STAT_WRITE_ERRS);
3735 			/* Ensure failure if the primary sb fails. */
3736 			if (bio->bi_opf & REQ_FUA)
3737 				atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR,
3738 					   &device->sb_write_errors);
3739 			else
3740 				atomic_inc(&device->sb_write_errors);
3741 		}
3742 		folio_unlock(fi.folio);
3743 		folio_put(fi.folio);
3744 	}
3745 
3746 	bio_put(bio);
3747 }
3748 
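/*
 * Error accounting note for the endio handler above (a reading aid, not
 * original code): only the primary super block is written with REQ_FUA, so
 * a failed FUA write bumps sb_write_errors by
 * BTRFS_SUPER_PRIMARY_WRITE_ERROR, a value chosen to dominate ordinary
 * per-mirror error counts (see the check in wait_dev_supers() below).  That
 * lets a single atomic counter distinguish "the primary copy failed" from
 * "some mirror failed".
 */
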
3749 /*
3750  * Write superblock @sb to the @device. Do not wait for completion; all the
3751  * folios we use for writing are locked.
3752  *
3753  * Write @max_mirrors copies of the superblock, where 0 means write all
3754  * copies that fit within the expected device size at commit time. Note that
3755  * @max_mirrors must be the same for the write and wait phases.
3756  *
3757  * Return 0 if fewer errors occurred than copies attempted, -1 otherwise.
3758  */
3759 static int write_dev_supers(struct btrfs_device *device,
3760 			    struct btrfs_super_block *sb, int max_mirrors)
3761 {
3762 	struct btrfs_fs_info *fs_info = device->fs_info;
3763 	struct address_space *mapping = device->bdev->bd_mapping;
3764 	int i;
3765 	int ret;
3766 	u64 bytenr, bytenr_orig;
3767 
3768 	atomic_set(&device->sb_write_errors, 0);
3769 
3770 	if (max_mirrors == 0)
3771 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3772 
3773 	for (i = 0; i < max_mirrors; i++) {
3774 		struct folio *folio;
3775 		struct bio *bio;
3776 		struct btrfs_super_block *disk_super;
3777 		size_t offset;
3778 
3779 		bytenr_orig = btrfs_sb_offset(i);
3780 		ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
3781 		if (ret == -ENOENT) {
3782 			continue;
3783 		} else if (ret < 0) {
3784 			btrfs_err(device->fs_info,
3785 			  "couldn't get super block location for mirror %d error %d",
3786 			  i, ret);
3787 			atomic_inc(&device->sb_write_errors);
3788 			continue;
3789 		}
3790 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3791 		    device->commit_total_bytes)
3792 			break;
3793 
3794 		btrfs_set_super_bytenr(sb, bytenr_orig);
3795 
3796 		btrfs_csum(fs_info->csum_type, (const u8 *)sb + BTRFS_CSUM_SIZE,
3797 			   BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, sb->csum);
3798 
3799 		folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT,
3800 					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3801 					    GFP_NOFS);
3802 		if (IS_ERR(folio)) {
3803 			btrfs_err(device->fs_info,
3804 			  "couldn't get super block page for bytenr %llu error %ld",
3805 			  bytenr, PTR_ERR(folio));
3806 			atomic_inc(&device->sb_write_errors);
3807 			continue;
3808 		}
3809 
3810 		offset = offset_in_folio(folio, bytenr);
3811 		disk_super = folio_address(folio) + offset;
3812 		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3813 
3814 		/*
3815 		 * Directly use bios here instead of relying on the page cache
3816 		 * to do I/O, so we don't lose the ability to do integrity
3817 		 * checking.
3818 		 */
3819 		bio = bio_alloc(device->bdev, 1,
3820 				REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
3821 				GFP_NOFS);
3822 		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3823 		bio->bi_private = device;
3824 		bio->bi_end_io = btrfs_end_super_write;
3825 		bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset);
3826 
3827 		/*
3828 		 * We FUA only the first super block.  The others we allow to
3829 		 * go down lazily and there's a short window where the on-disk
3830 		 * copies might still contain the older version.
3831 		 */
3832 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3833 			bio->bi_opf |= REQ_FUA;
3834 		submit_bio(bio);
3835 
3836 		if (btrfs_advance_sb_log(device, i))
3837 			atomic_inc(&device->sb_write_errors);
3838 	}
3839 	return atomic_read(&device->sb_write_errors) < i ? 0 : -1;
3840 }
3841 
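/*
 * For reference (offsets assumed from the btrfs on-disk format, they are
 * not defined in this file): btrfs_sb_offset() places the up to
 * BTRFS_SUPER_MIRROR_MAX copies at fixed locations, so on a large enough
 * device the loop above writes:
 *
 *	mirror 0: 64 KiB   (primary, REQ_FUA unless nobarrier is set)
 *	mirror 1: 64 MiB
 *	mirror 2: 256 GiB
 *
 * Copies that would not fit below commit_total_bytes are skipped by the
 * break in the loop.
 */
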
3842 /*
3843  * Wait for completion of the super block writes submitted by
3844  * write_dev_supers(); @max_mirrors must be the same for both phases.
3845  *
3846  * Return -1 if the primary super block write failed or if no super block
3847  * copies were written, 0 otherwise.
3848  */
3849 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3850 {
3851 	int i;
3852 	int errors = 0;
3853 	bool primary_failed = false;
3854 	int ret;
3855 	u64 bytenr;
3856 
3857 	if (max_mirrors == 0)
3858 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3859 
3860 	for (i = 0; i < max_mirrors; i++) {
3861 		struct folio *folio;
3862 
3863 		ret = btrfs_sb_log_location(device, i, READ, &bytenr);
3864 		if (ret == -ENOENT) {
3865 			break;
3866 		} else if (ret < 0) {
3867 			errors++;
3868 			if (i == 0)
3869 				primary_failed = true;
3870 			continue;
3871 		}
3872 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3873 		    device->commit_total_bytes)
3874 			break;
3875 
3876 		folio = filemap_get_folio(device->bdev->bd_mapping,
3877 					  bytenr >> PAGE_SHIFT);
3878 		/* If the folio has been removed, then we know it completed. */
3879 		if (IS_ERR(folio))
3880 			continue;
3881 
3882 		/* Folio will be unlocked once the write completes. */
3883 		folio_wait_locked(folio);
3884 		folio_put(folio);
3885 	}
3886 
3887 	errors += atomic_read(&device->sb_write_errors);
3888 	if (errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR)
3889 		primary_failed = true;
3890 	if (primary_failed) {
3891 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3892 			  device->devid);
3893 		return -1;
3894 	}
3895 
3896 	return errors < i ? 0 : -1;
3897 }
3898 
3899 /*
3900  * Endio for write_dev_flush(); this wakes anyone waiting for the barrier
3901  * when it is done.
3902  */
3903 static void btrfs_end_empty_barrier(struct bio *bio)
3904 {
3905 	bio_uninit(bio);
3906 	complete(bio->bi_private);
3907 }
3908 
3909 /*
3910  * Submit a flush request to the device if it supports it. Error handling is
3911  * done in the waiting counterpart.
3912  */
3913 static void write_dev_flush(struct btrfs_device *device)
3914 {
3915 	struct bio *bio = &device->flush_bio;
3916 
3917 	clear_bit(BTRFS_DEV_STATE_FLUSH_FAILED, &device->dev_state);
3918 
3919 	bio_init(bio, device->bdev, NULL, 0,
3920 		 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
3921 	bio->bi_end_io = btrfs_end_empty_barrier;
3922 	init_completion(&device->flush_wait);
3923 	bio->bi_private = &device->flush_wait;
3924 	submit_bio(bio);
3925 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3926 }
3927 
3928 /*
3929  * If the flush bio has been submitted by write_dev_flush, wait for it.
3930  * Return true for any error, and false otherwise.
3931  */
3932 static bool wait_dev_flush(struct btrfs_device *device)
3933 {
3934 	struct bio *bio = &device->flush_bio;
3935 
3936 	if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3937 		return false;
3938 
3939 	wait_for_completion_io(&device->flush_wait);
3940 
3941 	if (bio->bi_status) {
3942 		set_bit(BTRFS_DEV_STATE_FLUSH_FAILED, &device->dev_state);
3943 		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS);
3944 		return true;
3945 	}
3946 
3947 	return false;
3948 }
3949 
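/*
 * write_dev_flush() and wait_dev_flush() form a split barrier (a reading
 * aid, not original code): submission sets BTRFS_DEV_STATE_FLUSH_SENT and
 * the wait side consumes it with test_and_clear_bit(), so a device that
 * never had a flush submitted reports no error here.  A failed flush is
 * recorded in BTRFS_DEV_STATE_FLUSH_FAILED and in the device stats.
 */
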
3950 /*
3951  * Send an empty flush down to each device in parallel,
3952  * then wait for them all to complete.
3953  */
3954 static int barrier_all_devices(struct btrfs_fs_info *info)
3955 {
3956 	struct list_head *head;
3957 	struct btrfs_device *dev;
3958 	int errors_wait = 0;
3959 
3960 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3961 	/* send down all the barriers */
3962 	head = &info->fs_devices->devices;
3963 	list_for_each_entry(dev, head, dev_list) {
3964 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3965 			continue;
3966 		if (!dev->bdev)
3967 			continue;
3968 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3969 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3970 			continue;
3971 
3972 		write_dev_flush(dev);
3973 	}
3974 
3975 	/* wait for all the barriers */
3976 	list_for_each_entry(dev, head, dev_list) {
3977 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3978 			continue;
3979 		if (!dev->bdev) {
3980 			errors_wait++;
3981 			continue;
3982 		}
3983 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3984 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3985 			continue;
3986 
3987 		if (wait_dev_flush(dev))
3988 			errors_wait++;
3989 	}
3990 
3991 	/*
3992 	 * If any flushes failed, check whether the remaining devices still
3993 	 * satisfy the redundancy required for a writable filesystem.
3994 	 */
3995 	if (unlikely(errors_wait && !btrfs_check_rw_degradable(info, NULL)))
3996 		return -EIO;
3997 
3998 	return 0;
3999 }
4000 
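/*
 * Ordering note (inferred from the caller below, not original code):
 * write_all_supers() issues these cache flushes before writing the new
 * super blocks, so by the time a super block referencing the new tree
 * roots reaches stable storage, the metadata it points to is no longer
 * sitting in a volatile device cache.
 */
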
4001 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
4002 {
4003 	int raid_type;
4004 	int min_tolerated = INT_MAX;
4005 
4006 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
4007 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
4008 		min_tolerated = min_t(int, min_tolerated,
4009 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
4010 				    tolerated_failures);
4011 
4012 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4013 		if (raid_type == BTRFS_RAID_SINGLE)
4014 			continue;
4015 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
4016 			continue;
4017 		min_tolerated = min_t(int, min_tolerated,
4018 				    btrfs_raid_array[raid_type].
4019 				    tolerated_failures);
4020 	}
4021 
4022 	if (min_tolerated == INT_MAX) {
4023 		btrfs_warn(NULL, "unknown raid flag: %llu", flags);
4024 		min_tolerated = 0;
4025 	}
4026 
4027 	return min_tolerated;
4028 }
4029 
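/*
 * Worked example (tolerated_failures values assumed from btrfs_raid_array):
 * flags containing RAID1 yield 1, RAID6 yields 2, and SINGLE or DUP yield 0.
 * If several profile bits are set in @flags, the minimum wins, so RAID1
 * metadata combined with single data still tolerates 0 barrier failures.
 */
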
4030 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
4031 {
4032 	struct list_head *head;
4033 	struct btrfs_device *dev;
4034 	struct btrfs_super_block *sb;
4035 	struct btrfs_dev_item *dev_item;
4036 	int ret;
4037 	int do_barriers;
4038 	int max_errors;
4039 	int total_errors = 0;
4040 	u64 flags;
4041 
4042 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
4043 
4044 	/*
4045 	 * max_mirrors == 0 indicates we're called from commit_transaction,
4046 	 * not from fsync, where the tree roots in fs_info may not yet be
4047 	 * consistent on disk.
4048 	 */
4049 	if (max_mirrors == 0)
4050 		backup_super_roots(fs_info);
4051 
4052 	sb = fs_info->super_for_commit;
4053 	dev_item = &sb->dev_item;
4054 
4055 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4056 	head = &fs_info->fs_devices->devices;
4057 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
4058 
4059 	if (do_barriers) {
4060 		ret = barrier_all_devices(fs_info);
4061 		if (ret) {
4062 			mutex_unlock(
4063 				&fs_info->fs_devices->device_list_mutex);
4064 			btrfs_handle_fs_error(fs_info, ret,
4065 					      "errors while submitting device barriers.");
4066 			return ret;
4067 		}
4068 	}
4069 
4070 	list_for_each_entry(dev, head, dev_list) {
4071 		if (!dev->bdev) {
4072 			total_errors++;
4073 			continue;
4074 		}
4075 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4076 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4077 			continue;
4078 
4079 		btrfs_set_stack_device_generation(dev_item, 0);
4080 		btrfs_set_stack_device_type(dev_item, dev->type);
4081 		btrfs_set_stack_device_id(dev_item, dev->devid);
4082 		btrfs_set_stack_device_total_bytes(dev_item,
4083 						   dev->commit_total_bytes);
4084 		btrfs_set_stack_device_bytes_used(dev_item,
4085 						  dev->commit_bytes_used);
4086 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
4087 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
4088 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
4089 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
4090 		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
4091 		       BTRFS_FSID_SIZE);
4092 
4093 		flags = btrfs_super_flags(sb);
4094 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
4095 
4096 		ret = btrfs_validate_write_super(fs_info, sb);
4097 		if (unlikely(ret < 0)) {
4098 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4099 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
4100 				"unexpected superblock corruption detected");
4101 			return -EUCLEAN;
4102 		}
4103 
4104 		ret = write_dev_supers(dev, sb, max_mirrors);
4105 		if (ret)
4106 			total_errors++;
4107 	}
4108 	if (unlikely(total_errors > max_errors)) {
4109 		btrfs_err(fs_info, "%d errors while writing supers",
4110 			  total_errors);
4111 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4112 
4113 		/* FUA is masked off if unsupported and can't be the reason */
4114 		btrfs_handle_fs_error(fs_info, -EIO,
4115 				      "%d errors while writing supers",
4116 				      total_errors);
4117 		return -EIO;
4118 	}
4119 
4120 	total_errors = 0;
4121 	list_for_each_entry(dev, head, dev_list) {
4122 		if (!dev->bdev)
4123 			continue;
4124 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4125 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4126 			continue;
4127 
4128 		ret = wait_dev_supers(dev, max_mirrors);
4129 		if (ret)
4130 			total_errors++;
4131 	}
4132 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4133 	if (unlikely(total_errors > max_errors)) {
4134 		btrfs_handle_fs_error(fs_info, -EIO,
4135 				      "%d errors while writing supers",
4136 				      total_errors);
4137 		return -EIO;
4138 	}
4139 	return 0;
4140 }
4141 
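/*
 * Error tolerance note (a reading aid, not original code): max_errors is
 * btrfs_super_num_devices() - 1, so a phase only aborts with -EIO when the
 * super block writes (or waits) failed on every device.  Profile based
 * redundancy is enforced separately, by the btrfs_check_rw_degradable()
 * call in the barrier phase above.
 */
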
4142 /* Drop a fs root from the radix tree and free it. */
4143 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
4144 				  struct btrfs_root *root)
4145 {
4146 	bool drop_ref = false;
4147 
4148 	spin_lock(&fs_info->fs_roots_radix_lock);
4149 	radix_tree_delete(&fs_info->fs_roots_radix,
4150 			  (unsigned long)btrfs_root_id(root));
4151 	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
4152 		drop_ref = true;
4153 	spin_unlock(&fs_info->fs_roots_radix_lock);
4154 
4155 	if (BTRFS_FS_ERROR(fs_info)) {
4156 		ASSERT(root->log_root == NULL);
4157 		if (root->reloc_root) {
4158 			btrfs_put_root(root->reloc_root);
4159 			root->reloc_root = NULL;
4160 		}
4161 	}
4162 
4163 	if (drop_ref)
4164 		btrfs_put_root(root);
4165 }
4166 
4167 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4168 {
4169 	mutex_lock(&fs_info->cleaner_mutex);
4170 	btrfs_run_delayed_iputs(fs_info);
4171 	mutex_unlock(&fs_info->cleaner_mutex);
4172 	wake_up_process(fs_info->cleaner_kthread);
4173 
4174 	/* Wait until ongoing cleanup work is done. */
4175 	down_write(&fs_info->cleanup_work_sem);
4176 	up_write(&fs_info->cleanup_work_sem);
4177 
4178 	return btrfs_commit_current_transaction(fs_info->tree_root);
4179 }
4180 
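/*
 * A minimal usage sketch (mirroring the unmount path in close_ctree()
 * below, not additional functionality):
 *
 *	if (!btrfs_is_shutdown(fs_info)) {
 *		ret = btrfs_commit_super(fs_info);
 *		if (ret)
 *			btrfs_err(fs_info, "commit super block returned %d", ret);
 *	}
 *
 * Running the delayed iputs and draining cleanup_work_sem first means the
 * final commit sees a settled filesystem state.
 */
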
4181 static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
4182 {
4183 	struct btrfs_transaction *trans;
4184 	struct btrfs_transaction *tmp;
4185 	bool found = false;
4186 
4187 	/*
4188 	 * This function is only called at the very end of close_ctree(),
4189 	 * thus no other running transaction, no need to take trans_lock.
4190 	 * thus there is no other running transaction and no need to take trans_lock.
4191 	ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
4192 	list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
4193 		struct extent_state *cached = NULL;
4194 		u64 dirty_bytes = 0;
4195 		u64 cur = 0;
4196 		u64 found_start;
4197 		u64 found_end;
4198 
4199 		found = true;
4200 		while (btrfs_find_first_extent_bit(&trans->dirty_pages, cur,
4201 						   &found_start, &found_end,
4202 						   EXTENT_DIRTY, &cached)) {
4203 			dirty_bytes += found_end + 1 - found_start;
4204 			cur = found_end + 1;
4205 		}
4206 		btrfs_warn(fs_info,
4207 	"transaction %llu (with %llu dirty metadata bytes) is not committed",
4208 			   trans->transid, dirty_bytes);
4209 		btrfs_cleanup_one_transaction(trans);
4210 
4211 		if (trans == fs_info->running_transaction)
4212 			fs_info->running_transaction = NULL;
4213 		list_del_init(&trans->list);
4214 
4215 		btrfs_put_transaction(trans);
4216 		trace_btrfs_transaction_commit(fs_info);
4217 	}
4218 	ASSERT(!found);
4219 }
4220 
4221 void __cold close_ctree(struct btrfs_fs_info *fs_info)
4222 {
4223 	int ret;
4224 
4225 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4226 
4227 	/*
4228 	 * If we had UNFINISHED_DROPS we could still be processing them, so
4229 	 * clear that bit and wake up relocation so it can stop.
4230 	 * We must do this before stopping the block group reclaim task, because
4231 	 * at btrfs_relocate_block_group() we wait for this bit, and after the
4232 	 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
4233 	 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
4234 	 * return 1.
4235 	 */
4236 	btrfs_wake_unfinished_drop(fs_info);
4237 
4238 	/*
4239 	 * We may have the reclaim task running and relocating a data block group,
4240 	 * in which case it may create delayed iputs. So stop it before we park
4241 	 * the cleaner kthread otherwise we can get new delayed iputs after
4242 	 * parking the cleaner, and that can make the async reclaim task hang
4243 	 * if it's waiting for delayed iputs to complete, since the cleaner is
4244 	 * parked and can not run delayed iputs - this will make us hang when
4245 	 * trying to stop the async reclaim task.
4246 	 */
4247 	cancel_work_sync(&fs_info->reclaim_bgs_work);
4248 	/*
4249 	 * We don't want the cleaner to start new transactions, add more delayed
4250 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4251 	 * because that frees the task_struct, and the transaction kthread might
4252 	 * still try to wake up the cleaner.
4253 	 */
4254 	kthread_park(fs_info->cleaner_kthread);
4255 
4256 	/* wait for the qgroup rescan worker to stop */
4257 	btrfs_qgroup_wait_for_completion(fs_info, false);
4258 
4259 	/* wait for the uuid_scan task to finish */
4260 	down(&fs_info->uuid_tree_rescan_sem);
4261 	/* Avoid complaints from lockdep et al., set sem back to initial state. */
4262 	up(&fs_info->uuid_tree_rescan_sem);
4263 
4264 	/* pause restriper - we want to resume on mount */
4265 	btrfs_pause_balance(fs_info);
4266 
4267 	btrfs_dev_replace_suspend_for_unmount(fs_info);
4268 
4269 	btrfs_scrub_cancel(fs_info);
4270 
4271 	/* wait for any defraggers to finish */
4272 	wait_event(fs_info->transaction_wait,
4273 		   (atomic_read(&fs_info->defrag_running) == 0));
4274 
4275 	/* clear out the rbtree of defraggable inodes */
4276 	btrfs_cleanup_defrag_inodes(fs_info);
4277 
4278 	/*
4279 	 * Handle the error fs first, as it will flush and wait for all ordered
4280 	 * extents.  This will generate delayed iputs, thus we want to handle
4281 	 * it first.
4282 	 */
4283 	if (unlikely(BTRFS_FS_ERROR(fs_info)))
4284 		btrfs_error_commit_super(fs_info);
4285 
4286 	/*
4287 	 * Wait for any fixup workers to complete.
4288 	 * If we don't wait for them here and they are still running by the time
4289 	 * we call kthread_stop() against the cleaner kthread further below, we
4290 	 * get a use-after-free on the cleaner because the fixup worker adds an
4291 	 * inode to the list of delayed iputs and then attempts to wake up the
4292 	 * cleaner kthread, which was already stopped and destroyed. We already
4293 	 * parked the cleaner, but below we run all pending delayed iputs.
4294 	 */
4295 	btrfs_flush_workqueue(fs_info->fixup_workers);
4296 	/*
4297 	 * Similar case here, we have to wait for delalloc workers before we
4298 	 * proceed below and stop the cleaner kthread, otherwise we trigger a
4299 	 * use-after-free on the cleaner kthread task_struct when a delalloc
4300 	 * worker running submit_compressed_extents() adds a delayed iput, which
4301 	 * does a wake up on the cleaner kthread, which will have been freed by
4302 	 * the time we call kthread_stop() below.
4303 	 */
4304 	btrfs_flush_workqueue(fs_info->delalloc_workers);
4305 
4306 	/*
4307 	 * We can have ordered extents getting their last reference dropped from
4308 	 * the fs_info->workers queue because for async writes for data bios we
4309 	 * queue a work for that queue, at btrfs_wq_submit_bio(), that runs
4310 	 * run_one_async_done() which calls btrfs_bio_end_io() in case the bio
4311 	 * has an error, and that later function can do the final
4312 	 * btrfs_put_ordered_extent() on the ordered extent attached to the bio,
4313 	 * which adds a delayed iput for the inode. So we must flush the queue
4314 	 * so that we don't have delayed iputs after committing the current
4315 	 * transaction below and stopping the cleaner and transaction kthreads.
4316 	 */
4317 	btrfs_flush_workqueue(fs_info->workers);
4318 
4319 	/*
4320 	 * When finishing a compressed write bio we schedule a work queue item
4321 	 * to finish an ordered extent - end_bbio_compressed_write()
4322 	 * calls btrfs_finish_ordered_extent() which in turns does a call to
4323 	 * btrfs_queue_ordered_fn(), and that queues the ordered extent
4324 	 * completion either in the endio_write_workers work queue or in the
4325 	 * fs_info->endio_freespace_worker work queue. We flush those queues
4326 	 * below, so before we flush them we must flush this queue for the
4327 	 * workers of compressed writes.
4328 	 */
4329 	flush_workqueue(fs_info->endio_workers);
4330 
4331 	/*
4332 	 * After we parked the cleaner kthread, ordered extents may have
4333 	 * completed and created new delayed iputs. If one of the async reclaim
4334 	 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
4335 	 * can hang forever trying to stop it, because if a delayed iput is
4336 	 * added after it ran btrfs_run_delayed_iputs() and before it called
4337 	 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
4338 	 * no one else to run iputs.
4339 	 *
4340 	 * So wait for all ongoing ordered extents to complete and then run
4341 	 * delayed iputs. This works because once we reach this point no one
4342 	 * can create new ordered extents, but delayed iputs can still be added
4343 	 * by a reclaim worker (see comments further below).
4344 	 *
4345 	 * Also note that btrfs_wait_ordered_roots() is not safe here, because
4346 	 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
4347 	 * but the delayed iput for the respective inode is made only when doing
4348 	 * the final btrfs_put_ordered_extent() (which must happen at
4349 	 * btrfs_finish_ordered_io() when we are unmounting).
4350 	 */
4351 	btrfs_flush_workqueue(fs_info->endio_write_workers);
4352 	/* Ordered extents for free space inodes. */
4353 	btrfs_flush_workqueue(fs_info->endio_freespace_worker);
4354 	/*
4355 	 * Run delayed iputs in case an async reclaim worker is waiting for them
4356 	 * to be run as mentioned above.
4357 	 */
4358 	btrfs_run_delayed_iputs(fs_info);
4359 
4360 	cancel_work_sync(&fs_info->async_reclaim_work);
4361 	cancel_work_sync(&fs_info->async_data_reclaim_work);
4362 	cancel_work_sync(&fs_info->preempt_reclaim_work);
4363 	cancel_work_sync(&fs_info->em_shrinker_work);
4364 
4365 	/*
4366 	 * Run delayed iputs again because an async reclaim worker may have
4367 	 * added new ones if it was flushing delalloc:
4368 	 *
4369 	 * shrink_delalloc() -> btrfs_start_delalloc_roots() ->
4370 	 *    start_delalloc_inodes() -> btrfs_add_delayed_iput()
4371 	 */
4372 	btrfs_run_delayed_iputs(fs_info);
4373 
4374 	/* There should be no more workload to generate new delayed iputs. */
4375 	set_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state);
4376 
4377 	/* Cancel or finish ongoing discard work */
4378 	btrfs_discard_cleanup(fs_info);
4379 
4380 	if (!sb_rdonly(fs_info->sb)) {
4381 		/*
4382 		 * The cleaner kthread is stopped, so do one final pass over
4383 		 * unused block groups.
4384 		 */
4385 		btrfs_delete_unused_bgs(fs_info);
4386 
4387 		/*
4388 		 * There might be existing delayed inode workers still running
4389 		 * and holding an empty delayed inode item. We must wait for
4390 		 * them to complete first because they can create a transaction.
4391 		 * This happens when someone calls btrfs_balance_delayed_items()
4392 		 * and then a transaction commit runs the same delayed nodes
4393 		 * before any delayed worker has done something with the nodes.
4394 		 * We must wait for any worker here and not at transaction
4395 		 * commit time since that could cause a deadlock.
4396 		 * This is a very rare case.
4397 		 */
4398 		btrfs_flush_workqueue(fs_info->delayed_workers);
4399 
4400 		/*
4401 		 * If the filesystem is shutdown, then an attempt to commit the
4402 		 * super block (or any write) will just fail. Since we freeze
4403 		 * the filesystem before shutting it down, the filesystem is in
4404 		 * a consistent state and we don't need to commit super blocks.
4405 		 */
4406 		if (!btrfs_is_shutdown(fs_info)) {
4407 			ret = btrfs_commit_super(fs_info);
4408 			if (ret)
4409 				btrfs_err(fs_info, "commit super block returned %d", ret);
4410 		}
4411 	}
4412 
4413 	kthread_stop(fs_info->transaction_kthread);
4414 	kthread_stop(fs_info->cleaner_kthread);
4415 
4416 	ASSERT(list_empty(&fs_info->delayed_iputs));
4417 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4418 
4419 	if (btrfs_check_quota_leak(fs_info)) {
4420 		DEBUG_WARN("qgroup reserved space leaked");
4421 		btrfs_err(fs_info, "qgroup reserved space leaked");
4422 	}
4423 
4424 	btrfs_free_qgroup_config(fs_info);
4425 	ASSERT(list_empty(&fs_info->delalloc_roots));
4426 
4427 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4428 		btrfs_info(fs_info, "at unmount delalloc count %lld",
4429 		       percpu_counter_sum(&fs_info->delalloc_bytes));
4430 	}
4431 
4432 	if (percpu_counter_sum(&fs_info->ordered_bytes))
4433 		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4434 			   percpu_counter_sum(&fs_info->ordered_bytes));
4435 
4436 	btrfs_sysfs_remove_mounted(fs_info);
4437 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4438 
4439 	btrfs_put_block_group_cache(fs_info);
4440 
4441 	/*
4442 	 * We must make sure no read request gets submitted
4443 	 * after we stop all the workers.
4444 	 */
4445 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4446 	btrfs_stop_all_workers(fs_info);
4447 
4448 	/* We shouldn't have any transaction open at this point */
4449 	warn_about_uncommitted_trans(fs_info);
4450 
4451 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4452 	free_root_pointers(fs_info, true);
4453 	btrfs_free_fs_roots(fs_info);
4454 
4455 	/*
4456 	 * We must free the block groups after dropping the fs_roots as we could
4457 	 * have had an IO error and have left over tree log blocks that aren't
4458 	 * cleaned up until the fs roots are freed.  This makes the block group
4459 	 * accounting appear to be wrong because there's pending reserved bytes,
4460 	 * so make sure we do the block group cleanup afterwards.
4461 	 */
4462 	btrfs_free_block_groups(fs_info);
4463 
4464 	iput(fs_info->btree_inode);
4465 
4466 	btrfs_mapping_tree_free(fs_info);
4467 }
4468 
4469 void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
4470 			     struct extent_buffer *buf)
4471 {
4472 	struct btrfs_fs_info *fs_info = buf->fs_info;
4473 	u64 transid = btrfs_header_generation(buf);
4474 
4475 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4476 	/*
4477 	 * This is a fast path so only do this check if we have sanity tests
4478 	 * enabled.  Normal people shouldn't be using unmapped buffers as dirty
4479 	 * outside of the sanity tests.
4480 	 */
4481 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4482 		return;
4483 #endif
4484 	/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
4485 	ASSERT(trans->transid == fs_info->generation);
4486 	btrfs_assert_tree_write_locked(buf);
4487 	if (unlikely(transid != fs_info->generation)) {
4488 		btrfs_abort_transaction(trans, -EUCLEAN);
4489 		btrfs_crit(fs_info,
4490 "dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
4491 			   buf->start, transid, fs_info->generation);
4492 	}
4493 	set_extent_buffer_dirty(buf);
4494 }
4495 
4496 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4497 					int flush_delayed)
4498 {
4499 	/*
4500 	 * It looks as though older kernels can get into trouble with this
4501 	 * code; they end up stuck in balance_dirty_pages() forever.
4502 	 */
4503 	int ret;
4504 
4505 	if (current->flags & PF_MEMALLOC)
4506 		return;
4507 
4508 	if (flush_delayed)
4509 		btrfs_balance_delayed_items(fs_info);
4510 
4511 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4512 				     BTRFS_DIRTY_METADATA_THRESH,
4513 				     fs_info->dirty_metadata_batch);
4514 	if (ret > 0) {
4515 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4516 	}
4517 }
4518 
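/*
 * Throttling note (the threshold is BTRFS_DIRTY_METADATA_THRESH, defined
 * elsewhere; a reading aid, not original code): only once the approximate
 * dirty metadata byte count exceeds that threshold are callers pushed into
 * balance_dirty_pages_ratelimited() on the btree inode.  Below it, the
 * percpu counter comparison keeps the common path cheap, and flush_delayed
 * selects whether delayed items are balanced first.
 */
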
4519 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4520 {
4521 	__btrfs_btree_balance_dirty(fs_info, 1);
4522 }
4523 
4524 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4525 {
4526 	__btrfs_btree_balance_dirty(fs_info, 0);
4527 }
4528 
4529 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4530 {
4531 	/* cleanup FS via transaction */
4532 	btrfs_cleanup_transaction(fs_info);
4533 
4534 	down_write(&fs_info->cleanup_work_sem);
4535 	up_write(&fs_info->cleanup_work_sem);
4536 }
4537 
4538 static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4539 {
4540 	struct btrfs_root *gang[8];
4541 	u64 root_objectid = 0;
4542 	int ret;
4543 
4544 	spin_lock(&fs_info->fs_roots_radix_lock);
4545 	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4546 					     (void **)gang, root_objectid,
4547 					     ARRAY_SIZE(gang))) != 0) {
4548 		int i;
4549 
4550 		for (i = 0; i < ret; i++)
4551 			gang[i] = btrfs_grab_root(gang[i]);
4552 		spin_unlock(&fs_info->fs_roots_radix_lock);
4553 
4554 		for (i = 0; i < ret; i++) {
4555 			if (!gang[i])
4556 				continue;
4557 			root_objectid = btrfs_root_id(gang[i]);
4558 			btrfs_free_log(NULL, gang[i]);
4559 			btrfs_put_root(gang[i]);
4560 		}
4561 		root_objectid++;
4562 		spin_lock(&fs_info->fs_roots_radix_lock);
4563 	}
4564 	spin_unlock(&fs_info->fs_roots_radix_lock);
4565 	btrfs_free_log_root_tree(NULL, fs_info);
4566 }
4567 
4568 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4569 {
4570 	struct btrfs_ordered_extent *ordered;
4571 
4572 	spin_lock(&root->ordered_extent_lock);
4573 	/*
4574 	 * This will just short-circuit the ordered completion path, which will
4575 	 * make sure the ordered extent gets properly cleaned up.
4576 	 */
4577 	list_for_each_entry(ordered, &root->ordered_extents,
4578 			    root_extent_list)
4579 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4580 	spin_unlock(&root->ordered_extent_lock);
4581 }
4582 
4583 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4584 {
4585 	struct btrfs_root *root;
4586 	LIST_HEAD(splice);
4587 
4588 	spin_lock(&fs_info->ordered_root_lock);
4589 	list_splice_init(&fs_info->ordered_roots, &splice);
4590 	while (!list_empty(&splice)) {
4591 		root = list_first_entry(&splice, struct btrfs_root,
4592 					ordered_root);
4593 		list_move_tail(&root->ordered_root,
4594 			       &fs_info->ordered_roots);
4595 
4596 		spin_unlock(&fs_info->ordered_root_lock);
4597 		btrfs_destroy_ordered_extents(root);
4598 
4599 		cond_resched();
4600 		spin_lock(&fs_info->ordered_root_lock);
4601 	}
4602 	spin_unlock(&fs_info->ordered_root_lock);
4603 
4604 	/*
4605 	 * We need this here because if we've been flipped read-only we won't
4606 	 * get sync() from the umount, so we need to make sure any ordered
4607 	 * extents that haven't had their dirty pages IO start writeout yet
4608 	 * actually get run and error out properly.
4609 	 */
4610 	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
4611 }
4612 
4613 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4614 {
4615 	struct btrfs_inode *btrfs_inode;
4616 	LIST_HEAD(splice);
4617 
4618 	spin_lock(&root->delalloc_lock);
4619 	list_splice_init(&root->delalloc_inodes, &splice);
4620 
4621 	while (!list_empty(&splice)) {
4622 		struct inode *inode = NULL;
4623 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4624 					       delalloc_inodes);
4625 		btrfs_del_delalloc_inode(btrfs_inode);
4626 		spin_unlock(&root->delalloc_lock);
4627 
4628 		/*
4629 		 * Make sure we get a live inode and that it'll not disappear
4630 		 * meanwhile.
4631 		 */
4632 		inode = igrab(&btrfs_inode->vfs_inode);
4633 		if (inode) {
4634 			unsigned int nofs_flag;
4635 
4636 			nofs_flag = memalloc_nofs_save();
4637 			invalidate_inode_pages2(inode->i_mapping);
4638 			memalloc_nofs_restore(nofs_flag);
4639 			iput(inode);
4640 		}
4641 		spin_lock(&root->delalloc_lock);
4642 	}
4643 	spin_unlock(&root->delalloc_lock);
4644 }
4645 
4646 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4647 {
4648 	struct btrfs_root *root;
4649 	LIST_HEAD(splice);
4650 
4651 	spin_lock(&fs_info->delalloc_root_lock);
4652 	list_splice_init(&fs_info->delalloc_roots, &splice);
4653 	while (!list_empty(&splice)) {
4654 		root = list_first_entry(&splice, struct btrfs_root,
4655 					 delalloc_root);
4656 		root = btrfs_grab_root(root);
4657 		BUG_ON(!root);
4658 		spin_unlock(&fs_info->delalloc_root_lock);
4659 
4660 		btrfs_destroy_delalloc_inodes(root);
4661 		btrfs_put_root(root);
4662 
4663 		spin_lock(&fs_info->delalloc_root_lock);
4664 	}
4665 	spin_unlock(&fs_info->delalloc_root_lock);
4666 }
4667 
4668 static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4669 					 struct extent_io_tree *dirty_pages,
4670 					 int mark)
4671 {
4672 	struct extent_buffer *eb;
4673 	u64 start = 0;
4674 	u64 end;
4675 
4676 	while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end,
4677 					   mark, NULL)) {
4678 		btrfs_clear_extent_bit(dirty_pages, start, end, mark, NULL);
4679 		while (start <= end) {
4680 			eb = find_extent_buffer(fs_info, start);
4681 			start += fs_info->nodesize;
4682 			if (!eb)
4683 				continue;
4684 
4685 			btrfs_tree_lock(eb);
4686 			wait_on_extent_buffer_writeback(eb);
4687 			btrfs_clear_buffer_dirty(NULL, eb);
4688 			btrfs_tree_unlock(eb);
4689 
4690 			free_extent_buffer_stale(eb);
4691 		}
4692 	}
4693 }
4694 
4695 static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4696 					struct extent_io_tree *unpin)
4697 {
4698 	u64 start;
4699 	u64 end;
4700 
4701 	while (1) {
4702 		struct extent_state *cached_state = NULL;
4703 
4704 		/*
4705 		 * btrfs_finish_extent_commit() may get the same range as ours
4706 		 * between btrfs_find_first_extent_bit() and
4707 		 * btrfs_clear_extent_dirty(). Hence, hold the unused_bg_unpin_mutex
4708 		 * to avoid double unpinning the same extent range.
4709 		 */
4710 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4711 		if (!btrfs_find_first_extent_bit(unpin, 0, &start, &end,
4712 						 EXTENT_DIRTY, &cached_state)) {
4713 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4714 			break;
4715 		}
4716 
4717 		btrfs_clear_extent_dirty(unpin, start, end, &cached_state);
4718 		btrfs_free_extent_state(cached_state);
4719 		btrfs_error_unpin_extent_range(fs_info, start, end);
4720 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4721 		cond_resched();
4722 	}
4723 }
4724 
4725 static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4726 {
4727 	struct inode *inode;
4728 
4729 	inode = cache->io_ctl.inode;
4730 	if (inode) {
4731 		unsigned int nofs_flag;
4732 
4733 		nofs_flag = memalloc_nofs_save();
4734 		invalidate_inode_pages2(inode->i_mapping);
4735 		memalloc_nofs_restore(nofs_flag);
4736 
4737 		BTRFS_I(inode)->generation = 0;
4738 		cache->io_ctl.inode = NULL;
4739 		iput(inode);
4740 	}
4741 	ASSERT(cache->io_ctl.pages == NULL);
4742 	btrfs_put_block_group(cache);
4743 }
4744 
4745 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4746 			     struct btrfs_fs_info *fs_info)
4747 {
4748 	struct btrfs_block_group *cache;
4749 
4750 	spin_lock(&cur_trans->dirty_bgs_lock);
4751 	while (!list_empty(&cur_trans->dirty_bgs)) {
4752 		cache = list_first_entry(&cur_trans->dirty_bgs,
4753 					 struct btrfs_block_group,
4754 					 dirty_list);
4755 
4756 		if (!list_empty(&cache->io_list)) {
4757 			spin_unlock(&cur_trans->dirty_bgs_lock);
4758 			list_del_init(&cache->io_list);
4759 			btrfs_cleanup_bg_io(cache);
4760 			spin_lock(&cur_trans->dirty_bgs_lock);
4761 		}
4762 
4763 		list_del_init(&cache->dirty_list);
4764 		spin_lock(&cache->lock);
4765 		cache->disk_cache_state = BTRFS_DC_ERROR;
4766 		spin_unlock(&cache->lock);
4767 
4768 		spin_unlock(&cur_trans->dirty_bgs_lock);
4769 		btrfs_put_block_group(cache);
4770 		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
4771 		spin_lock(&cur_trans->dirty_bgs_lock);
4772 	}
4773 	spin_unlock(&cur_trans->dirty_bgs_lock);
4774 
4775 	/*
4776 	 * Refer to the definition of the io_bgs member for details on why
4777 	 * it's safe to use it without any locking.
4778 	 */
4779 	while (!list_empty(&cur_trans->io_bgs)) {
4780 		cache = list_first_entry(&cur_trans->io_bgs,
4781 					 struct btrfs_block_group,
4782 					 io_list);
4783 
4784 		list_del_init(&cache->io_list);
4785 		spin_lock(&cache->lock);
4786 		cache->disk_cache_state = BTRFS_DC_ERROR;
4787 		spin_unlock(&cache->lock);
4788 		btrfs_cleanup_bg_io(cache);
4789 	}
4790 }
4791 
4792 static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
4793 {
4794 	struct btrfs_root *gang[8];
4795 	int i;
4796 	int ret;
4797 
4798 	spin_lock(&fs_info->fs_roots_radix_lock);
4799 	while (1) {
4800 		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
4801 						 (void **)gang, 0,
4802 						 ARRAY_SIZE(gang),
4803 						 BTRFS_ROOT_TRANS_TAG);
4804 		if (ret == 0)
4805 			break;
4806 		for (i = 0; i < ret; i++) {
4807 			struct btrfs_root *root = gang[i];
4808 
4809 			btrfs_qgroup_free_meta_all_pertrans(root);
4810 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
4811 					(unsigned long)btrfs_root_id(root),
4812 					BTRFS_ROOT_TRANS_TAG);
4813 		}
4814 	}
4815 	spin_unlock(&fs_info->fs_roots_radix_lock);
4816 }
4817 
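/*
 * Release everything still attached to an aborted transaction: dirty block
 * groups, per-device commit lists, delayed refs and the dirty/pinned extent
 * trees. The transaction state is advanced by hand so that anyone blocked
 * on the usual commit stages gets woken up.
 */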
4818 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans)
4819 {
4820 	struct btrfs_fs_info *fs_info = cur_trans->fs_info;
4821 	struct btrfs_device *dev, *tmp;
4822 
4823 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4824 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4825 	ASSERT(list_empty(&cur_trans->io_bgs));
4826 
4827 	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4828 				 post_commit_list) {
4829 		list_del_init(&dev->post_commit_list);
4830 	}
4831 
4832 	btrfs_destroy_delayed_refs(cur_trans);
4833 
4834 	cur_trans->state = TRANS_STATE_COMMIT_START;
4835 	wake_up(&fs_info->transaction_blocked_wait);
4836 
4837 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4838 	wake_up(&fs_info->transaction_wait);
4839 
4840 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4841 				     EXTENT_DIRTY);
4842 	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
4843 
4844 	cur_trans->state = TRANS_STATE_COMPLETED;
4845 	wake_up(&cur_trans->commit_wait);
4846 }
4847 
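/*
 * Error-path teardown of every transaction still on fs_info->trans_list:
 * transactions that already reached commit are waited for, the rest are
 * cleaned up in place via btrfs_cleanup_one_transaction().
 */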
4848 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4849 {
4850 	struct btrfs_transaction *t;
4851 
4852 	mutex_lock(&fs_info->transaction_kthread_mutex);
4853 
4854 	spin_lock(&fs_info->trans_lock);
4855 	while (!list_empty(&fs_info->trans_list)) {
4856 		t = list_first_entry(&fs_info->trans_list,
4857 				     struct btrfs_transaction, list);
4858 		if (t->state >= TRANS_STATE_COMMIT_PREP) {
4859 			refcount_inc(&t->use_count);
4860 			spin_unlock(&fs_info->trans_lock);
4861 			btrfs_wait_for_commit(fs_info, t->transid);
4862 			btrfs_put_transaction(t);
4863 			spin_lock(&fs_info->trans_lock);
4864 			continue;
4865 		}
4866 		if (t == fs_info->running_transaction) {
4867 			t->state = TRANS_STATE_COMMIT_DOING;
4868 			spin_unlock(&fs_info->trans_lock);
4869 			/*
4870 			 * We wait for num_writers to reach 0 since we don't
4871 			 * currently hold an open trans handle for this transaction.
4872 			 */
4873 			wait_event(t->writer_wait,
4874 				   atomic_read(&t->num_writers) == 0);
4875 		} else {
4876 			spin_unlock(&fs_info->trans_lock);
4877 		}
4878 		btrfs_cleanup_one_transaction(t);
4879 
4880 		spin_lock(&fs_info->trans_lock);
4881 		if (t == fs_info->running_transaction)
4882 			fs_info->running_transaction = NULL;
4883 		list_del_init(&t->list);
4884 		spin_unlock(&fs_info->trans_lock);
4885 
4886 		btrfs_put_transaction(t);
4887 		trace_btrfs_transaction_commit(fs_info);
4888 		spin_lock(&fs_info->trans_lock);
4889 	}
4890 	spin_unlock(&fs_info->trans_lock);
4891 	btrfs_destroy_all_ordered_extents(fs_info);
4892 	btrfs_destroy_delayed_inodes(fs_info);
4893 	btrfs_assert_delayed_root_empty(fs_info);
4894 	btrfs_destroy_all_delalloc_inodes(fs_info);
4895 	btrfs_drop_all_logs(fs_info);
4896 	btrfs_free_all_qgroup_pertrans(fs_info);
4897 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4898 
4899 	return 0;
4900 }
4901 
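/*
 * Initialize root->free_objectid from the largest objectid currently stored
 * in the root's tree: search for the highest possible key and use the last
 * item's objectid + 1, clamped to at least BTRFS_FIRST_FREE_OBJECTID.
 */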
4902 int btrfs_init_root_free_objectid(struct btrfs_root *root)
4903 {
4904 	BTRFS_PATH_AUTO_FREE(path);
4905 	int ret;
4906 	struct extent_buffer *l;
4907 	struct btrfs_key search_key;
4908 	struct btrfs_key found_key;
4909 	int slot;
4910 
4911 	path = btrfs_alloc_path();
4912 	if (!path)
4913 		return -ENOMEM;
4914 
4915 	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
4916 	search_key.type = -1;
4917 	search_key.offset = (u64)-1;
4918 	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
4919 	if (ret < 0)
4920 		return ret;
4921 	if (unlikely(ret == 0)) {
4922 		/*
4923 		 * A key with offset -1 was found; a root with such an id would
4924 		 * have to exist, but this is outside the valid range.
4925 		 */
4926 		return -EUCLEAN;
4927 	}
4928 	if (path->slots[0] > 0) {
4929 		slot = path->slots[0] - 1;
4930 		l = path->nodes[0];
4931 		btrfs_item_key_to_cpu(l, &found_key, slot);
4932 		root->free_objectid = max_t(u64, found_key.objectid + 1,
4933 					    BTRFS_FIRST_FREE_OBJECTID);
4934 	} else {
4935 		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
4936 	}
4937 
4938 	return 0;
4939 }
4940 
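/*
 * Hand out the next unused objectid of @root under the objectid_mutex,
 * returning -ENOSPC once root->free_objectid reaches
 * BTRFS_LAST_FREE_OBJECTID.
 */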
4941 int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
4942 {
4943 	int ret;
4944 
4945 	mutex_lock(&root->objectid_mutex);
4946 	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
4947 		btrfs_warn(root->fs_info,
4948 			   "the objectid of root %llu reaches its highest value",
4949 			   btrfs_root_id(root));
4950 		ret = -ENOSPC;
4951 		goto out;
4952 	}
4953 
4954 	*objectid = root->free_objectid++;
4955 	ret = 0;
4956 out:
4957 	mutex_unlock(&root->objectid_mutex);
4958 	return ret;
4959 }
4960
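/*
 * Illustrative usage sketch (not from this file): a caller creating a new
 * inode item would typically reserve its inode number like
 *
 *	u64 objectid;
 *	int ret;
 *
 *	ret = btrfs_get_free_objectid(root, &objectid);
 *	if (ret)
 *		return ret;
 *	// objectid is now unique within this root and can back a new inode
 */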