xref: /linux/fs/btrfs/extent_io.c (revision eed4edda910fe34dfae8c6bfbcf57f4593a54295)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/sched/mm.h>
10 #include <linux/spinlock.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/prefetch.h>
16 #include <linux/fsverity.h>
17 #include "misc.h"
18 #include "extent_io.h"
19 #include "extent-io-tree.h"
20 #include "extent_map.h"
21 #include "ctree.h"
22 #include "btrfs_inode.h"
23 #include "bio.h"
24 #include "locking.h"
25 #include "rcu-string.h"
26 #include "backref.h"
27 #include "disk-io.h"
28 #include "subpage.h"
29 #include "zoned.h"
30 #include "block-group.h"
31 #include "compression.h"
32 #include "fs.h"
33 #include "accessors.h"
34 #include "file-item.h"
35 #include "file.h"
36 #include "dev-replace.h"
37 #include "super.h"
38 #include "transaction.h"
39 
40 static struct kmem_cache *extent_buffer_cache;
41 
42 #ifdef CONFIG_BTRFS_DEBUG
43 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
44 {
45 	struct btrfs_fs_info *fs_info = eb->fs_info;
46 	unsigned long flags;
47 
48 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
49 	list_add(&eb->leak_list, &fs_info->allocated_ebs);
50 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
51 }
52 
53 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
54 {
55 	struct btrfs_fs_info *fs_info = eb->fs_info;
56 	unsigned long flags;
57 
58 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
59 	list_del(&eb->leak_list);
60 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
61 }
62 
63 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
64 {
65 	struct extent_buffer *eb;
66 	unsigned long flags;
67 
68 	/*
69 	 * If we didn't get into open_ctree our allocated_ebs will not be
70 	 * initialized, so just skip this.
71 	 */
72 	if (!fs_info->allocated_ebs.next)
73 		return;
74 
75 	WARN_ON(!list_empty(&fs_info->allocated_ebs));
76 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
77 	while (!list_empty(&fs_info->allocated_ebs)) {
78 		eb = list_first_entry(&fs_info->allocated_ebs,
79 				      struct extent_buffer, leak_list);
80 		pr_err(
81 	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
82 		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
83 		       btrfs_header_owner(eb));
84 		list_del(&eb->leak_list);
85 		kmem_cache_free(extent_buffer_cache, eb);
86 	}
87 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
88 }
89 #else
90 #define btrfs_leak_debug_add_eb(eb)			do {} while (0)
91 #define btrfs_leak_debug_del_eb(eb)			do {} while (0)
92 #endif
93 
94 /*
95  * Structure to record info about the bio being assembled, and other info like
96  * how many bytes remain before the stripe/ordered extent boundary.
97  */
98 struct btrfs_bio_ctrl {
99 	struct btrfs_bio *bbio;
100 	enum btrfs_compression_type compress_type;
101 	u32 len_to_oe_boundary;
102 	blk_opf_t opf;
103 	btrfs_bio_end_io_t end_io_func;
104 	struct writeback_control *wbc;
105 };
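
/*
 * Illustrative sketch of the bio_ctrl lifecycle, condensed from the real
 * caller btrfs_read_folio() further below (not a complete function):
 *
 *	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
 *
 *	bio_ctrl.end_io_func = end_bbio_data_read;
 *	submit_extent_page(&bio_ctrl, disk_bytenr, page, iosize, pg_offset);
 *	(more submit_extent_page() calls may merge into the same bio)
 *	submit_one_bio(&bio_ctrl);
 */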
106 
107 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
108 {
109 	struct btrfs_bio *bbio = bio_ctrl->bbio;
110 
111 	if (!bbio)
112 		return;
113 
114 	/* Caller should ensure the bio has at least some range added */
115 	ASSERT(bbio->bio.bi_iter.bi_size);
116 
117 	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
118 	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
119 		btrfs_submit_compressed_read(bbio);
120 	else
121 		btrfs_submit_bio(bbio, 0);
122 
123 	/* The bbio is owned by the end_io handler now */
124 	bio_ctrl->bbio = NULL;
125 }
126 
127 /*
128  * Submit or fail the current bio in the bio_ctrl structure.
129  */
130 static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
131 {
132 	struct btrfs_bio *bbio = bio_ctrl->bbio;
133 
134 	if (!bbio)
135 		return;
136 
137 	if (ret) {
138 		ASSERT(ret < 0);
139 		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
140 		/* The bio is owned by the end_io handler now */
141 		bio_ctrl->bbio = NULL;
142 	} else {
143 		submit_one_bio(bio_ctrl);
144 	}
145 }
146 
147 int __init extent_buffer_init_cachep(void)
148 {
149 	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
150 			sizeof(struct extent_buffer), 0,
151 			SLAB_MEM_SPREAD, NULL);
152 	if (!extent_buffer_cache)
153 		return -ENOMEM;
154 
155 	return 0;
156 }
157 
158 void __cold extent_buffer_free_cachep(void)
159 {
160 	/*
161 	 * Make sure all delayed rcu free are flushed before we
162 	 * destroy caches.
163 	 */
164 	rcu_barrier();
165 	kmem_cache_destroy(extent_buffer_cache);
166 }
167 
168 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
169 {
170 	unsigned long index = start >> PAGE_SHIFT;
171 	unsigned long end_index = end >> PAGE_SHIFT;
172 	struct page *page;
173 
174 	while (index <= end_index) {
175 		page = find_get_page(inode->i_mapping, index);
176 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
177 		clear_page_dirty_for_io(page);
178 		put_page(page);
179 		index++;
180 	}
181 }
182 
183 static void process_one_page(struct btrfs_fs_info *fs_info,
184 			     struct page *page, struct page *locked_page,
185 			     unsigned long page_ops, u64 start, u64 end)
186 {
187 	struct folio *folio = page_folio(page);
188 	u32 len;
189 
190 	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
191 	len = end + 1 - start;
192 
193 	if (page_ops & PAGE_SET_ORDERED)
194 		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
195 	if (page_ops & PAGE_START_WRITEBACK) {
196 		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
197 		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
198 	}
199 	if (page_ops & PAGE_END_WRITEBACK)
200 		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
201 
202 	if (page != locked_page && (page_ops & PAGE_UNLOCK))
203 		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
204 }
205 
206 static void __process_pages_contig(struct address_space *mapping,
207 				   struct page *locked_page, u64 start, u64 end,
208 				   unsigned long page_ops)
209 {
210 	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
211 	pgoff_t start_index = start >> PAGE_SHIFT;
212 	pgoff_t end_index = end >> PAGE_SHIFT;
213 	pgoff_t index = start_index;
214 	struct folio_batch fbatch;
215 	int i;
216 
217 	folio_batch_init(&fbatch);
218 	while (index <= end_index) {
219 		int found_folios;
220 
221 		found_folios = filemap_get_folios_contig(mapping, &index,
222 				end_index, &fbatch);
223 		for (i = 0; i < found_folios; i++) {
224 			struct folio *folio = fbatch.folios[i];
225 
226 			process_one_page(fs_info, &folio->page, locked_page,
227 					 page_ops, start, end);
228 		}
229 		folio_batch_release(&fbatch);
230 		cond_resched();
231 	}
232 }
233 
234 static noinline void __unlock_for_delalloc(struct inode *inode,
235 					   struct page *locked_page,
236 					   u64 start, u64 end)
237 {
238 	unsigned long index = start >> PAGE_SHIFT;
239 	unsigned long end_index = end >> PAGE_SHIFT;
240 
241 	ASSERT(locked_page);
242 	if (index == locked_page->index && end_index == index)
243 		return;
244 
245 	__process_pages_contig(inode->i_mapping, locked_page, start, end,
246 			       PAGE_UNLOCK);
247 }
248 
249 static noinline int lock_delalloc_pages(struct inode *inode,
250 					struct page *locked_page,
251 					u64 start,
252 					u64 end)
253 {
254 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
255 	struct address_space *mapping = inode->i_mapping;
256 	pgoff_t start_index = start >> PAGE_SHIFT;
257 	pgoff_t end_index = end >> PAGE_SHIFT;
258 	pgoff_t index = start_index;
259 	u64 processed_end = start;
260 	struct folio_batch fbatch;
261 
262 	if (index == locked_page->index && index == end_index)
263 		return 0;
264 
265 	folio_batch_init(&fbatch);
266 	while (index <= end_index) {
267 		unsigned int found_folios, i;
268 
269 		found_folios = filemap_get_folios_contig(mapping, &index,
270 				end_index, &fbatch);
271 		if (found_folios == 0)
272 			goto out;
273 
274 		for (i = 0; i < found_folios; i++) {
275 			struct folio *folio = fbatch.folios[i];
276 			struct page *page = folio_page(folio, 0);
277 			u32 len = end + 1 - start;
278 
279 			if (page == locked_page)
280 				continue;
281 
282 			if (btrfs_folio_start_writer_lock(fs_info, folio, start,
283 							  len))
284 				goto out;
285 
286 			if (!PageDirty(page) || page->mapping != mapping) {
287 				btrfs_folio_end_writer_lock(fs_info, folio, start,
288 							    len);
289 				goto out;
290 			}
291 
292 			processed_end = page_offset(page) + PAGE_SIZE - 1;
293 		}
294 		folio_batch_release(&fbatch);
295 		cond_resched();
296 	}
297 
298 	return 0;
299 out:
300 	folio_batch_release(&fbatch);
301 	if (processed_end > start)
302 		__unlock_for_delalloc(inode, locked_page, start, processed_end);
303 	return -EAGAIN;
304 }
305 
306 /*
307  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
308  * more than @max_bytes.
309  *
310  * @start:	The original start bytenr to search.
311  *		Will store the extent range start bytenr.
312  * @end:	The original end bytenr of the search range
313  *		Will store the extent range end bytenr.
314  *
315  * Return true if we find a delalloc range which starts inside the original
316  * range, and @start/@end will store the delalloc range start/end.
317  *
318  * Return false if we can't find any delalloc range which starts inside the
319  * original range, and @start/@end will be the non-delalloc range start/end.
320  */
321 EXPORT_FOR_TESTS
322 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
323 				    struct page *locked_page, u64 *start,
324 				    u64 *end)
325 {
326 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
327 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
328 	const u64 orig_start = *start;
329 	const u64 orig_end = *end;
330 	/* The sanity tests may not set a valid fs_info. */
331 	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
332 	u64 delalloc_start;
333 	u64 delalloc_end;
334 	bool found;
335 	struct extent_state *cached_state = NULL;
336 	int ret;
337 	int loops = 0;
338 
339 	/* Caller should pass a valid @end to indicate the search range end */
340 	ASSERT(orig_end > orig_start);
341 
342 	/* The range should at least cover part of the page */
343 	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
344 		 orig_end <= page_offset(locked_page)));
345 again:
346 	/* step one, find a bunch of delalloc bytes starting at start */
347 	delalloc_start = *start;
348 	delalloc_end = 0;
349 	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
350 					  max_bytes, &cached_state);
351 	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
352 		*start = delalloc_start;
353 
354 		/* @delalloc_end can be -1, never go beyond @orig_end */
355 		*end = min(delalloc_end, orig_end);
356 		free_extent_state(cached_state);
357 		return false;
358 	}
359 
360 	/*
361 	 * start comes from the offset of locked_page.  We have to lock
362 	 * pages in order, so we can't process delalloc bytes before
363 	 * locked_page
364 	 */
365 	if (delalloc_start < *start)
366 		delalloc_start = *start;
367 
368 	/*
369 	 * make sure to limit the number of pages we try to lock down
370 	 */
371 	if (delalloc_end + 1 - delalloc_start > max_bytes)
372 		delalloc_end = delalloc_start + max_bytes - 1;
373 
374 	/* step two, lock all the pages after the page that has start */
375 	ret = lock_delalloc_pages(inode, locked_page,
376 				  delalloc_start, delalloc_end);
377 	ASSERT(!ret || ret == -EAGAIN);
378 	if (ret == -EAGAIN) {
379 		/* some of the pages are gone, let's avoid looping by
380 		 * shortening the size of the delalloc range we're searching
381 		 */
382 		free_extent_state(cached_state);
383 		cached_state = NULL;
384 		if (!loops) {
385 			max_bytes = PAGE_SIZE;
386 			loops = 1;
387 			goto again;
388 		} else {
389 			found = false;
390 			goto out_failed;
391 		}
392 	}
393 
394 	/* step three, lock the state bits for the whole range */
395 	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
396 
397 	/* then test to make sure it is all still delalloc */
398 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
399 			     EXTENT_DELALLOC, cached_state);
400 	if (!ret) {
401 		unlock_extent(tree, delalloc_start, delalloc_end,
402 			      &cached_state);
403 		__unlock_for_delalloc(inode, locked_page,
404 			      delalloc_start, delalloc_end);
405 		cond_resched();
406 		goto again;
407 	}
408 	free_extent_state(cached_state);
409 	*start = delalloc_start;
410 	*end = delalloc_end;
411 out_failed:
412 	return found;
413 }
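
/*
 * Sketch of the canonical caller pattern, condensed from writepage_delalloc()
 * further below (illustrative only, error handling elided):
 *
 *	u64 delalloc_start = page_start;
 *	u64 delalloc_end;
 *
 *	while (delalloc_start < page_end) {
 *		delalloc_end = page_end;
 *		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
 *					      &delalloc_start, &delalloc_end)) {
 *			delalloc_start = delalloc_end + 1;
 *			continue;
 *		}
 *		(run delalloc for the locked range [delalloc_start, delalloc_end])
 *		delalloc_start = delalloc_end + 1;
 *	}
 */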
414 
415 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
416 				  struct page *locked_page,
417 				  u32 clear_bits, unsigned long page_ops)
418 {
419 	clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
420 
421 	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
422 			       start, end, page_ops);
423 }
424 
425 static bool btrfs_verify_page(struct page *page, u64 start)
426 {
427 	if (!fsverity_active(page->mapping->host) ||
428 	    PageUptodate(page) ||
429 	    start >= i_size_read(page->mapping->host))
430 		return true;
431 	return fsverity_verify_page(page);
432 }
433 
434 static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
435 {
436 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
437 	struct folio *folio = page_folio(page);
438 
439 	ASSERT(page_offset(page) <= start &&
440 	       start + len <= page_offset(page) + PAGE_SIZE);
441 
442 	if (uptodate && btrfs_verify_page(page, start))
443 		btrfs_folio_set_uptodate(fs_info, folio, start, len);
444 	else
445 		btrfs_folio_clear_uptodate(fs_info, folio, start, len);
446 
447 	if (!btrfs_is_subpage(fs_info, page->mapping))
448 		unlock_page(page);
449 	else
450 		btrfs_subpage_end_reader(fs_info, folio, start, len);
451 }
452 
453 /*
454  * After a write IO is done, we need to:
455  *
456  * - clear the uptodate bits on error
457  * - clear the writeback bits in the extent tree for the range
458  * - folio_end_writeback()  if there is no more pending io for the folio
459  *
460  * Scheduling is not allowed, so the extent state tree is expected
461  * to have one and only one object corresponding to this IO.
462  */
463 static void end_bbio_data_write(struct btrfs_bio *bbio)
464 {
465 	struct bio *bio = &bbio->bio;
466 	int error = blk_status_to_errno(bio->bi_status);
467 	struct folio_iter fi;
468 
469 	ASSERT(!bio_flagged(bio, BIO_CLONED));
470 	bio_for_each_folio_all(fi, bio) {
471 		struct folio *folio = fi.folio;
472 		struct inode *inode = folio->mapping->host;
473 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
474 		const u32 sectorsize = fs_info->sectorsize;
475 		u64 start = folio_pos(folio) + fi.offset;
476 		u32 len = fi.length;
477 
478 		/* Only order 0 (single page) folios are allowed for data. */
479 		ASSERT(folio_order(folio) == 0);
480 
481 		/* Our read/write should always be sector aligned. */
482 		if (!IS_ALIGNED(fi.offset, sectorsize))
483 			btrfs_err(fs_info,
484 		"partial page write in btrfs with offset %zu and length %zu",
485 				  fi.offset, fi.length);
486 		else if (!IS_ALIGNED(fi.length, sectorsize))
487 			btrfs_info(fs_info,
488 		"incomplete page write with offset %zu and length %zu",
489 				   fi.offset, fi.length);
490 
491 		btrfs_finish_ordered_extent(bbio->ordered,
492 				folio_page(folio, 0), start, len, !error);
493 		if (error)
494 			mapping_set_error(folio->mapping, error);
495 		btrfs_folio_clear_writeback(fs_info, folio, start, len);
496 	}
497 
498 	bio_put(bio);
499 }
500 
501 /*
502  * Record previously processed extent range
503  *
504  * For endio_readpage_release_extent() to handle a full extent range, reducing
505  * the extent io operations.
506  */
507 struct processed_extent {
508 	struct btrfs_inode *inode;
509 	/* Start of the range in @inode */
510 	u64 start;
511 	/* End of the range in @inode */
512 	u64 end;
513 	bool uptodate;
514 };
515 
516 /*
517  * Try to release processed extent range
518  *
519  * May not release the extent range right now if the current range is
520  * contiguous to processed extent.
521  *
522  * Will release the processed extent when @inode or @uptodate changes, or when
523  * the new range is no longer contiguous to the processed range.
524  *
525  * Passing @inode == NULL will force processed extent to be released.
526  */
527 static void endio_readpage_release_extent(struct processed_extent *processed,
528 			      struct btrfs_inode *inode, u64 start, u64 end,
529 			      bool uptodate)
530 {
531 	struct extent_state *cached = NULL;
532 	struct extent_io_tree *tree;
533 
534 	/* The first extent, initialize @processed */
535 	if (!processed->inode)
536 		goto update;
537 
538 	/*
539 	 * Contiguous to the processed extent, just extend processed->end.
540 	 *
541 	 * Several things to notice:
542 	 *
543 	 * - bio can be merged as long as on-disk bytenr is contiguous
544 	 *   This means we can have page belonging to other inodes, thus need to
545 	 *   check if the inode still matches.
546 	 * - bvec can contain range beyond current page for multi-page bvec
547 	 *   Thus we need to do processed->end + 1 >= start check
548 	 */
549 	if (processed->inode == inode && processed->uptodate == uptodate &&
550 	    processed->end + 1 >= start && end >= processed->end) {
551 		processed->end = end;
552 		return;
553 	}
554 
555 	tree = &processed->inode->io_tree;
556 	/*
557 	 * Now we don't have range contiguous to the processed range, release
558 	 * the processed range now.
559 	 */
560 	unlock_extent(tree, processed->start, processed->end, &cached);
561 
562 update:
563 	/* Update processed to current range */
564 	processed->inode = inode;
565 	processed->start = start;
566 	processed->end = end;
567 	processed->uptodate = uptodate;
568 }
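
/*
 * Worked example (assuming 4K sectors): a read bio completes two contiguous
 * uptodate sectors [0, 4K) and [4K, 8K) of the same inode.  The first call
 * initializes @processed to [0, 4K - 1]; the second call merely extends
 * processed->end to 8K - 1 without touching the io tree.  The final call
 * with @inode == NULL (see end_bbio_data_read() below) then unlocks
 * [0, 8K - 1] in one go.
 */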
569 
570 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
571 {
572 	struct folio *folio = page_folio(page);
573 
574 	ASSERT(folio_test_locked(folio));
575 	if (!btrfs_is_subpage(fs_info, folio->mapping))
576 		return;
577 
578 	ASSERT(folio_test_private(folio));
579 	btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
580 }
581 
582 /*
583  * After a data read IO is done, we need to:
584  *
585  * - clear the uptodate bits on error
586  * - set the uptodate bits if things worked
587  * - set the folio up to date if all extents in the tree are uptodate
588  * - clear the lock bit in the extent tree
589  * - unlock the folio if there are no other extents locked for it
590  *
591  * Scheduling is not allowed, so the extent state tree is expected
592  * to have one and only one object corresponding to this IO.
593  */
594 static void end_bbio_data_read(struct btrfs_bio *bbio)
595 {
596 	struct bio *bio = &bbio->bio;
597 	struct processed_extent processed = { 0 };
598 	struct folio_iter fi;
599 	/*
600 	 * The offset to the beginning of the bio.  Since one bio can never be
601 	 * larger than UINT_MAX, u32 is enough here.
602 	 */
603 	u32 bio_offset = 0;
604 
605 	ASSERT(!bio_flagged(bio, BIO_CLONED));
606 	bio_for_each_folio_all(fi, &bbio->bio) {
607 		bool uptodate = !bio->bi_status;
608 		struct folio *folio = fi.folio;
609 		struct inode *inode = folio->mapping->host;
610 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
611 		const u32 sectorsize = fs_info->sectorsize;
612 		u64 start;
613 		u64 end;
614 		u32 len;
615 
616 		/* For now only order 0 folios are supported for data. */
617 		ASSERT(folio_order(folio) == 0);
618 		btrfs_debug(fs_info,
619 			"%s: bi_sector=%llu, err=%d, mirror=%u",
620 			__func__, bio->bi_iter.bi_sector, bio->bi_status,
621 			bbio->mirror_num);
622 
623 		/*
624 		 * We always issue full-sector reads, but if some block in a
625 		 * folio fails to read, blk_update_request() will advance
626 		 * bv_offset and adjust bv_len to compensate.  Print a warning
627 		 * for unaligned offsets, and an error if they don't add up to
628 		 * a full sector.
629 		 */
630 		if (!IS_ALIGNED(fi.offset, sectorsize))
631 			btrfs_err(fs_info,
632 		"partial page read in btrfs with offset %zu and length %zu",
633 				  fi.offset, fi.length);
634 		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
635 			btrfs_info(fs_info,
636 		"incomplete page read with offset %zu and length %zu",
637 				   fi.offset, fi.length);
638 
639 		start = folio_pos(folio) + fi.offset;
640 		end = start + fi.length - 1;
641 		len = fi.length;
642 
643 		if (likely(uptodate)) {
644 			loff_t i_size = i_size_read(inode);
645 			pgoff_t end_index = i_size >> folio_shift(folio);
646 
647 			/*
648 			 * Zero out the remaining part if this range straddles
649 			 * i_size.
650 			 *
651 			 * Here we should only zero the range inside the folio,
652 			 * not touch anything else.
653 			 *
654 			 * NOTE: i_size is exclusive while end is inclusive.
655 			 */
656 			if (folio_index(folio) == end_index && i_size <= end) {
657 				u32 zero_start = max(offset_in_folio(folio, i_size),
658 						     offset_in_folio(folio, start));
659 				u32 zero_len = offset_in_folio(folio, end) + 1 -
660 					       zero_start;
661 
662 				folio_zero_range(folio, zero_start, zero_len);
663 			}
664 		}
665 
666 		/* Update page status and unlock. */
667 		end_page_read(folio_page(folio, 0), uptodate, start, len);
668 		endio_readpage_release_extent(&processed, BTRFS_I(inode),
669 					      start, end, uptodate);
670 
671 		ASSERT(bio_offset + len > bio_offset);
672 		bio_offset += len;
673 
674 	}
675 	/* Release the last extent */
676 	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
677 	bio_put(bio);
678 }
679 
680 /*
681  * Populate every free slot in a provided array with pages.
682  *
683  * @nr_pages:   number of pages to allocate
684  * @page_array: the array to fill with pages; any existing non-null entries in
685  * 		the array will be skipped
686  * @extra_gfp:	the extra GFP flags for the allocation.
687  *
688  * Return: 0        if all pages were able to be allocated;
689  *         -ENOMEM  otherwise; the partially allocated pages are freed and
690  *                  the array slots zeroed
691  */
692 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
693 			   gfp_t extra_gfp)
694 {
695 	unsigned int allocated;
696 
697 	for (allocated = 0; allocated < nr_pages;) {
698 		unsigned int last = allocated;
699 
700 		allocated = alloc_pages_bulk_array(GFP_NOFS | extra_gfp,
701 						   nr_pages, page_array);
702 
703 		if (allocated == nr_pages)
704 			return 0;
705 
706 		/*
707 		 * During this iteration, no page could be allocated, even
708 		 * though alloc_pages_bulk_array() falls back to alloc_page()
709 		 * if it could not bulk-allocate. So we must be out of memory.
710 		 */
711 		if (allocated == last) {
712 			for (int i = 0; i < allocated; i++) {
713 				__free_page(page_array[i]);
714 				page_array[i] = NULL;
715 			}
716 			return -ENOMEM;
717 		}
718 
719 		memalloc_retry_wait(GFP_NOFS);
720 	}
721 	return 0;
722 }
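
/*
 * Minimal usage sketch for btrfs_alloc_page_array() (illustrative only; the
 * array size of 4 is arbitrary and error handling is trimmed):
 *
 *	struct page *pages[4] = { 0 };
 *
 *	if (btrfs_alloc_page_array(4, pages, 0))
 *		return -ENOMEM;	(all slots are NULL again on failure)
 *	(use pages[0..3], then release them)
 *	for (int i = 0; i < 4; i++)
 *		__free_page(pages[i]);
 */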
723 
724 /*
725  * Populate needed folios for the extent buffer.
726  *
727  * For now, the folios populated are always in order 0 (aka, single page).
728  */
729 static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
730 {
731 	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
732 	int num_pages = num_extent_pages(eb);
733 	int ret;
734 
735 	ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
736 	if (ret < 0)
737 		return ret;
738 
739 	for (int i = 0; i < num_pages; i++)
740 		eb->folios[i] = page_folio(page_array[i]);
741 	return 0;
742 }
743 
744 static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
745 				struct page *page, u64 disk_bytenr,
746 				unsigned int pg_offset)
747 {
748 	struct bio *bio = &bio_ctrl->bbio->bio;
749 	struct bio_vec *bvec = bio_last_bvec_all(bio);
750 	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
751 
752 	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
753 		/*
754 		 * For compression, all IO should have its logical bytenr set
755 		 * to the starting bytenr of the compressed extent.
756 		 */
757 		return bio->bi_iter.bi_sector == sector;
758 	}
759 
760 	/*
761 	 * The contig check requires the following conditions to be met:
762 	 *
763 	 * 1) The pages belong to the same inode
764 	 *    This is implied by the call chain.
765 	 *
766 	 * 2) The range has adjacent logical bytenr
767 	 *
768 	 * 3) The range has adjacent file offset
769 	 *    This is required for the usage of btrfs_bio->file_offset.
770 	 */
771 	return bio_end_sector(bio) == sector &&
772 		page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
773 		page_offset(page) + pg_offset;
774 }
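
/*
 * Worked example with hypothetical numbers: suppose the current uncompressed
 * bio ends at disk byte 1M (bio_end_sector() == 2048) and its last bvec maps
 * file bytes [4K, 8K).  A new page with disk_bytenr == 1M and file offset 8K
 * passes both checks above and is appended; any other combination forces a
 * new bio.  For compressed bios only the logical start sector must match.
 */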
775 
776 static void alloc_new_bio(struct btrfs_inode *inode,
777 			  struct btrfs_bio_ctrl *bio_ctrl,
778 			  u64 disk_bytenr, u64 file_offset)
779 {
780 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
781 	struct btrfs_bio *bbio;
782 
783 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
784 			       bio_ctrl->end_io_func, NULL);
785 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
786 	bbio->inode = inode;
787 	bbio->file_offset = file_offset;
788 	bio_ctrl->bbio = bbio;
789 	bio_ctrl->len_to_oe_boundary = U32_MAX;
790 
791 	/* Limit data write bios to the ordered boundary. */
792 	if (bio_ctrl->wbc) {
793 		struct btrfs_ordered_extent *ordered;
794 
795 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
796 		if (ordered) {
797 			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
798 					ordered->file_offset +
799 					ordered->disk_num_bytes - file_offset);
800 			bbio->ordered = ordered;
801 		}
802 
803 		/*
804 		 * Pick the last added device to support cgroup writeback.  For
805 		 * multi-device file systems this means blk-cgroup policies have
806 		 * to always be set on the last added/replaced device.
807 		 * This is a bit odd but has been like that for a long time.
808 		 */
809 		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
810 		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
811 	}
812 }
813 
814 /*
815  * @disk_bytenr: logical bytenr where the write will be
816  * @page:	page to add to the bio
817  * @size:	portion of page that we want to write to
818  * @pg_offset:	offset of the new bio or to check whether we are adding
819  *              a contiguous page to the previous one
820  *
821  * This will either add the page into the existing @bio_ctrl->bbio, or allocate a
822  * new one in @bio_ctrl->bbio.
823  * The mirror number for this IO should already be initialized in
824  * @bio_ctrl->mirror_num.
825  */
826 static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
827 			       u64 disk_bytenr, struct page *page,
828 			       size_t size, unsigned long pg_offset)
829 {
830 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
831 
832 	ASSERT(pg_offset + size <= PAGE_SIZE);
833 	ASSERT(bio_ctrl->end_io_func);
834 
835 	if (bio_ctrl->bbio &&
836 	    !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
837 		submit_one_bio(bio_ctrl);
838 
839 	do {
840 		u32 len = size;
841 
842 		/* Allocate new bio if needed */
843 		if (!bio_ctrl->bbio) {
844 			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
845 				      page_offset(page) + pg_offset);
846 		}
847 
848 		/* Cap to the current ordered extent boundary if there is one. */
849 		if (len > bio_ctrl->len_to_oe_boundary) {
850 			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
851 			ASSERT(is_data_inode(&inode->vfs_inode));
852 			len = bio_ctrl->len_to_oe_boundary;
853 		}
854 
855 		if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
856 			/* bio full: move on to a new one */
857 			submit_one_bio(bio_ctrl);
858 			continue;
859 		}
860 
861 		if (bio_ctrl->wbc)
862 			wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
863 
864 		size -= len;
865 		pg_offset += len;
866 		disk_bytenr += len;
867 
868 		/*
869 		 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
870 		 * sector aligned.  alloc_new_bio() then sets it to the end of
871 		 * our ordered extent for writes into zoned devices.
872 		 *
873 		 * When len_to_oe_boundary is tracking an ordered extent, we
874 		 * trust the ordered extent code to align things properly, and
875 		 * the check above to cap our write to the ordered extent
876 		 * boundary is correct.
877 		 *
878 		 * When len_to_oe_boundary is U32_MAX, the cap above would
879 		 * result in a 4095 byte IO for the last page right before
880 		 * we hit the bio limit of UINT_MAX.  bio_add_page() has all
881 		 * the checks required to make sure we don't overflow the bio,
882 		 * and we should just ignore len_to_oe_boundary completely
883 		 * unless we're using it to track an ordered extent.
884 		 *
885 		 * It's pretty hard to make a bio sized U32_MAX, but it can
886 		 * happen when the page cache is able to feed us contiguous
887 		 * pages for large extents.
888 		 */
889 		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
890 			bio_ctrl->len_to_oe_boundary -= len;
891 
892 		/* Ordered extent boundary: move on to a new bio. */
893 		if (bio_ctrl->len_to_oe_boundary == 0)
894 			submit_one_bio(bio_ctrl);
895 	} while (size);
896 }
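
/*
 * Worked example of the ordered extent capping above (hypothetical numbers):
 * with len_to_oe_boundary == 8K and a 12K write, the first loop iteration
 * adds 8K, hits the boundary and submits the bio; the next iteration calls
 * alloc_new_bio(), which looks up the next ordered extent and resets
 * len_to_oe_boundary, and then adds the remaining 4K.
 */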
897 
898 static int attach_extent_buffer_folio(struct extent_buffer *eb,
899 				      struct folio *folio,
900 				      struct btrfs_subpage *prealloc)
901 {
902 	struct btrfs_fs_info *fs_info = eb->fs_info;
903 	int ret = 0;
904 
905 	/*
906 	 * If the page is mapped to the btree inode, we should hold the private
907 	 * lock to prevent races.
908 	 * For cloned or dummy extent buffers, their pages are not mapped and
909 	 * will not race with any other ebs.
910 	 */
911 	if (folio->mapping)
912 		lockdep_assert_held(&folio->mapping->i_private_lock);
913 
914 	if (fs_info->nodesize >= PAGE_SIZE) {
915 		if (!folio_test_private(folio))
916 			folio_attach_private(folio, eb);
917 		else
918 			WARN_ON(folio_get_private(folio) != eb);
919 		return 0;
920 	}
921 
922 	/* Already mapped, just free prealloc */
923 	if (folio_test_private(folio)) {
924 		btrfs_free_subpage(prealloc);
925 		return 0;
926 	}
927 
928 	if (prealloc)
929 		/* Has preallocated memory for subpage */
930 		folio_attach_private(folio, prealloc);
931 	else
932 		/* Do new allocation to attach subpage */
933 		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
934 	return ret;
935 }
936 
937 int set_page_extent_mapped(struct page *page)
938 {
939 	struct folio *folio = page_folio(page);
940 	struct btrfs_fs_info *fs_info;
941 
942 	ASSERT(page->mapping);
943 
944 	if (folio_test_private(folio))
945 		return 0;
946 
947 	fs_info = btrfs_sb(page->mapping->host->i_sb);
948 
949 	if (btrfs_is_subpage(fs_info, page->mapping))
950 		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
951 
952 	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
953 	return 0;
954 }
955 
956 void clear_page_extent_mapped(struct page *page)
957 {
958 	struct folio *folio = page_folio(page);
959 	struct btrfs_fs_info *fs_info;
960 
961 	ASSERT(page->mapping);
962 
963 	if (!folio_test_private(folio))
964 		return;
965 
966 	fs_info = btrfs_sb(page->mapping->host->i_sb);
967 	if (btrfs_is_subpage(fs_info, page->mapping))
968 		return btrfs_detach_subpage(fs_info, folio);
969 
970 	folio_detach_private(folio);
971 }
972 
973 static struct extent_map *
974 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
975 		 u64 start, u64 len, struct extent_map **em_cached)
976 {
977 	struct extent_map *em;
978 
979 	if (em_cached && *em_cached) {
980 		em = *em_cached;
981 		if (extent_map_in_tree(em) && start >= em->start &&
982 		    start < extent_map_end(em)) {
983 			refcount_inc(&em->refs);
984 			return em;
985 		}
986 
987 		free_extent_map(em);
988 		*em_cached = NULL;
989 	}
990 
991 	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
992 	if (em_cached && !IS_ERR(em)) {
993 		BUG_ON(*em_cached);
994 		refcount_inc(&em->refs);
995 		*em_cached = em;
996 	}
997 	return em;
998 }
999 /*
1000  * Basic readpage implementation.  Locked extent state structs are inserted
1001  * into the tree and removed when the IO is done (by the end_io
1002  * handlers).
1003  * XXX JDM: This needs looking at to ensure proper page locking
1004  * Return 0 on success, otherwise return error.
1005  */
1006 static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
1007 		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
1008 {
1009 	struct inode *inode = page->mapping->host;
1010 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1011 	u64 start = page_offset(page);
1012 	const u64 end = start + PAGE_SIZE - 1;
1013 	u64 cur = start;
1014 	u64 extent_offset;
1015 	u64 last_byte = i_size_read(inode);
1016 	u64 block_start;
1017 	struct extent_map *em;
1018 	int ret = 0;
1019 	size_t pg_offset = 0;
1020 	size_t iosize;
1021 	size_t blocksize = inode->i_sb->s_blocksize;
1022 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1023 
1024 	ret = set_page_extent_mapped(page);
1025 	if (ret < 0) {
1026 		unlock_extent(tree, start, end, NULL);
1027 		unlock_page(page);
1028 		return ret;
1029 	}
1030 
1031 	if (page->index == last_byte >> PAGE_SHIFT) {
1032 		size_t zero_offset = offset_in_page(last_byte);
1033 
1034 		if (zero_offset) {
1035 			iosize = PAGE_SIZE - zero_offset;
1036 			memzero_page(page, zero_offset, iosize);
1037 		}
1038 	}
1039 	bio_ctrl->end_io_func = end_bbio_data_read;
1040 	begin_page_read(fs_info, page);
1041 	while (cur <= end) {
1042 		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1043 		bool force_bio_submit = false;
1044 		u64 disk_bytenr;
1045 
1046 		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1047 		if (cur >= last_byte) {
1048 			iosize = PAGE_SIZE - pg_offset;
1049 			memzero_page(page, pg_offset, iosize);
1050 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1051 			end_page_read(page, true, cur, iosize);
1052 			break;
1053 		}
1054 		em = __get_extent_map(inode, page, pg_offset, cur,
1055 				      end - cur + 1, em_cached);
1056 		if (IS_ERR(em)) {
1057 			unlock_extent(tree, cur, end, NULL);
1058 			end_page_read(page, false, cur, end + 1 - cur);
1059 			return PTR_ERR(em);
1060 		}
1061 		extent_offset = cur - em->start;
1062 		BUG_ON(extent_map_end(em) <= cur);
1063 		BUG_ON(end < cur);
1064 
1065 		compress_type = extent_map_compression(em);
1066 
1067 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1068 		iosize = ALIGN(iosize, blocksize);
1069 		if (compress_type != BTRFS_COMPRESS_NONE)
1070 			disk_bytenr = em->block_start;
1071 		else
1072 			disk_bytenr = em->block_start + extent_offset;
1073 		block_start = em->block_start;
1074 		if (em->flags & EXTENT_FLAG_PREALLOC)
1075 			block_start = EXTENT_MAP_HOLE;
1076 
1077 		/*
1078 		 * If we have a file range that points to a compressed extent
1079 		 * and it's followed by a consecutive file range that points
1080 		 * to the same compressed extent (possibly with a different
1081 		 * offset and/or length, so it either points to the whole extent
1082 		 * or only part of it), we must make sure we do not submit a
1083 		 * single bio to populate the pages for the 2 ranges because
1084 		 * this makes the compressed extent read zero out the pages
1085 		 * belonging to the 2nd range. Imagine the following scenario:
1086 		 *
1087 		 *  File layout
1088 		 *  [0 - 8K]                     [8K - 24K]
1089 		 *    |                               |
1090 		 *    |                               |
1091 		 * points to extent X,         points to extent X,
1092 		 * offset 4K, length of 8K     offset 0, length 16K
1093 		 *
1094 		 * [extent X, compressed length = 4K uncompressed length = 16K]
1095 		 *
1096 		 * If the bio to read the compressed extent covers both ranges,
1097 		 * it will decompress extent X into the pages belonging to the
1098 		 * first range and then it will stop, zeroing out the remaining
1099 		 * pages that belong to the other range that points to extent X.
1100 		 * So here we make sure we submit 2 bios, one for the first
1101 		 * range and another one for the second range. Both will target
1102 		 * the same physical extent from disk, but we can't currently
1103 		 * make the compressed bio endio callback populate the pages
1104 		 * for both ranges because each compressed bio is tightly
1105 		 * coupled with a single extent map, and each range can have
1106 		 * an extent map with a different offset value relative to the
1107 		 * uncompressed data of our extent and different lengths. This
1108 		 * is a corner case so we prioritize correctness over
1109 		 * non-optimal behavior (submitting 2 bios for the same extent).
1110 		 */
1111 		if (compress_type != BTRFS_COMPRESS_NONE &&
1112 		    prev_em_start && *prev_em_start != (u64)-1 &&
1113 		    *prev_em_start != em->start)
1114 			force_bio_submit = true;
1115 
1116 		if (prev_em_start)
1117 			*prev_em_start = em->start;
1118 
1119 		free_extent_map(em);
1120 		em = NULL;
1121 
1122 		/* we've found a hole, just zero and go on */
1123 		if (block_start == EXTENT_MAP_HOLE) {
1124 			memzero_page(page, pg_offset, iosize);
1125 
1126 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1127 			end_page_read(page, true, cur, iosize);
1128 			cur = cur + iosize;
1129 			pg_offset += iosize;
1130 			continue;
1131 		}
1132 		/* the get_extent function already copied into the page */
1133 		if (block_start == EXTENT_MAP_INLINE) {
1134 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1135 			end_page_read(page, true, cur, iosize);
1136 			cur = cur + iosize;
1137 			pg_offset += iosize;
1138 			continue;
1139 		}
1140 
1141 		if (bio_ctrl->compress_type != compress_type) {
1142 			submit_one_bio(bio_ctrl);
1143 			bio_ctrl->compress_type = compress_type;
1144 		}
1145 
1146 		if (force_bio_submit)
1147 			submit_one_bio(bio_ctrl);
1148 		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1149 				   pg_offset);
1150 		cur = cur + iosize;
1151 		pg_offset += iosize;
1152 	}
1153 
1154 	return 0;
1155 }
1156 
1157 int btrfs_read_folio(struct file *file, struct folio *folio)
1158 {
1159 	struct page *page = &folio->page;
1160 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
1161 	u64 start = page_offset(page);
1162 	u64 end = start + PAGE_SIZE - 1;
1163 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1164 	int ret;
1165 
1166 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1167 
1168 	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
1169 	/*
1170 	 * If btrfs_do_readpage() failed we will want to submit the assembled
1171 	 * bio to do the cleanup.
1172 	 */
1173 	submit_one_bio(&bio_ctrl);
1174 	return ret;
1175 }
1176 
1177 static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1178 					u64 start, u64 end,
1179 					struct extent_map **em_cached,
1180 					struct btrfs_bio_ctrl *bio_ctrl,
1181 					u64 *prev_em_start)
1182 {
1183 	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
1184 	int index;
1185 
1186 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1187 
1188 	for (index = 0; index < nr_pages; index++) {
1189 		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1190 				  prev_em_start);
1191 		put_page(pages[index]);
1192 	}
1193 }
1194 
1195 /*
1196  * Helper for __extent_writepage(), doing all of the delayed allocation setup.
1197  *
1198  * This returns 1 if btrfs_run_delalloc_range() did all the work required
1199  * to write the page (copy into inline extent).  In this case the IO has
1200  * been started and the page is already unlocked.
1201  *
1202  * This returns 0 if all went well (page still locked)
1203  * This returns < 0 if there were errors (page still locked)
1204  */
1205 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1206 		struct page *page, struct writeback_control *wbc)
1207 {
1208 	const u64 page_start = page_offset(page);
1209 	const u64 page_end = page_start + PAGE_SIZE - 1;
1210 	u64 delalloc_start = page_start;
1211 	u64 delalloc_end = page_end;
1212 	u64 delalloc_to_write = 0;
1213 	int ret = 0;
1214 
1215 	while (delalloc_start < page_end) {
1216 		delalloc_end = page_end;
1217 		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1218 					      &delalloc_start, &delalloc_end)) {
1219 			delalloc_start = delalloc_end + 1;
1220 			continue;
1221 		}
1222 
1223 		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1224 					       delalloc_end, wbc);
1225 		if (ret < 0)
1226 			return ret;
1227 
1228 		delalloc_start = delalloc_end + 1;
1229 	}
1230 
1231 	/*
1232 	 * delalloc_end is already one less than the total length, so
1233 	 * we don't subtract one from PAGE_SIZE
1234 	 */
1235 	delalloc_to_write +=
1236 		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1237 
1238 	/*
1239 	 * If btrfs_run_delalloc_range() already started I/O and unlocked
1240 	 * the pages, we just need to account for them here.
1241 	 */
1242 	if (ret == 1) {
1243 		wbc->nr_to_write -= delalloc_to_write;
1244 		return 1;
1245 	}
1246 
1247 	if (wbc->nr_to_write < delalloc_to_write) {
1248 		int thresh = 8192;
1249 
1250 		if (delalloc_to_write < thresh * 2)
1251 			thresh = delalloc_to_write;
1252 		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1253 					 thresh);
1254 	}
1255 
1256 	return 0;
1257 }
1258 
1259 /*
1260  * Find the first byte we need to write.
1261  *
1262  * For subpage, one page can contain several sectors, and
1263  * __extent_writepage_io() will just grab all extent maps in the page
1264  * range and try to submit all non-inline/non-compressed extents.
1265  *
1266  * This is a big problem for subpage: we shouldn't re-submit already written
1267  * data at all.
1268  * This function will lookup subpage dirty bit to find which range we really
1269  * need to submit.
1270  *
1271  * Return the next dirty range in [@start, @end).
1272  * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1273  */
1274 static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1275 				 struct page *page, u64 *start, u64 *end)
1276 {
1277 	struct folio *folio = page_folio(page);
1278 	struct btrfs_subpage *subpage = folio_get_private(folio);
1279 	struct btrfs_subpage_info *spi = fs_info->subpage_info;
1280 	u64 orig_start = *start;
1281 	/* Declare as unsigned long so we can use bitmap ops */
1282 	unsigned long flags;
1283 	int range_start_bit;
1284 	int range_end_bit;
1285 
1286 	/*
1287 	 * For regular sector size == page size case, since one page only
1288 	 * contains one sector, we return the page offset directly.
1289 	 */
1290 	if (!btrfs_is_subpage(fs_info, page->mapping)) {
1291 		*start = page_offset(page);
1292 		*end = page_offset(page) + PAGE_SIZE;
1293 		return;
1294 	}
1295 
1296 	range_start_bit = spi->dirty_offset +
1297 			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1298 
1299 	/* We should have the page locked, but just in case */
1300 	spin_lock_irqsave(&subpage->lock, flags);
1301 	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1302 			       spi->dirty_offset + spi->bitmap_nr_bits);
1303 	spin_unlock_irqrestore(&subpage->lock, flags);
1304 
1305 	range_start_bit -= spi->dirty_offset;
1306 	range_end_bit -= spi->dirty_offset;
1307 
1308 	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1309 	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1310 }
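
/*
 * Worked example (assuming 4K sectorsize and 64K pages, i.e. a 16-bit
 * subpage dirty bitmap): if only the third sector of the page is dirty and
 * *start begins at page_offset(page), the bitmap search yields
 * range_start_bit == 2 and range_end_bit == 3 (after subtracting
 * spi->dirty_offset), so *start becomes page_offset(page) + 8K and *end
 * becomes page_offset(page) + 12K.
 */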
1311 
1312 /*
1313  * helper for __extent_writepage.  This calls the writepage start hooks,
1314  * and does the loop to map the page into extents and bios.
1315  *
1316  * We return 1 if the IO is started and the page is unlocked,
1317  * 0 if all went well (page still locked)
1318  * < 0 if there were errors (page still locked)
1319  */
1320 static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1321 				 struct page *page,
1322 				 struct btrfs_bio_ctrl *bio_ctrl,
1323 				 loff_t i_size,
1324 				 int *nr_ret)
1325 {
1326 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1327 	u64 cur = page_offset(page);
1328 	u64 end = cur + PAGE_SIZE - 1;
1329 	u64 extent_offset;
1330 	u64 block_start;
1331 	struct extent_map *em;
1332 	int ret = 0;
1333 	int nr = 0;
1334 
1335 	ret = btrfs_writepage_cow_fixup(page);
1336 	if (ret) {
1337 		/* Fixup worker will requeue */
1338 		redirty_page_for_writepage(bio_ctrl->wbc, page);
1339 		unlock_page(page);
1340 		return 1;
1341 	}
1342 
1343 	bio_ctrl->end_io_func = end_bbio_data_write;
1344 	while (cur <= end) {
1345 		u32 len = end - cur + 1;
1346 		u64 disk_bytenr;
1347 		u64 em_end;
1348 		u64 dirty_range_start = cur;
1349 		u64 dirty_range_end;
1350 		u32 iosize;
1351 
1352 		if (cur >= i_size) {
1353 			btrfs_mark_ordered_io_finished(inode, page, cur, len,
1354 						       true);
1355 			/*
1356 			 * This range is beyond i_size, thus we don't need to
1357 			 * bother writing back.
1358 			 * But we still need to clear the dirty subpage bit, or
1359 			 * the next time the page gets dirtied, we will try to
1360 			 * writeback the sectors with subpage dirty bits,
1361 			 * causing writeback without ordered extent.
1362 			 */
1363 			btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
1364 			break;
1365 		}
1366 
1367 		find_next_dirty_byte(fs_info, page, &dirty_range_start,
1368 				     &dirty_range_end);
1369 		if (cur < dirty_range_start) {
1370 			cur = dirty_range_start;
1371 			continue;
1372 		}
1373 
1374 		em = btrfs_get_extent(inode, NULL, 0, cur, len);
1375 		if (IS_ERR(em)) {
1376 			ret = PTR_ERR_OR_ZERO(em);
1377 			goto out_error;
1378 		}
1379 
1380 		extent_offset = cur - em->start;
1381 		em_end = extent_map_end(em);
1382 		ASSERT(cur <= em_end);
1383 		ASSERT(cur < end);
1384 		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1385 		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1386 
1387 		block_start = em->block_start;
1388 		disk_bytenr = em->block_start + extent_offset;
1389 
1390 		ASSERT(!extent_map_is_compressed(em));
1391 		ASSERT(block_start != EXTENT_MAP_HOLE);
1392 		ASSERT(block_start != EXTENT_MAP_INLINE);
1393 
1394 		/*
1395 		 * Note that em_end from extent_map_end() and dirty_range_end from
1396 		 * find_next_dirty_byte() are both exclusive.
1397 		 */
1398 		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1399 		free_extent_map(em);
1400 		em = NULL;
1401 
1402 		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1403 		if (!PageWriteback(page)) {
1404 			btrfs_err(inode->root->fs_info,
1405 				   "page %lu not writeback, cur %llu end %llu",
1406 			       page->index, cur, end);
1407 		}
1408 
1409 		/*
1410 		 * Although the PageDirty bit is cleared before entering this
1411 		 * function, the subpage dirty bit is not cleared.
1412 		 * So clear the subpage dirty bit here so next time we won't submit
1413 		 * the page for a range already written to disk.
1414 		 */
1415 		btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
1416 
1417 		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1418 				   cur - page_offset(page));
1419 		cur += iosize;
1420 		nr++;
1421 	}
1422 
1423 	btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
1424 	*nr_ret = nr;
1425 	return 0;
1426 
1427 out_error:
1428 	/*
1429 	 * If we finish without problem, we should not only clear page dirty,
1430 	 * but also empty subpage dirty bits
1431 	 */
1432 	*nr_ret = nr;
1433 	return ret;
1434 }
1435 
1436 /*
1437  * The writepage semantics are similar to regular writepage.  Extent
1438  * records are inserted to lock ranges in the tree, and as dirty areas
1439  * are found, they are marked writeback.  Then the lock bits are removed
1440  * and the end_io handler clears the writeback ranges.
1441  *
1442  * Return 0 if everything goes well.
1443  * Return <0 for error.
1444  */
1445 static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
1446 {
1447 	struct folio *folio = page_folio(page);
1448 	struct inode *inode = page->mapping->host;
1449 	const u64 page_start = page_offset(page);
1450 	int ret;
1451 	int nr = 0;
1452 	size_t pg_offset;
1453 	loff_t i_size = i_size_read(inode);
1454 	unsigned long end_index = i_size >> PAGE_SHIFT;
1455 
1456 	trace___extent_writepage(page, inode, bio_ctrl->wbc);
1457 
1458 	WARN_ON(!PageLocked(page));
1459 
1460 	pg_offset = offset_in_page(i_size);
1461 	if (page->index > end_index ||
1462 	   (page->index == end_index && !pg_offset)) {
1463 		folio_invalidate(folio, 0, folio_size(folio));
1464 		folio_unlock(folio);
1465 		return 0;
1466 	}
1467 
1468 	if (page->index == end_index)
1469 		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1470 
1471 	ret = set_page_extent_mapped(page);
1472 	if (ret < 0)
1473 		goto done;
1474 
1475 	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1476 	if (ret == 1)
1477 		return 0;
1478 	if (ret)
1479 		goto done;
1480 
1481 	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
1482 	if (ret == 1)
1483 		return 0;
1484 
1485 	bio_ctrl->wbc->nr_to_write--;
1486 
1487 done:
1488 	if (nr == 0) {
1489 		/* make sure the mapping tag for page dirty gets cleared */
1490 		set_page_writeback(page);
1491 		end_page_writeback(page);
1492 	}
1493 	if (ret) {
1494 		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1495 					       PAGE_SIZE, !ret);
1496 		mapping_set_error(page->mapping, ret);
1497 	}
1498 	unlock_page(page);
1499 	ASSERT(ret <= 0);
1500 	return ret;
1501 }
1502 
1503 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1504 {
1505 	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1506 		       TASK_UNINTERRUPTIBLE);
1507 }
1508 
1509 /*
1510  * Lock extent buffer status and pages for writeback.
1511  *
1512  * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1513  * extent buffer is not dirty).
1514  * Return %true if the extent buffer is submitted to bio.
1515  */
1516 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1517 			  struct writeback_control *wbc)
1518 {
1519 	struct btrfs_fs_info *fs_info = eb->fs_info;
1520 	bool ret = false;
1521 
1522 	btrfs_tree_lock(eb);
1523 	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1524 		btrfs_tree_unlock(eb);
1525 		if (wbc->sync_mode != WB_SYNC_ALL)
1526 			return false;
1527 		wait_on_extent_buffer_writeback(eb);
1528 		btrfs_tree_lock(eb);
1529 	}
1530 
1531 	/*
1532 	 * We need to do this to prevent races with anyone who checks if the eb is
1533 	 * under IO since we can end up having no IO bits set for a short period
1534 	 * of time.
1535 	 */
1536 	spin_lock(&eb->refs_lock);
1537 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1538 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1539 		spin_unlock(&eb->refs_lock);
1540 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1541 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1542 					 -eb->len,
1543 					 fs_info->dirty_metadata_batch);
1544 		ret = true;
1545 	} else {
1546 		spin_unlock(&eb->refs_lock);
1547 	}
1548 	btrfs_tree_unlock(eb);
1549 	return ret;
1550 }
1551 
1552 static void set_btree_ioerr(struct extent_buffer *eb)
1553 {
1554 	struct btrfs_fs_info *fs_info = eb->fs_info;
1555 
1556 	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1557 
1558 	/*
1559 	 * A read may stumble upon this buffer later; make sure that it gets an
1560 	 * error and knows there was an error.
1561 	 */
1562 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1563 
1564 	/*
1565 	 * We need to set the mapping with the io error as well because a write
1566 	 * error will flip the file system readonly, and then syncfs() would
1567 	 * return 0 (because we are readonly) if we didn't modify the err seq for
1568 	 * the superblock.
1569 	 */
1570 	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1571 
1572 	/*
1573 	 * If writeback for a btree extent that doesn't belong to a log tree
1574 	 * failed, increment the counter transaction->eb_write_errors.
1575 	 * We do this because while the transaction is running and before it's
1576 	 * committing (when we call filemap_fdata[write|wait]_range against
1577 	 * the btree inode), we might have
1578 	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1579 	 * returns an error or an error happens during writeback, when we're
1580 	 * committing the transaction we wouldn't know about it, since the pages
1581 	 * can be no longer dirty nor marked anymore for writeback (if a
1582 	 * subsequent modification to the extent buffer didn't happen before the
1583 	 * transaction commit), which makes filemap_fdata[write|wait]_range not
1584 	 * able to find the pages tagged with SetPageError at transaction
1585 	 * commit time. So if this happens we must abort the transaction,
1586 	 * otherwise we commit a super block with btree roots that point to
1587 	 * btree nodes/leafs whose content on disk is invalid - either garbage
1588 	 * or the content of some node/leaf from a past generation that got
1589 	 * cowed or deleted and is no longer valid.
1590 	 *
1591 	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1592 	 * not be enough - we need to distinguish between log tree extents vs
1593 	 * non-log tree extents, and the next filemap_fdatawait_range() call
1594 	 * will catch and clear such errors in the mapping - and that call might
1595 	 * be from a log sync and not from a transaction commit. Also, checking
1596 	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1597 	 * not done and would not be reliable - the eb might have been released
1598 	 * from memory and reading it back again means that flag would not be
1599 	 * set (since it's a runtime flag, not persisted on disk).
1600 	 *
1601 	 * Using the flags below in the btree inode also makes us achieve the
1602 	 * goal of AS_EIO/AS_ENOSPC for the case where writepages() returns
1603 	 * success and started writeback for all dirty pages, but before
1604 	 * filemap_fdatawait_range() is called the writeback for all dirty
1605 	 * pages had already finished with errors - because we were not using
1606 	 * AS_EIO/AS_ENOSPC, filemap_fdatawait_range() would return success,
1607 	 * as it could not know that writeback errors happened (the pages were
1608 	 * no longer tagged for writeback).
1609 	 */
1610 	switch (eb->log_index) {
1611 	case -1:
1612 		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1613 		break;
1614 	case 0:
1615 		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1616 		break;
1617 	case 1:
1618 		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1619 		break;
1620 	default:
1621 		BUG(); /* unexpected, logic error */
1622 	}
1623 }
1624 
1625 /*
1626  * The endio specific version of find_extent_buffer() which won't touch any
1627  * unsafe spinlock in endio context.
1628  */
1629 static struct extent_buffer *find_extent_buffer_nolock(
1630 		struct btrfs_fs_info *fs_info, u64 start)
1631 {
1632 	struct extent_buffer *eb;
1633 
1634 	rcu_read_lock();
1635 	eb = radix_tree_lookup(&fs_info->buffer_radix,
1636 			       start >> fs_info->sectorsize_bits);
1637 	if (eb && atomic_inc_not_zero(&eb->refs)) {
1638 		rcu_read_unlock();
1639 		return eb;
1640 	}
1641 	rcu_read_unlock();
1642 	return NULL;
1643 }
1644 
1645 static void end_bbio_meta_write(struct btrfs_bio *bbio)
1646 {
1647 	struct extent_buffer *eb = bbio->private;
1648 	struct btrfs_fs_info *fs_info = eb->fs_info;
1649 	bool uptodate = !bbio->bio.bi_status;
1650 	struct folio_iter fi;
1651 	u32 bio_offset = 0;
1652 
1653 	if (!uptodate)
1654 		set_btree_ioerr(eb);
1655 
1656 	bio_for_each_folio_all(fi, &bbio->bio) {
1657 		u64 start = eb->start + bio_offset;
1658 		struct folio *folio = fi.folio;
1659 		u32 len = fi.length;
1660 
1661 		btrfs_folio_clear_writeback(fs_info, folio, start, len);
1662 		bio_offset += len;
1663 	}
1664 
1665 	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1666 	smp_mb__after_atomic();
1667 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1668 
1669 	bio_put(&bbio->bio);
1670 }
1671 
1672 static void prepare_eb_write(struct extent_buffer *eb)
1673 {
1674 	u32 nritems;
1675 	unsigned long start;
1676 	unsigned long end;
1677 
1678 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1679 
1680 	/* Set btree block areas beyond nritems to 0 to avoid stale content */
1681 	nritems = btrfs_header_nritems(eb);
1682 	if (btrfs_header_level(eb) > 0) {
1683 		end = btrfs_node_key_ptr_offset(eb, nritems);
1684 		memzero_extent_buffer(eb, end, eb->len - end);
1685 	} else {
1686 		/*
1687 		 * Leaf:
1688 		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1689 		 */
1690 		start = btrfs_item_nr_offset(eb, nritems);
1691 		end = btrfs_item_nr_offset(eb, 0);
1692 		if (nritems == 0)
1693 			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1694 		else
1695 			end += btrfs_item_offset(eb, nritems - 1);
1696 		memzero_extent_buffer(eb, start, end - start);
1697 	}
1698 }
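
/*
 * Worked example for the leaf case above, with illustrative numbers: for
 * nritems == 1 where the single item's data occupies the last 100 bytes
 * of the data area, the zeroed range is
 *
 *	start = btrfs_item_nr_offset(eb, 0) + sizeof(struct btrfs_item)
 *	end   = btrfs_item_nr_offset(eb, 0) +
 *		BTRFS_LEAF_DATA_SIZE(fs_info) - 100
 *
 * i.e. everything between the end of the item array and the start of the
 * item data is cleared, so no stale bytes from earlier tree content are
 * written to disk.
 */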
1699 
1700 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1701 					    struct writeback_control *wbc)
1702 {
1703 	struct btrfs_fs_info *fs_info = eb->fs_info;
1704 	struct btrfs_bio *bbio;
1705 
1706 	prepare_eb_write(eb);
1707 
1708 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1709 			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1710 			       eb->fs_info, end_bbio_meta_write, eb);
1711 	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1712 	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1713 	wbc_init_bio(wbc, &bbio->bio);
1714 	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1715 	bbio->file_offset = eb->start;
1716 	if (fs_info->nodesize < PAGE_SIZE) {
1717 		struct folio *folio = eb->folios[0];
1718 		bool ret;
1719 
1720 		folio_lock(folio);
1721 		btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1722 		if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1723 						       eb->len)) {
1724 			folio_clear_dirty_for_io(folio);
1725 			wbc->nr_to_write--;
1726 		}
1727 		ret = bio_add_folio(&bbio->bio, folio, eb->len,
1728 				    eb->start - folio_pos(folio));
1729 		ASSERT(ret);
1730 		wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1731 		folio_unlock(folio);
1732 	} else {
1733 		int num_folios = num_extent_folios(eb);
1734 
1735 		for (int i = 0; i < num_folios; i++) {
1736 			struct folio *folio = eb->folios[i];
1737 			bool ret;
1738 
1739 			folio_lock(folio);
1740 			folio_clear_dirty_for_io(folio);
1741 			folio_start_writeback(folio);
1742 			ret = bio_add_folio(&bbio->bio, folio, folio_size(folio), 0);
1743 			ASSERT(ret);
1744 			wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1745 						 folio_size(folio));
1746 			wbc->nr_to_write -= folio_nr_pages(folio);
1747 			folio_unlock(folio);
1748 		}
1749 	}
1750 	btrfs_submit_bio(bbio, 0);
1751 }
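
/*
 * Note the split above: with nodesize < PAGE_SIZE a single folio can host
 * several extent buffers, so dirty and writeback state must be tracked
 * per-range with the btrfs_subpage_* helpers, while in the regular case
 * each folio belongs to exactly one eb and the folio-wide flags suffice.
 */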
1752 
1753 /*
1754  * Submit one subpage btree page.
1755  *
1756  * The main difference to submit_eb_page() is:
1757  * - Page locking
1758  *   For subpage, we don't rely on page locking at all.
1759  *
1760  * - Flush write bio
1761  *   We only flush the bio if we may be unable to fit the current
1762  *   extent buffer into the current bio.
1763  *
1764  * Return >=0 for the number of submitted extent buffers.
1765  * Return <0 for fatal error.
1766  */
1767 static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1768 {
1769 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
1770 	struct folio *folio = page_folio(page);
1771 	int submitted = 0;
1772 	u64 page_start = page_offset(page);
1773 	int bit_start = 0;
1774 	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1775 
1776 	/* Lock and write each dirty extent buffer in the range */
1777 	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1778 		struct btrfs_subpage *subpage = folio_get_private(folio);
1779 		struct extent_buffer *eb;
1780 		unsigned long flags;
1781 		u64 start;
1782 
1783 		/*
1784 		 * Take private lock to ensure the subpage won't be detached
1785 		 * in the meantime.
1786 		 */
1787 		spin_lock(&page->mapping->i_private_lock);
1788 		if (!folio_test_private(folio)) {
1789 			spin_unlock(&page->mapping->i_private_lock);
1790 			break;
1791 		}
1792 		spin_lock_irqsave(&subpage->lock, flags);
1793 		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1794 			      subpage->bitmaps)) {
1795 			spin_unlock_irqrestore(&subpage->lock, flags);
1796 			spin_unlock(&page->mapping->i_private_lock);
1797 			bit_start++;
1798 			continue;
1799 		}
1800 
1801 		start = page_start + bit_start * fs_info->sectorsize;
1802 		bit_start += sectors_per_node;
1803 
1804 		/*
1805 		 * Here we just want to grab the eb without touching extra
1806 		 * spin locks, so call find_extent_buffer_nolock().
1807 		 */
1808 		eb = find_extent_buffer_nolock(fs_info, start);
1809 		spin_unlock_irqrestore(&subpage->lock, flags);
1810 		spin_unlock(&page->mapping->i_private_lock);
1811 
1812 		/*
1813 		 * The eb has already reached 0 refs, thus find_extent_buffer()
1814 		 * doesn't return it. We don't need to write back such an eb
1815 		 * anyway.
1816 		 */
1817 		if (!eb)
1818 			continue;
1819 
1820 		if (lock_extent_buffer_for_io(eb, wbc)) {
1821 			write_one_eb(eb, wbc);
1822 			submitted++;
1823 		}
1824 		free_extent_buffer(eb);
1825 	}
1826 	return submitted;
1827 }
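
/*
 * Worked example for the loop above, with illustrative geometry: on a 64K
 * page with 4K sectorsize and 16K nodesize, bitmap_nr_bits is 16 and
 * sectors_per_node is 4. Clean sectors are skipped one bit at a time, and
 * each dirty bit found maps to the eb starting at
 *
 *	start = page_start + bit_start * 4K
 *
 * after which bit_start jumps ahead by 4 to the next possible eb.
 */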
1828 
1829 /*
1830  * Submit all page(s) of one extent buffer.
1831  *
1832  * @page:	the page of one extent buffer
1833  * @ctx:	the btrfs_eb_write_context for this writeback; if the current
1834  *		page belongs to ctx->eb, we don't need to submit it again
1835  *
1836  * The caller should pass each page in bytenr order, and here we use
1837  * ctx->eb to determine if we have submitted pages of one extent buffer.
1838  *
1839  * If we have, we just skip until we hit a new page that doesn't belong to
1840  * current @eb_context.
1841  *
1842  * If not, we submit all the page(s) of the extent buffer.
1843  *
1844  * Return >0 if we have submitted the extent buffer successfully.
1845  * Return 0 if we don't need to submit the page, as it's already submitted by
1846  * previous call.
1847  * Return <0 for fatal error.
1848  */
1849 static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1850 {
1851 	struct writeback_control *wbc = ctx->wbc;
1852 	struct address_space *mapping = page->mapping;
1853 	struct folio *folio = page_folio(page);
1854 	struct extent_buffer *eb;
1855 	int ret;
1856 
1857 	if (!folio_test_private(folio))
1858 		return 0;
1859 
1860 	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
1861 		return submit_eb_subpage(page, wbc);
1862 
1863 	spin_lock(&mapping->i_private_lock);
1864 	if (!folio_test_private(folio)) {
1865 		spin_unlock(&mapping->i_private_lock);
1866 		return 0;
1867 	}
1868 
1869 	eb = folio_get_private(folio);
1870 
1871 	/*
1872 	 * Shouldn't happen and normally this would be a BUG_ON but no point
1873 	 * crashing the machine for something we can survive anyway.
1874 	 */
1875 	if (WARN_ON(!eb)) {
1876 		spin_unlock(&mapping->i_private_lock);
1877 		return 0;
1878 	}
1879 
1880 	if (eb == ctx->eb) {
1881 		spin_unlock(&mapping->i_private_lock);
1882 		return 0;
1883 	}
1884 	ret = atomic_inc_not_zero(&eb->refs);
1885 	spin_unlock(&mapping->i_private_lock);
1886 	if (!ret)
1887 		return 0;
1888 
1889 	ctx->eb = eb;
1890 
1891 	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1892 	if (ret) {
1893 		if (ret == -EBUSY)
1894 			ret = 0;
1895 		free_extent_buffer(eb);
1896 		return ret;
1897 	}
1898 
1899 	if (!lock_extent_buffer_for_io(eb, wbc)) {
1900 		free_extent_buffer(eb);
1901 		return 0;
1902 	}
1903 	/* Implies write in zoned mode. */
1904 	if (ctx->zoned_bg) {
1905 		/* Mark the last eb in the block group. */
1906 		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1907 		ctx->zoned_bg->meta_write_pointer += eb->len;
1908 	}
1909 	write_one_eb(eb, wbc);
1910 	free_extent_buffer(eb);
1911 	return 1;
1912 }
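
/*
 * A note on the zoned branch above: advancing meta_write_pointer by
 * eb->len keeps metadata writes sequential within the block group's zone.
 * That is also why btrfs_check_meta_write_pointer() may return -EBUSY
 * (treated as "skip for now") for an eb that is not the next one expected
 * at the write pointer.
 */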
1913 
1914 int btree_write_cache_pages(struct address_space *mapping,
1915 				   struct writeback_control *wbc)
1916 {
1917 	struct btrfs_eb_write_context ctx = { .wbc = wbc };
1918 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
1919 	int ret = 0;
1920 	int done = 0;
1921 	int nr_to_write_done = 0;
1922 	struct folio_batch fbatch;
1923 	unsigned int nr_folios;
1924 	pgoff_t index;
1925 	pgoff_t end;		/* Inclusive */
1926 	int scanned = 0;
1927 	xa_mark_t tag;
1928 
1929 	folio_batch_init(&fbatch);
1930 	if (wbc->range_cyclic) {
1931 		index = mapping->writeback_index; /* Start from prev offset */
1932 		end = -1;
1933 		/*
1934 		 * When starting from the beginning we don't need to cycle over
1935 		 * the range, so mark it as scanned.
1936 		 */
1937 		scanned = (index == 0);
1938 	} else {
1939 		index = wbc->range_start >> PAGE_SHIFT;
1940 		end = wbc->range_end >> PAGE_SHIFT;
1941 		scanned = 1;
1942 	}
1943 	if (wbc->sync_mode == WB_SYNC_ALL)
1944 		tag = PAGECACHE_TAG_TOWRITE;
1945 	else
1946 		tag = PAGECACHE_TAG_DIRTY;
1947 	btrfs_zoned_meta_io_lock(fs_info);
1948 retry:
1949 	if (wbc->sync_mode == WB_SYNC_ALL)
1950 		tag_pages_for_writeback(mapping, index, end);
1951 	while (!done && !nr_to_write_done && (index <= end) &&
1952 	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1953 					    tag, &fbatch))) {
1954 		unsigned i;
1955 
1956 		for (i = 0; i < nr_folios; i++) {
1957 			struct folio *folio = fbatch.folios[i];
1958 
1959 			ret = submit_eb_page(&folio->page, &ctx);
1960 			if (ret == 0)
1961 				continue;
1962 			if (ret < 0) {
1963 				done = 1;
1964 				break;
1965 			}
1966 
1967 			/*
1968 			 * The filesystem may choose to bump up nr_to_write.
1969 			 * We have to make sure to honor the new nr_to_write
1970 			 * at any time.
1971 			 */
1972 			nr_to_write_done = wbc->nr_to_write <= 0;
1973 		}
1974 		folio_batch_release(&fbatch);
1975 		cond_resched();
1976 	}
1977 	if (!scanned && !done) {
1978 		/*
1979 		 * We hit the last page and there is more work to be done: wrap
1980 		 * back to the start of the file
1981 		 */
1982 		scanned = 1;
1983 		index = 0;
1984 		goto retry;
1985 	}
1986 	/*
1987 	 * If something went wrong, don't allow any metadata write bio to be
1988 	 * submitted.
1989 	 *
1990 	 * This prevents use-after-free if we had dirty pages that were not
1991 	 * cleaned up, which can still happen with fuzzed images.
1992 	 *
1993 	 * - Bad extent tree
1994 	 *   Allowing existing tree block to be allocated for other trees.
1995 	 *
1996 	 * - Log tree operations
1997 	 *   Existing tree blocks get allocated to the log tree, which bumps
1998 	 *   their generation, then get cleaned in tree re-balance.
1999 	 *   Such a tree block will not be written back, since it's clean,
2000 	 *   thus no WRITTEN flag is set.
2001 	 *   And after the log writes back, this tree block is not tracked by
2002 	 *   any dirty extent_io_tree.
2003 	 *
2004 	 * - Offending tree block gets re-dirtied from its original owner
2005 	 *   Since it has a bumped generation and no WRITTEN flag set, it can
2006 	 *   be reused without COWing. This tree block will not be tracked
2007 	 *   by btrfs_transaction::dirty_pages.
2008 	 *
2009 	 *   Now such a dirty tree block will not be cleaned by any dirty
2010 	 *   extent io tree. Thus we don't want to submit such a wild eb
2011 	 *   if the fs already has errors.
2012 	 *
2013 	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2014 	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2015 	 */
2016 	if (ret > 0)
2017 		ret = 0;
2018 	if (!ret && BTRFS_FS_ERROR(fs_info))
2019 		ret = -EROFS;
2020 
2021 	if (ctx.zoned_bg)
2022 		btrfs_put_block_group(ctx.zoned_bg);
2023 	btrfs_zoned_meta_io_unlock(fs_info);
2024 	return ret;
2025 }
2026 
2027 /*
2028  * Walk the list of dirty pages of the given address space and write all of them.
2029  *
2030  * @mapping:   address space structure to write
2031  * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
2032  * @bio_ctrl:  holds context for the write, namely the bio
2033  *
2034  * If a page is already under I/O, write_cache_pages() skips it, even
2035  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2036  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2037  * and msync() need to guarantee that all the data which was dirty at the time
2038  * the call was made get new I/O started against them.  If wbc->sync_mode is
2039  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2040  * existing IO to complete.
2041  */
2042 static int extent_write_cache_pages(struct address_space *mapping,
2043 			     struct btrfs_bio_ctrl *bio_ctrl)
2044 {
2045 	struct writeback_control *wbc = bio_ctrl->wbc;
2046 	struct inode *inode = mapping->host;
2047 	int ret = 0;
2048 	int done = 0;
2049 	int nr_to_write_done = 0;
2050 	struct folio_batch fbatch;
2051 	unsigned int nr_folios;
2052 	pgoff_t index;
2053 	pgoff_t end;		/* Inclusive */
2054 	pgoff_t done_index;
2055 	int range_whole = 0;
2056 	int scanned = 0;
2057 	xa_mark_t tag;
2058 
2059 	/*
2060 	 * We have to hold onto the inode so that ordered extents can do their
2061 	 * work when the IO finishes.  The alternative to this is failing to add
2062 	 * an ordered extent if the igrab() fails there and that is a huge pain
2063 	 * to deal with, so instead just hold onto the inode throughout the
2064 	 * writepages operation.  If it fails here we are freeing up the inode
2065 	 * anyway and we'd rather not waste our time writing out stuff that is
2066 	 * going to be truncated anyway.
2067 	 */
2068 	if (!igrab(inode))
2069 		return 0;
2070 
2071 	folio_batch_init(&fbatch);
2072 	if (wbc->range_cyclic) {
2073 		index = mapping->writeback_index; /* Start from prev offset */
2074 		end = -1;
2075 		/*
2076 		 * When starting from the beginning we don't need to cycle over
2077 		 * the range, so mark it as scanned.
2078 		 */
2079 		scanned = (index == 0);
2080 	} else {
2081 		index = wbc->range_start >> PAGE_SHIFT;
2082 		end = wbc->range_end >> PAGE_SHIFT;
2083 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2084 			range_whole = 1;
2085 		scanned = 1;
2086 	}
2087 
2088 	/*
2089 	 * We do the tagged writepage as long as the snapshot flush bit is set
2090 	 * and we are the first one to do the filemap_flush() on this inode.
2091 	 *
2092 	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2093 	 * not race in and drop the bit.
2094 	 */
2095 	if (range_whole && wbc->nr_to_write == LONG_MAX &&
2096 	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2097 			       &BTRFS_I(inode)->runtime_flags))
2098 		wbc->tagged_writepages = 1;
2099 
2100 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2101 		tag = PAGECACHE_TAG_TOWRITE;
2102 	else
2103 		tag = PAGECACHE_TAG_DIRTY;
2104 retry:
2105 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2106 		tag_pages_for_writeback(mapping, index, end);
2107 	done_index = index;
2108 	while (!done && !nr_to_write_done && (index <= end) &&
2109 			(nr_folios = filemap_get_folios_tag(mapping, &index,
2110 							end, tag, &fbatch))) {
2111 		unsigned i;
2112 
2113 		for (i = 0; i < nr_folios; i++) {
2114 			struct folio *folio = fbatch.folios[i];
2115 
2116 			done_index = folio_next_index(folio);
2117 			/*
2118 			 * At this point we hold neither the i_pages lock nor
2119 			 * the page lock: the page may be truncated or
2120 			 * invalidated (changing page->mapping to NULL),
2121 			 * or even swizzled back from swapper_space to
2122 			 * tmpfs file mapping
2123 			 */
2124 			if (!folio_trylock(folio)) {
2125 				submit_write_bio(bio_ctrl, 0);
2126 				folio_lock(folio);
2127 			}
2128 
2129 			if (unlikely(folio->mapping != mapping)) {
2130 				folio_unlock(folio);
2131 				continue;
2132 			}
2133 
2134 			if (!folio_test_dirty(folio)) {
2135 				/* Someone wrote it for us. */
2136 				folio_unlock(folio);
2137 				continue;
2138 			}
2139 
2140 			if (wbc->sync_mode != WB_SYNC_NONE) {
2141 				if (folio_test_writeback(folio))
2142 					submit_write_bio(bio_ctrl, 0);
2143 				folio_wait_writeback(folio);
2144 			}
2145 
2146 			if (folio_test_writeback(folio) ||
2147 			    !folio_clear_dirty_for_io(folio)) {
2148 				folio_unlock(folio);
2149 				continue;
2150 			}
2151 
2152 			ret = __extent_writepage(&folio->page, bio_ctrl);
2153 			if (ret < 0) {
2154 				done = 1;
2155 				break;
2156 			}
2157 
2158 			/*
2159 			 * The filesystem may choose to bump up nr_to_write.
2160 			 * We have to make sure to honor the new nr_to_write
2161 			 * at any time.
2162 			 */
2163 			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2164 					    wbc->nr_to_write <= 0);
2165 		}
2166 		folio_batch_release(&fbatch);
2167 		cond_resched();
2168 	}
2169 	if (!scanned && !done) {
2170 		/*
2171 		 * We hit the last page and there is more work to be done: wrap
2172 		 * back to the start of the file
2173 		 */
2174 		scanned = 1;
2175 		index = 0;
2176 
2177 		/*
2178 		 * If we're looping we could run into a page that is locked by a
2179 		 * writer and that writer could be waiting on writeback for a
2180 		 * page in our current bio, and thus deadlock, so flush the
2181 		 * write bio here.
2182 		 */
2183 		submit_write_bio(bio_ctrl, 0);
2184 		goto retry;
2185 	}
2186 
2187 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2188 		mapping->writeback_index = done_index;
2189 
2190 	btrfs_add_delayed_iput(BTRFS_I(inode));
2191 	return ret;
2192 }
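
/*
 * A note on the reference pairing above: the igrab() at function entry
 * keeps the inode alive while ordered extents complete, and
 * btrfs_add_delayed_iput() drops that reference from a safe context
 * later, since a direct iput() here could trigger inode eviction in a
 * writeback path where that is not safe.
 */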
2193 
2194 /*
2195  * Submit the pages in the range to bio for call sites whose delalloc range
2196  * has already been run (i.e. ordered extent inserted) and all pages are still
2197  * locked.
2198  */
2199 void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2200 			       u64 start, u64 end, struct writeback_control *wbc,
2201 			       bool pages_dirty)
2202 {
2203 	bool found_error = false;
2204 	int ret = 0;
2205 	struct address_space *mapping = inode->i_mapping;
2206 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2207 	const u32 sectorsize = fs_info->sectorsize;
2208 	loff_t i_size = i_size_read(inode);
2209 	u64 cur = start;
2210 	struct btrfs_bio_ctrl bio_ctrl = {
2211 		.wbc = wbc,
2212 		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2213 	};
2214 
2215 	if (wbc->no_cgroup_owner)
2216 		bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2217 
2218 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2219 
2220 	while (cur <= end) {
2221 		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2222 		u32 cur_len = cur_end + 1 - cur;
2223 		struct page *page;
2224 		int nr = 0;
2225 
2226 		page = find_get_page(mapping, cur >> PAGE_SHIFT);
2227 		ASSERT(PageLocked(page));
2228 		if (pages_dirty && page != locked_page) {
2229 			ASSERT(PageDirty(page));
2230 			clear_page_dirty_for_io(page);
2231 		}
2232 
2233 		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2234 					    i_size, &nr);
2235 		if (ret == 1)
2236 			goto next_page;
2237 
2238 		/* Make sure the mapping tag for page dirty gets cleared. */
2239 		if (nr == 0) {
2240 			set_page_writeback(page);
2241 			end_page_writeback(page);
2242 		}
2243 		if (ret) {
2244 			btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2245 						       cur, cur_len, !ret);
2246 			mapping_set_error(page->mapping, ret);
2247 		}
2248 		btrfs_folio_unlock_writer(fs_info, page_folio(page), cur, cur_len);
2249 		if (ret < 0)
2250 			found_error = true;
2251 next_page:
2252 		put_page(page);
2253 		cur = cur_end + 1;
2254 	}
2255 
2256 	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2257 }
2258 
2259 int extent_writepages(struct address_space *mapping,
2260 		      struct writeback_control *wbc)
2261 {
2262 	struct inode *inode = mapping->host;
2263 	int ret = 0;
2264 	struct btrfs_bio_ctrl bio_ctrl = {
2265 		.wbc = wbc,
2266 		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2267 	};
2268 
2269 	/*
2270 	 * Allow only a single thread to do the reloc work in zoned mode to
2271 	 * protect the write pointer updates.
2272 	 */
2273 	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2274 	ret = extent_write_cache_pages(mapping, &bio_ctrl);
2275 	submit_write_bio(&bio_ctrl, ret);
2276 	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2277 	return ret;
2278 }
2279 
2280 void extent_readahead(struct readahead_control *rac)
2281 {
2282 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2283 	struct page *pagepool[16];
2284 	struct extent_map *em_cached = NULL;
2285 	u64 prev_em_start = (u64)-1;
2286 	int nr;
2287 
2288 	while ((nr = readahead_page_batch(rac, pagepool))) {
2289 		u64 contig_start = readahead_pos(rac);
2290 		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2291 
2292 		contiguous_readpages(pagepool, nr, contig_start, contig_end,
2293 				&em_cached, &bio_ctrl, &prev_em_start);
2294 	}
2295 
2296 	if (em_cached)
2297 		free_extent_map(em_cached);
2298 	submit_one_bio(&bio_ctrl);
2299 }
2300 
2301 /*
2302  * Basic invalidate_folio code. This waits on any locked or writeback
2303  * ranges corresponding to the folio, and then deletes any extent state
2304  * records from the tree.
2305  */
2306 int extent_invalidate_folio(struct extent_io_tree *tree,
2307 			  struct folio *folio, size_t offset)
2308 {
2309 	struct extent_state *cached_state = NULL;
2310 	u64 start = folio_pos(folio);
2311 	u64 end = start + folio_size(folio) - 1;
2312 	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
2313 
2314 	/* This function is only called for the btree inode */
2315 	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2316 
2317 	start += ALIGN(offset, blocksize);
2318 	if (start > end)
2319 		return 0;
2320 
2321 	lock_extent(tree, start, end, &cached_state);
2322 	folio_wait_writeback(folio);
2323 
2324 	/*
2325 	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2326 	 * so here we only need to unlock the extent range to free any
2327 	 * existing extent state.
2328 	 */
2329 	unlock_extent(tree, start, end, &cached_state);
2330 	return 0;
2331 }
2332 
2333 /*
2334  * A helper for release_folio. This tests for areas of the page that
2335  * are locked or under IO and drops the related state bits if it is safe
2336  * to drop the page.
2337  */
2338 static int try_release_extent_state(struct extent_io_tree *tree,
2339 				    struct page *page, gfp_t mask)
2340 {
2341 	u64 start = page_offset(page);
2342 	u64 end = start + PAGE_SIZE - 1;
2343 	int ret = 1;
2344 
2345 	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2346 		ret = 0;
2347 	} else {
2348 		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2349 				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2350 				   EXTENT_QGROUP_RESERVED);
2351 
2352 		/*
2353 		 * At this point we can safely clear everything except the
2354 		 * locked bit, the nodatasum bit, the delalloc new bit and the
2355 		 * qgroup reserved bit. The delalloc new bit will be cleared
2356 		 * by ordered extent completion.
2357 		 */
2358 		ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2359 
2360 		/* If __clear_extent_bit() failed due to ENOMEM, we
2361 		 * can't allow the release to continue.
2362 		 */
2363 		if (ret < 0)
2364 			ret = 0;
2365 		else
2366 			ret = 1;
2367 	}
2368 	return ret;
2369 }
2370 
2371 /*
2372  * A helper for release_folio.  As long as there are no locked extents
2373  * in the range corresponding to the page, both state records and extent
2374  * map records are removed
2375  */
2376 int try_release_extent_mapping(struct page *page, gfp_t mask)
2377 {
2378 	struct extent_map *em;
2379 	u64 start = page_offset(page);
2380 	u64 end = start + PAGE_SIZE - 1;
2381 	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
2382 	struct extent_io_tree *tree = &btrfs_inode->io_tree;
2383 	struct extent_map_tree *map = &btrfs_inode->extent_tree;
2384 
2385 	if (gfpflags_allow_blocking(mask) &&
2386 	    page->mapping->host->i_size > SZ_16M) {
2387 		u64 len;
2388 		while (start <= end) {
2389 			struct btrfs_fs_info *fs_info;
2390 			u64 cur_gen;
2391 
2392 			len = end - start + 1;
2393 			write_lock(&map->lock);
2394 			em = lookup_extent_mapping(map, start, len);
2395 			if (!em) {
2396 				write_unlock(&map->lock);
2397 				break;
2398 			}
2399 			if ((em->flags & EXTENT_FLAG_PINNED) ||
2400 			    em->start != start) {
2401 				write_unlock(&map->lock);
2402 				free_extent_map(em);
2403 				break;
2404 			}
2405 			if (test_range_bit_exists(tree, em->start,
2406 						  extent_map_end(em) - 1,
2407 						  EXTENT_LOCKED))
2408 				goto next;
2409 			/*
2410 			 * If it's not in the list of modified extents, used
2411 			 * by a fast fsync, we can remove it. If it's being
2412 			 * logged we can safely remove it since fsync took an
2413 			 * extra reference on the em.
2414 			 */
2415 			if (list_empty(&em->list) ||
2416 			    (em->flags & EXTENT_FLAG_LOGGING))
2417 				goto remove_em;
2418 			/*
2419 			 * If it's in the list of modified extents, remove it
2420 			 * only if its generation is older than the current one,
2421 			 * in which case we don't need it for a fast fsync.
2422 			 * Otherwise don't remove it, we could be racing with an
2423 			 * ongoing fast fsync that could miss the new extent.
2424 			 */
2425 			fs_info = btrfs_inode->root->fs_info;
2426 			spin_lock(&fs_info->trans_lock);
2427 			cur_gen = fs_info->generation;
2428 			spin_unlock(&fs_info->trans_lock);
2429 			if (em->generation >= cur_gen)
2430 				goto next;
2431 remove_em:
2432 			/*
2433 			 * We only remove extent maps that are not in the list of
2434 			 * modified extents or that are in the list but with a
2435 			 * generation lower than the current generation, so there
2436 			 * is no need to set the full fsync flag on the inode (it
2437 			 * hurts the fsync performance for workloads with a data
2438 			 * size that exceeds or is close to the system's memory).
2439 			 */
2440 			remove_extent_mapping(map, em);
2441 			/* once for the rb tree */
2442 			free_extent_map(em);
2443 next:
2444 			start = extent_map_end(em);
2445 			write_unlock(&map->lock);
2446 
2447 			/* once for us */
2448 			free_extent_map(em);
2449 
2450 			cond_resched(); /* Allow large-extent preemption. */
2451 		}
2452 	}
2453 	return try_release_extent_state(tree, page, mask);
2454 }
2455 
2456 /*
2457  * Cache for the previous fiemap extent.
2458  *
2459  * Used for merging contiguous fiemap extents.
2460  */
2461 struct fiemap_cache {
2462 	u64 offset;
2463 	u64 phys;
2464 	u64 len;
2465 	u32 flags;
2466 	bool cached;
2467 };
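
/*
 * Example of the merging this cache enables, with illustrative values:
 * two consecutive calls to emit_fiemap_extent() describing [0, 4K) at
 * physical 1M and [4K, 8K) at physical 1M + 4K with identical flags are
 * reported to user space as a single 8K fiemap extent rather than two
 * adjacent ones.
 */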
2468 
2469 /*
2470  * Helper to submit fiemap extent.
2471  *
2472  * Will try to merge the current fiemap extent specified by @offset, @phys,
2473  * @len and @flags with the cached one.
2474  * Only when merging fails is the cached one submitted as a
2475  * fiemap extent.
2476  *
2477  * Return value is the same as fiemap_fill_next_extent().
2478  */
2479 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2480 				struct fiemap_cache *cache,
2481 				u64 offset, u64 phys, u64 len, u32 flags)
2482 {
2483 	int ret = 0;
2484 
2485 	/* Set at the end of extent_fiemap(). */
2486 	ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2487 
2488 	if (!cache->cached)
2489 		goto assign;
2490 
2491 	/*
2492 	 * Sanity check: extent_fiemap() should have ensured that the new
2493 	 * fiemap extent won't overlap with the cached one.
2494 	 * Not recoverable.
2495 	 *
2496 	 * NOTE: Physical addresses can overlap, due to compression.
2497 	 */
2498 	if (cache->offset + cache->len > offset) {
2499 		WARN_ON(1);
2500 		return -EINVAL;
2501 	}
2502 
2503 	/*
2504 	 * Only merge fiemap extents if
2505 	 * 1) Their logical addresses are contiguous
2506 	 *
2507 	 * 2) Their physical addresses are contiguous
2508 	 *    So truly compressed (physical size smaller than logical size)
2509 	 *    extents won't get merged with each other
2510 	 *
2511 	 * 3) They share the same flags
2512 	 */
2513 	if (cache->offset + cache->len == offset &&
2514 	    cache->phys + cache->len == phys &&
2515 	    cache->flags == flags) {
2516 		cache->len += len;
2517 		return 0;
2518 	}
2519 
2520 	/* Not mergeable, need to submit cached one */
2521 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2522 				      cache->len, cache->flags);
2523 	cache->cached = false;
2524 	if (ret)
2525 		return ret;
2526 assign:
2527 	cache->cached = true;
2528 	cache->offset = offset;
2529 	cache->phys = phys;
2530 	cache->len = len;
2531 	cache->flags = flags;
2532 
2533 	return 0;
2534 }
2535 
2536 /*
2537  * Emit the last fiemap cache
2538  *
2539  * The last fiemap cache may still be cached in the following case:
2540  * 0		      4k		    8k
2541  * |<- Fiemap range ->|
2542  * |<------------  First extent ----------->|
2543  *
2544  * In this case, the first extent range will be cached but not emitted.
2545  * So we must emit it before ending extent_fiemap().
2546  */
2547 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
2548 				  struct fiemap_cache *cache)
2549 {
2550 	int ret;
2551 
2552 	if (!cache->cached)
2553 		return 0;
2554 
2555 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2556 				      cache->len, cache->flags);
2557 	cache->cached = false;
2558 	if (ret > 0)
2559 		ret = 0;
2560 	return ret;
2561 }
2562 
2563 static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
2564 {
2565 	struct extent_buffer *clone;
2566 	struct btrfs_key key;
2567 	int slot;
2568 	int ret;
2569 
2570 	path->slots[0]++;
2571 	if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2572 		return 0;
2573 
2574 	ret = btrfs_next_leaf(inode->root, path);
2575 	if (ret != 0)
2576 		return ret;
2577 
2578 	/*
2579 	 * Don't bother with cloning if there are no more file extent items for
2580 	 * our inode.
2581 	 */
2582 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2583 	if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY)
2584 		return 1;
2585 
2586 	/* See the comment at fiemap_search_slot() about why we clone. */
2587 	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2588 	if (!clone)
2589 		return -ENOMEM;
2590 
2591 	slot = path->slots[0];
2592 	btrfs_release_path(path);
2593 	path->nodes[0] = clone;
2594 	path->slots[0] = slot;
2595 
2596 	return 0;
2597 }
2598 
2599 /*
2600  * Search for the first file extent item that starts at a given file offset or
2601  * the one that starts immediately before that offset.
2602  * Returns: 0 on success, < 0 on error, 1 if not found.
2603  */
2604 static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2605 			      u64 file_offset)
2606 {
2607 	const u64 ino = btrfs_ino(inode);
2608 	struct btrfs_root *root = inode->root;
2609 	struct extent_buffer *clone;
2610 	struct btrfs_key key;
2611 	int slot;
2612 	int ret;
2613 
2614 	key.objectid = ino;
2615 	key.type = BTRFS_EXTENT_DATA_KEY;
2616 	key.offset = file_offset;
2617 
2618 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2619 	if (ret < 0)
2620 		return ret;
2621 
2622 	if (ret > 0 && path->slots[0] > 0) {
2623 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2624 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2625 			path->slots[0]--;
2626 	}
2627 
2628 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2629 		ret = btrfs_next_leaf(root, path);
2630 		if (ret != 0)
2631 			return ret;
2632 
2633 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2634 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2635 			return 1;
2636 	}
2637 
2638 	/*
2639 	 * We clone the leaf and use it during fiemap. This is because while
2640 	 * using the leaf we do expensive things like checking if an extent is
2641 	 * shared, which can take a long time. In order to prevent blocking
2642 	 * other tasks for too long, we use a clone of the leaf. We have locked
2643 	 * the file range in the inode's io tree, so we know none of our file
2644 	 * extent items can change. This way we avoid blocking other tasks that
2645 	 * want to insert items for other inodes in the same leaf or b+tree
2646 	 * rebalance operations (triggered for example when someone is trying
2647 	 * to push items into this leaf when trying to insert an item in a
2648 	 * neighbour leaf).
2649 	 * We also need the private clone because holding a read lock on an
2650 	 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2651 	 * when we call fiemap_fill_next_extent(), because that may cause a page
2652 	 * fault when filling the user space buffer with fiemap data.
2653 	 */
2654 	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2655 	if (!clone)
2656 		return -ENOMEM;
2657 
2658 	slot = path->slots[0];
2659 	btrfs_release_path(path);
2660 	path->nodes[0] = clone;
2661 	path->slots[0] = slot;
2662 
2663 	return 0;
2664 }
2665 
2666 /*
2667  * Process a range which is a hole or a prealloc extent in the inode's subvolume
2668  * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2669  * extent. The end offset (@end) is inclusive.
2670  */
2671 static int fiemap_process_hole(struct btrfs_inode *inode,
2672 			       struct fiemap_extent_info *fieinfo,
2673 			       struct fiemap_cache *cache,
2674 			       struct extent_state **delalloc_cached_state,
2675 			       struct btrfs_backref_share_check_ctx *backref_ctx,
2676 			       u64 disk_bytenr, u64 extent_offset,
2677 			       u64 extent_gen,
2678 			       u64 start, u64 end)
2679 {
2680 	const u64 i_size = i_size_read(&inode->vfs_inode);
2681 	u64 cur_offset = start;
2682 	u64 last_delalloc_end = 0;
2683 	u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2684 	bool checked_extent_shared = false;
2685 	int ret;
2686 
2687 	/*
2688 	 * There can be no delalloc past i_size, so don't waste time looking for
2689 	 * it beyond i_size.
2690 	 */
2691 	while (cur_offset < end && cur_offset < i_size) {
2692 		struct extent_state *cached_state = NULL;
2693 		u64 delalloc_start;
2694 		u64 delalloc_end;
2695 		u64 prealloc_start;
2696 		u64 lockstart;
2697 		u64 lockend;
2698 		u64 prealloc_len = 0;
2699 		bool delalloc;
2700 
2701 		lockstart = round_down(cur_offset, inode->root->fs_info->sectorsize);
2702 		lockend = round_up(end, inode->root->fs_info->sectorsize);
2703 
2704 		/*
2705 		 * We are only locking for the delalloc range because that's the
2706 		 * only thing that can change here.  With fiemap we have a lock
2707 		 * on the inode, so no buffered or direct writes can happen.
2708 		 *
2709 		 * However mmaps and normal page writeback will cause this to
2710 		 * change arbitrarily.  We have to lock the extent lock here to
2711 		 * make sure that nobody messes with the tree while we're doing
2712 		 * btrfs_find_delalloc_in_range.
2713 		 */
2714 		lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
2715 		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2716 							delalloc_cached_state,
2717 							&delalloc_start,
2718 							&delalloc_end);
2719 		unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
2720 		if (!delalloc)
2721 			break;
2722 
2723 		/*
2724 		 * If this is a prealloc extent we have to report every section
2725 		 * of it that has no delalloc.
2726 		 */
2727 		if (disk_bytenr != 0) {
2728 			if (last_delalloc_end == 0) {
2729 				prealloc_start = start;
2730 				prealloc_len = delalloc_start - start;
2731 			} else {
2732 				prealloc_start = last_delalloc_end + 1;
2733 				prealloc_len = delalloc_start - prealloc_start;
2734 			}
2735 		}
2736 
2737 		if (prealloc_len > 0) {
2738 			if (!checked_extent_shared && fieinfo->fi_extents_max) {
2739 				ret = btrfs_is_data_extent_shared(inode,
2740 								  disk_bytenr,
2741 								  extent_gen,
2742 								  backref_ctx);
2743 				if (ret < 0)
2744 					return ret;
2745 				else if (ret > 0)
2746 					prealloc_flags |= FIEMAP_EXTENT_SHARED;
2747 
2748 				checked_extent_shared = true;
2749 			}
2750 			ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2751 						 disk_bytenr + extent_offset,
2752 						 prealloc_len, prealloc_flags);
2753 			if (ret)
2754 				return ret;
2755 			extent_offset += prealloc_len;
2756 		}
2757 
2758 		ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2759 					 delalloc_end + 1 - delalloc_start,
2760 					 FIEMAP_EXTENT_DELALLOC |
2761 					 FIEMAP_EXTENT_UNKNOWN);
2762 		if (ret)
2763 			return ret;
2764 
2765 		last_delalloc_end = delalloc_end;
2766 		cur_offset = delalloc_end + 1;
2767 		extent_offset += cur_offset - delalloc_start;
2768 		cond_resched();
2769 	}
2770 
2771 	/*
2772 	 * Either we found no delalloc for the whole prealloc extent or we have
2773 	 * a prealloc extent that spans i_size or starts at or after i_size.
2774 	 */
2775 	if (disk_bytenr != 0 && last_delalloc_end < end) {
2776 		u64 prealloc_start;
2777 		u64 prealloc_len;
2778 
2779 		if (last_delalloc_end == 0) {
2780 			prealloc_start = start;
2781 			prealloc_len = end + 1 - start;
2782 		} else {
2783 			prealloc_start = last_delalloc_end + 1;
2784 			prealloc_len = end + 1 - prealloc_start;
2785 		}
2786 
2787 		if (!checked_extent_shared && fieinfo->fi_extents_max) {
2788 			ret = btrfs_is_data_extent_shared(inode,
2789 							  disk_bytenr,
2790 							  extent_gen,
2791 							  backref_ctx);
2792 			if (ret < 0)
2793 				return ret;
2794 			else if (ret > 0)
2795 				prealloc_flags |= FIEMAP_EXTENT_SHARED;
2796 		}
2797 		ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2798 					 disk_bytenr + extent_offset,
2799 					 prealloc_len, prealloc_flags);
2800 		if (ret)
2801 			return ret;
2802 	}
2803 
2804 	return 0;
2805 }
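
/*
 * Worked example for the function above (illustrative): a 64K prealloc
 * extent at file offset 0 with delalloc over [16K, 32K) is reported as
 * three fiemap extents: an unwritten extent for [0, 16K), a delalloc
 * extent for [16K, 32K), and, via the trailing block at the end of the
 * function, an unwritten extent for [32K, 64K).
 */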
2806 
2807 static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
2808 					  struct btrfs_path *path,
2809 					  u64 *last_extent_end_ret)
2810 {
2811 	const u64 ino = btrfs_ino(inode);
2812 	struct btrfs_root *root = inode->root;
2813 	struct extent_buffer *leaf;
2814 	struct btrfs_file_extent_item *ei;
2815 	struct btrfs_key key;
2816 	u64 disk_bytenr;
2817 	int ret;
2818 
2819 	/*
2820 	 * Lookup the last file extent. We're not using i_size here because
2821 	 * there might be preallocation past i_size.
2822 	 */
2823 	ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
2824 	/* There can't be a file extent item at offset (u64)-1 */
2825 	ASSERT(ret != 0);
2826 	if (ret < 0)
2827 		return ret;
2828 
2829 	/*
2830 	 * For a non-existing key, btrfs_search_slot() always leaves us at a
2831 	 * slot > 0, except if the btree is empty, which is impossible because
2832 	 * at least it has the inode item for this inode and all the items for
2833 	 * the root inode 256.
2834 	 */
2835 	ASSERT(path->slots[0] > 0);
2836 	path->slots[0]--;
2837 	leaf = path->nodes[0];
2838 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2839 	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
2840 		/* No file extent items in the subvolume tree. */
2841 		*last_extent_end_ret = 0;
2842 		return 0;
2843 	}
2844 
2845 	/*
2846 	 * For an inline extent, the disk_bytenr is where the inline data starts,
2847 	 * so first check if we have an inline extent item before checking if we
2848 	 * have an implicit hole (disk_bytenr == 0).
2849 	 */
2850 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
2851 	if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
2852 		*last_extent_end_ret = btrfs_file_extent_end(path);
2853 		return 0;
2854 	}
2855 
2856 	/*
2857 	 * Find the last file extent item that is not a hole (when NO_HOLES is
2858 	 * not enabled). This should take at most 2 iterations in the worst
2859 	 * case: we have one hole file extent item at slot 0 of a leaf and
2860 	 * another hole file extent item as the last item in the previous leaf.
2861 	 * This is because we merge file extent items that represent holes.
2862 	 */
2863 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2864 	while (disk_bytenr == 0) {
2865 		ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
2866 		if (ret < 0) {
2867 			return ret;
2868 		} else if (ret > 0) {
2869 			/* No file extent items that are not holes. */
2870 			*last_extent_end_ret = 0;
2871 			return 0;
2872 		}
2873 		leaf = path->nodes[0];
2874 		ei = btrfs_item_ptr(leaf, path->slots[0],
2875 				    struct btrfs_file_extent_item);
2876 		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2877 	}
2878 
2879 	*last_extent_end_ret = btrfs_file_extent_end(path);
2880 	return 0;
2881 }
2882 
2883 int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
2884 		  u64 start, u64 len)
2885 {
2886 	const u64 ino = btrfs_ino(inode);
2887 	struct extent_state *delalloc_cached_state = NULL;
2888 	struct btrfs_path *path;
2889 	struct fiemap_cache cache = { 0 };
2890 	struct btrfs_backref_share_check_ctx *backref_ctx;
2891 	u64 last_extent_end;
2892 	u64 prev_extent_end;
2893 	u64 range_start;
2894 	u64 range_end;
2895 	const u64 sectorsize = inode->root->fs_info->sectorsize;
2896 	bool stopped = false;
2897 	int ret;
2898 
2899 	backref_ctx = btrfs_alloc_backref_share_check_ctx();
2900 	path = btrfs_alloc_path();
2901 	if (!backref_ctx || !path) {
2902 		ret = -ENOMEM;
2903 		goto out;
2904 	}
2905 
2906 	range_start = round_down(start, sectorsize);
2907 	range_end = round_up(start + len, sectorsize);
2908 	prev_extent_end = range_start;
2909 
2910 	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
2911 
2912 	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
2913 	if (ret < 0)
2914 		goto out_unlock;
2915 	btrfs_release_path(path);
2916 
2917 	path->reada = READA_FORWARD;
2918 	ret = fiemap_search_slot(inode, path, range_start);
2919 	if (ret < 0) {
2920 		goto out_unlock;
2921 	} else if (ret > 0) {
2922 		/*
2923 		 * No file extent item found, but we may have delalloc between
2924 		 * the current offset and i_size. So check for that.
2925 		 */
2926 		ret = 0;
2927 		goto check_eof_delalloc;
2928 	}
2929 
2930 	while (prev_extent_end < range_end) {
2931 		struct extent_buffer *leaf = path->nodes[0];
2932 		struct btrfs_file_extent_item *ei;
2933 		struct btrfs_key key;
2934 		u64 extent_end;
2935 		u64 extent_len;
2936 		u64 extent_offset = 0;
2937 		u64 extent_gen;
2938 		u64 disk_bytenr = 0;
2939 		u64 flags = 0;
2940 		int extent_type;
2941 		u8 compression;
2942 
2943 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2944 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2945 			break;
2946 
2947 		extent_end = btrfs_file_extent_end(path);
2948 
2949 		/*
2950 		 * The first iteration can leave us at an extent item that ends
2951 		 * before our range's start. Move to the next item.
2952 		 */
2953 		if (extent_end <= range_start)
2954 			goto next_item;
2955 
2956 		backref_ctx->curr_leaf_bytenr = leaf->start;
2957 
2958 		/* We have an implicit hole (NO_HOLES feature enabled). */
2959 		if (prev_extent_end < key.offset) {
2960 			const u64 hole_end = min(key.offset, range_end) - 1;
2961 
2962 			ret = fiemap_process_hole(inode, fieinfo, &cache,
2963 						  &delalloc_cached_state,
2964 						  backref_ctx, 0, 0, 0,
2965 						  prev_extent_end, hole_end);
2966 			if (ret < 0) {
2967 				goto out_unlock;
2968 			} else if (ret > 0) {
2969 				/* fiemap_fill_next_extent() told us to stop. */
2970 				stopped = true;
2971 				break;
2972 			}
2973 
2974 			/* We've reached the end of the fiemap range, stop. */
2975 			if (key.offset >= range_end) {
2976 				stopped = true;
2977 				break;
2978 			}
2979 		}
2980 
2981 		extent_len = extent_end - key.offset;
2982 		ei = btrfs_item_ptr(leaf, path->slots[0],
2983 				    struct btrfs_file_extent_item);
2984 		compression = btrfs_file_extent_compression(leaf, ei);
2985 		extent_type = btrfs_file_extent_type(leaf, ei);
2986 		extent_gen = btrfs_file_extent_generation(leaf, ei);
2987 
2988 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2989 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2990 			if (compression == BTRFS_COMPRESS_NONE)
2991 				extent_offset = btrfs_file_extent_offset(leaf, ei);
2992 		}
2993 
2994 		if (compression != BTRFS_COMPRESS_NONE)
2995 			flags |= FIEMAP_EXTENT_ENCODED;
2996 
2997 		if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2998 			flags |= FIEMAP_EXTENT_DATA_INLINE;
2999 			flags |= FIEMAP_EXTENT_NOT_ALIGNED;
3000 			ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
3001 						 extent_len, flags);
3002 		} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3003 			ret = fiemap_process_hole(inode, fieinfo, &cache,
3004 						  &delalloc_cached_state,
3005 						  backref_ctx,
3006 						  disk_bytenr, extent_offset,
3007 						  extent_gen, key.offset,
3008 						  extent_end - 1);
3009 		} else if (disk_bytenr == 0) {
3010 			/* We have an explicit hole. */
3011 			ret = fiemap_process_hole(inode, fieinfo, &cache,
3012 						  &delalloc_cached_state,
3013 						  backref_ctx, 0, 0, 0,
3014 						  key.offset, extent_end - 1);
3015 		} else {
3016 			/* We have a regular extent. */
3017 			if (fieinfo->fi_extents_max) {
3018 				ret = btrfs_is_data_extent_shared(inode,
3019 								  disk_bytenr,
3020 								  extent_gen,
3021 								  backref_ctx);
3022 				if (ret < 0)
3023 					goto out_unlock;
3024 				else if (ret > 0)
3025 					flags |= FIEMAP_EXTENT_SHARED;
3026 			}
3027 
3028 			ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
3029 						 disk_bytenr + extent_offset,
3030 						 extent_len, flags);
3031 		}
3032 
3033 		if (ret < 0) {
3034 			goto out_unlock;
3035 		} else if (ret > 0) {
3036 			/* fiemap_fill_next_extent() told us to stop. */
3037 			stopped = true;
3038 			break;
3039 		}
3040 
3041 		prev_extent_end = extent_end;
3042 next_item:
3043 		if (fatal_signal_pending(current)) {
3044 			ret = -EINTR;
3045 			goto out_unlock;
3046 		}
3047 
3048 		ret = fiemap_next_leaf_item(inode, path);
3049 		if (ret < 0) {
3050 			goto out_unlock;
3051 		} else if (ret > 0) {
3052 			/* No more file extent items for this inode. */
3053 			break;
3054 		}
3055 		cond_resched();
3056 	}
3057 
3058 check_eof_delalloc:
3059 	/*
3060 	 * Release (and free) the path before emitting any final entries to
3061 	 * fiemap_fill_next_extent() to keep lockdep happy. This is because
3062 	 * once we find no more file extent items exist, we may have a
3063 	 * non-cloned leaf, and fiemap_fill_next_extent() can trigger page
3064 	 * faults when copying data to the user space buffer.
3065 	 */
3066 	btrfs_free_path(path);
3067 	path = NULL;
3068 
3069 	if (!stopped && prev_extent_end < range_end) {
3070 		ret = fiemap_process_hole(inode, fieinfo, &cache,
3071 					  &delalloc_cached_state, backref_ctx,
3072 					  0, 0, 0, prev_extent_end, range_end - 1);
3073 		if (ret < 0)
3074 			goto out_unlock;
3075 		prev_extent_end = range_end;
3076 	}
3077 
3078 	if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3079 		const u64 i_size = i_size_read(&inode->vfs_inode);
3080 
3081 		if (prev_extent_end < i_size) {
3082 			struct extent_state *cached_state = NULL;
3083 			u64 delalloc_start;
3084 			u64 delalloc_end;
3085 			u64 lockstart;
3086 			u64 lockend;
3087 			bool delalloc;
3088 
3089 			lockstart = round_down(prev_extent_end, sectorsize);
3090 			lockend = round_up(i_size, sectorsize);
3091 
3092 			/*
3093 			 * See the comment in fiemap_process_hole as to why
3094 			 * we're doing the locking here.
3095 			 */
3096 			lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3097 			delalloc = btrfs_find_delalloc_in_range(inode,
3098 								prev_extent_end,
3099 								i_size - 1,
3100 								&delalloc_cached_state,
3101 								&delalloc_start,
3102 								&delalloc_end);
3103 			unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3104 			if (!delalloc)
3105 				cache.flags |= FIEMAP_EXTENT_LAST;
3106 		} else {
3107 			cache.flags |= FIEMAP_EXTENT_LAST;
3108 		}
3109 	}
3110 
3111 	ret = emit_last_fiemap_cache(fieinfo, &cache);
3112 
3113 out_unlock:
3114 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3115 out:
3116 	free_extent_state(delalloc_cached_state);
3117 	btrfs_free_backref_share_ctx(backref_ctx);
3118 	btrfs_free_path(path);
3119 	return ret;
3120 }
3121 
3122 static void __free_extent_buffer(struct extent_buffer *eb)
3123 {
3124 	kmem_cache_free(extent_buffer_cache, eb);
3125 }
3126 
3127 static int extent_buffer_under_io(const struct extent_buffer *eb)
3128 {
3129 	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3130 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3131 }
3132 
3133 static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
3134 {
3135 	struct btrfs_subpage *subpage;
3136 
3137 	lockdep_assert_held(&folio->mapping->i_private_lock);
3138 
3139 	if (folio_test_private(folio)) {
3140 		subpage = folio_get_private(folio);
3141 		if (atomic_read(&subpage->eb_refs))
3142 			return true;
3143 		/*
3144 		 * Even if there are no eb refs here, we may still have an
3145 		 * end_page_read() call relying on page::private.
3146 		 */
3147 		if (atomic_read(&subpage->readers))
3148 			return true;
3149 	}
3150 	return false;
3151 }
3152 
3153 static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio)
3154 {
3155 	struct btrfs_fs_info *fs_info = eb->fs_info;
3156 	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3157 
3158 	/*
3159 	 * For mapped eb, we're going to change the folio private, which should
3160 	 * be done under the i_private_lock.
3161 	 */
3162 	if (mapped)
3163 		spin_lock(&folio->mapping->i_private_lock);
3164 
3165 	if (!folio_test_private(folio)) {
3166 		if (mapped)
3167 			spin_unlock(&folio->mapping->i_private_lock);
3168 		return;
3169 	}
3170 
3171 	if (fs_info->nodesize >= PAGE_SIZE) {
3172 		/*
3173 		 * We do this since we'll remove the pages after we've
3174 		 * removed the eb from the radix tree, so we could race
3175 		 * and have this page now attached to the new eb.  So
3176 		 * only clear the folio private if it's still connected
3177 		 * to this eb.
3178 		 */
3179 		if (folio_test_private(folio) && folio_get_private(folio) == eb) {
3180 			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3181 			BUG_ON(folio_test_dirty(folio));
3182 			BUG_ON(folio_test_writeback(folio));
3183 			/* We need to make sure we haven't been attached to a new eb. */
3184 			folio_detach_private(folio);
3185 		}
3186 		if (mapped)
3187 			spin_unlock(&folio->mapping->i_private_lock);
3188 		return;
3189 	}
3190 
3191 	/*
3192 	 * For subpage, we can have a dummy eb with folio private attached.  In
3193 	 * this case, we can directly detach the private as such a folio is only
3194 	 * attached to one dummy eb, no sharing.
3195 	 */
3196 	if (!mapped) {
3197 		btrfs_detach_subpage(fs_info, folio);
3198 		return;
3199 	}
3200 
3201 	btrfs_folio_dec_eb_refs(fs_info, folio);
3202 
3203 	/*
3204 	 * We can only detach the folio private if there are no other ebs in the
3205 	 * page range and no unfinished IO.
3206 	 */
3207 	if (!folio_range_has_eb(fs_info, folio))
3208 		btrfs_detach_subpage(fs_info, folio);
3209 
3210 	spin_unlock(&folio->mapping->i_private_lock);
3211 }
3212 
3213 /* Release all pages attached to the extent buffer */
3214 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3215 {
3216 	ASSERT(!extent_buffer_under_io(eb));
3217 
3218 	for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
3219 		struct folio *folio = eb->folios[i];
3220 
3221 		if (!folio)
3222 			continue;
3223 
3224 		detach_extent_buffer_folio(eb, folio);
3225 
3226 		/* One for when we allocated the folio. */
3227 		folio_put(folio);
3228 	}
3229 }
3230 
3231 /*
3232  * Helper for releasing the extent buffer.
3233  */
3234 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3235 {
3236 	btrfs_release_extent_buffer_pages(eb);
3237 	btrfs_leak_debug_del_eb(eb);
3238 	__free_extent_buffer(eb);
3239 }
3240 
3241 static struct extent_buffer *
3242 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
3243 		      unsigned long len)
3244 {
3245 	struct extent_buffer *eb = NULL;
3246 
3247 	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
3248 	eb->start = start;
3249 	eb->len = len;
3250 	eb->fs_info = fs_info;
3251 	init_rwsem(&eb->lock);
3252 
3253 	btrfs_leak_debug_add_eb(eb);
3254 
3255 	spin_lock_init(&eb->refs_lock);
3256 	atomic_set(&eb->refs, 1);
3257 
3258 	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
3259 
3260 	return eb;
3261 }
3262 
3263 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
3264 {
3265 	struct extent_buffer *new;
3266 	int num_folios = num_extent_folios(src);
3267 	int ret;
3268 
3269 	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
3270 	if (new == NULL)
3271 		return NULL;
3272 
3273 	/*
3274 	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3275 	 * btrfs_release_extent_buffer() has different behavior for an
3276 	 * UNMAPPED subpage extent buffer.
3277 	 */
3278 	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3279 
3280 	ret = alloc_eb_folio_array(new, 0);
3281 	if (ret) {
3282 		btrfs_release_extent_buffer(new);
3283 		return NULL;
3284 	}
3285 
3286 	for (int i = 0; i < num_folios; i++) {
3287 		struct folio *folio = new->folios[i];
3288 		int ret;
3289 
3290 		ret = attach_extent_buffer_folio(new, folio, NULL);
3291 		if (ret < 0) {
3292 			btrfs_release_extent_buffer(new);
3293 			return NULL;
3294 		}
3295 		WARN_ON(folio_test_dirty(folio));
3296 	}
3297 	copy_extent_buffer_full(new, src);
3298 	set_extent_buffer_uptodate(new);
3299 
3300 	return new;
3301 }
3302 
3303 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3304 						  u64 start, unsigned long len)
3305 {
3306 	struct extent_buffer *eb;
3307 	int num_folios = 0;
3308 	int ret;
3309 
3310 	eb = __alloc_extent_buffer(fs_info, start, len);
3311 	if (!eb)
3312 		return NULL;
3313 
3314 	ret = alloc_eb_folio_array(eb, 0);
3315 	if (ret)
3316 		goto err;
3317 
3318 	num_folios = num_extent_folios(eb);
3319 	for (int i = 0; i < num_folios; i++) {
3320 		ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
3321 		if (ret < 0)
3322 			goto err;
3323 	}
3324 
3325 	set_extent_buffer_uptodate(eb);
3326 	btrfs_set_header_nritems(eb, 0);
3327 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3328 
3329 	return eb;
3330 err:
3331 	for (int i = 0; i < num_folios; i++) {
3332 		if (eb->folios[i]) {
3333 			detach_extent_buffer_folio(eb, eb->folios[i]);
3334 			__folio_put(eb->folios[i]);
3335 		}
3336 	}
3337 	__free_extent_buffer(eb);
3338 	return NULL;
3339 }
3340 
3341 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3342 						u64 start)
3343 {
3344 	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
3345 }
3346 
3347 static void check_buffer_tree_ref(struct extent_buffer *eb)
3348 {
3349 	int refs;
3350 	/*
3351 	 * The TREE_REF bit is first set when the extent_buffer is added
3352 	 * to the radix tree. It is also set again, if it was cleared, when a
3353 	 * new reference is created by find_extent_buffer.
3354 	 *
3355 	 * It is only cleared in two cases: freeing the last non-tree
3356 	 * reference to the extent_buffer when its STALE bit is set or
3357 	 * calling release_folio when the tree reference is the only reference.
3358 	 *
3359 	 * In both cases, care is taken to ensure that the extent_buffer's
3360 	 * pages are not under io. However, release_folio can be concurrently
3361 	 * called with creating new references, which is prone to race
3362 	 * conditions between the calls to check_buffer_tree_ref in those
3363 	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3364 	 *
3365 	 * The actual lifetime of the extent_buffer in the radix tree is
3366 	 * adequately protected by the refcount, but the TREE_REF bit and
3367 	 * its corresponding reference are not. To protect against this
3368 	 * class of races, we call check_buffer_tree_ref from the codepaths
3369 	 * which trigger io. Note that once io is initiated, TREE_REF can no
3370 	 * longer be cleared, so that is the moment at which any such race is
3371 	 * best fixed.
3372 	 */
3373 	refs = atomic_read(&eb->refs);
3374 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3375 		return;
3376 
3377 	spin_lock(&eb->refs_lock);
3378 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3379 		atomic_inc(&eb->refs);
3380 	spin_unlock(&eb->refs_lock);
3381 }
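
/*
 * An illustrative timeline of the race described above:
 *
 *	release_folio path                   read path
 *	  refs == 1, TREE_REF set
 *	                                       atomic_inc_not_zero(), refs == 2
 *	  clears TREE_REF, drops its ref
 *	                                       starts IO on an eb whose tree
 *	                                       reference is already gone
 *
 * Calling check_buffer_tree_ref() before triggering IO re-establishes
 * TREE_REF (and its reference) inside that window.
 */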
3382 
3383 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
3384 {
3385 	int num_folios = num_extent_folios(eb);
3386 
3387 	check_buffer_tree_ref(eb);
3388 
3389 	for (int i = 0; i < num_folios; i++)
3390 		folio_mark_accessed(eb->folios[i]);
3391 }
3392 
3393 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3394 					 u64 start)
3395 {
3396 	struct extent_buffer *eb;
3397 
3398 	eb = find_extent_buffer_nolock(fs_info, start);
3399 	if (!eb)
3400 		return NULL;
3401 	/*
3402 	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3403 	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3404 	 * another task running free_extent_buffer() might have seen that flag
3405 	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3406 	 * writeback flags not set) and it's still in the tree (flag
3407 	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3408 	 * decrementing the extent buffer's reference count twice.  So here we
3409 	 * could race and increment the eb's reference count, clear its stale
3410 	 * flag, mark it as dirty and drop our reference before the other task
3411 	 * finishes executing free_extent_buffer, which would later result in
3412 	 * an attempt to free an extent buffer that is dirty.
3413 	 */
3414 	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3415 		spin_lock(&eb->refs_lock);
3416 		spin_unlock(&eb->refs_lock);
3417 	}
3418 	mark_extent_buffer_accessed(eb);
3419 	return eb;
3420 }
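
/*
 * Example usage (illustrative sketch): a successful lookup returns the eb
 * with an extra reference held, which the caller must drop:
 *
 *	struct extent_buffer *eb;
 *
 *	eb = find_extent_buffer(fs_info, bytenr);
 *	if (eb) {
 *		... use the eb, e.g. via read_extent_buffer() ...
 *		free_extent_buffer(eb);
 *	}
 */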
3421 
3422 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3423 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3424 					u64 start)
3425 {
3426 	struct extent_buffer *eb, *exists = NULL;
3427 	int ret;
3428 
3429 	eb = find_extent_buffer(fs_info, start);
3430 	if (eb)
3431 		return eb;
3432 	eb = alloc_dummy_extent_buffer(fs_info, start);
3433 	if (!eb)
3434 		return ERR_PTR(-ENOMEM);
3435 	eb->fs_info = fs_info;
3436 again:
3437 	ret = radix_tree_preload(GFP_NOFS);
3438 	if (ret) {
3439 		exists = ERR_PTR(ret);
3440 		goto free_eb;
3441 	}
3442 	spin_lock(&fs_info->buffer_lock);
3443 	ret = radix_tree_insert(&fs_info->buffer_radix,
3444 				start >> fs_info->sectorsize_bits, eb);
3445 	spin_unlock(&fs_info->buffer_lock);
3446 	radix_tree_preload_end();
3447 	if (ret == -EEXIST) {
3448 		exists = find_extent_buffer(fs_info, start);
3449 		if (exists)
3450 			goto free_eb;
3451 		else
3452 			goto again;
3453 	}
3454 	check_buffer_tree_ref(eb);
3455 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3456 
3457 	return eb;
3458 free_eb:
3459 	btrfs_release_extent_buffer(eb);
3460 	return exists;
3461 }
3462 #endif
3463 
3464 static struct extent_buffer *grab_extent_buffer(
3465 		struct btrfs_fs_info *fs_info, struct page *page)
3466 {
3467 	struct folio *folio = page_folio(page);
3468 	struct extent_buffer *exists;
3469 
3470 	/*
3471 	 * For the subpage case, we completely rely on the radix tree to ensure
3472 	 * we don't try to insert two ebs for the same bytenr.  So here we always
3473 	 * return NULL and just continue.
3474 	 */
3475 	if (fs_info->nodesize < PAGE_SIZE)
3476 		return NULL;
3477 
3478 	/* Page not yet attached to an extent buffer */
3479 	if (!folio_test_private(folio))
3480 		return NULL;
3481 
3482 	/*
3483 	 * We could have already allocated an eb for this page and attached one,
3484 	 * so let's see if we can get a ref on the existing eb. If we can, we
3485 	 * know it's good and we can just return that one; otherwise we know we can
3486 	 * just overwrite folio private.
3487 	 */
3488 	exists = folio_get_private(folio);
3489 	if (atomic_inc_not_zero(&exists->refs))
3490 		return exists;
3491 
3492 	WARN_ON(PageDirty(page));
3493 	folio_detach_private(folio);
3494 	return NULL;
3495 }
3496 
3497 static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3498 {
3499 	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3500 		btrfs_err(fs_info, "bad tree block start %llu", start);
3501 		return -EINVAL;
3502 	}
3503 
3504 	if (fs_info->nodesize < PAGE_SIZE &&
3505 	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3506 		btrfs_err(fs_info,
3507 		"tree block crosses page boundary, start %llu nodesize %u",
3508 			  start, fs_info->nodesize);
3509 		return -EINVAL;
3510 	}
3511 	if (fs_info->nodesize >= PAGE_SIZE &&
3512 	    !PAGE_ALIGNED(start)) {
3513 		btrfs_err(fs_info,
3514 		"tree block is not page aligned, start %llu nodesize %u",
3515 			  start, fs_info->nodesize);
3516 		return -EINVAL;
3517 	}
3518 	if (!IS_ALIGNED(start, fs_info->nodesize) &&
3519 	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
3520 		btrfs_warn(fs_info,
3521 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
3522 			      start, fs_info->nodesize);
3523 	}
3524 	return 0;
3525 }
3526 
3528 /*
3529  * Return 0 if eb->folios[i] is attached to the btree inode successfully.
3530  * Return >0 if there is already another extent buffer for the range,
3531  * and @found_eb_ret will be updated.
3532  * Return -EAGAIN if the filemap has an existing folio with a different size
3533  * than @eb.
3534  * The caller needs to free the existing folios and retry using the same order.
3535  */
3536 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
3537 				      struct extent_buffer **found_eb_ret)
3538 {
3540 	struct btrfs_fs_info *fs_info = eb->fs_info;
3541 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3542 	const unsigned long index = eb->start >> PAGE_SHIFT;
3543 	struct folio *existing_folio;
3544 	int ret;
3545 
3546 	ASSERT(found_eb_ret);
3547 
3548 	/* Caller should ensure the folio exists. */
3549 	ASSERT(eb->folios[i]);
3550 
3551 retry:
3552 	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
3553 				GFP_NOFS | __GFP_NOFAIL);
3554 	if (!ret)
3555 		return 0;
3556 
3557 	existing_folio = filemap_lock_folio(mapping, index + i);
3558 	/* The page cache only exists for a very short time, just retry. */
3559 	if (IS_ERR(existing_folio))
3560 		goto retry;
3561 
3562 	/* For now, we should only have single-page folios for btree inode. */
3563 	ASSERT(folio_nr_pages(existing_folio) == 1);
3564 
3565 	if (folio_size(existing_folio) != folio_size(eb->folios[0])) {
3566 		folio_unlock(existing_folio);
3567 		folio_put(existing_folio);
3568 		return -EAGAIN;
3569 	}
3570 
3571 	if (fs_info->nodesize < PAGE_SIZE) {
3572 		/*
3573 		 * We're going to reuse the existing page, so we can drop our
3574 		 * page and subpage structure now.
3575 		 */
3576 		__free_page(folio_page(eb->folios[i], 0));
3577 		eb->folios[i] = existing_folio;
3578 	} else {
3579 		struct extent_buffer *existing_eb;
3580 
3581 		existing_eb = grab_extent_buffer(fs_info,
3582 						 folio_page(existing_folio, 0));
3583 		if (existing_eb) {
3584 			/* The extent buffer still exists, we can use it directly. */
3585 			*found_eb_ret = existing_eb;
3586 			folio_unlock(existing_folio);
3587 			folio_put(existing_folio);
3588 			return 1;
3589 		}
3590 		/* The extent buffer no longer exists, we can reuse the folio. */
3591 		__free_page(folio_page(eb->folios[i], 0));
3592 		eb->folios[i] = existing_folio;
3593 	}
3594 	return 0;
3595 }
3596 
3597 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3598 					  u64 start, u64 owner_root, int level)
3599 {
3600 	unsigned long len = fs_info->nodesize;
3601 	int num_folios;
3602 	int attached = 0;
3603 	struct extent_buffer *eb;
3604 	struct extent_buffer *existing_eb = NULL;
3605 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3606 	struct btrfs_subpage *prealloc = NULL;
3607 	u64 lockdep_owner = owner_root;
3608 	bool page_contig = true;
3609 	int uptodate = 1;
3610 	int ret;
3611 
3612 	if (check_eb_alignment(fs_info, start))
3613 		return ERR_PTR(-EINVAL);
3614 
3615 #if BITS_PER_LONG == 32
3616 	if (start >= MAX_LFS_FILESIZE) {
3617 		btrfs_err_rl(fs_info,
3618 		"extent buffer %llu is beyond 32bit page cache limit", start);
3619 		btrfs_err_32bit_limit(fs_info);
3620 		return ERR_PTR(-EOVERFLOW);
3621 	}
3622 	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3623 		btrfs_warn_32bit_limit(fs_info);
3624 #endif
3625 
3626 	eb = find_extent_buffer(fs_info, start);
3627 	if (eb)
3628 		return eb;
3629 
3630 	eb = __alloc_extent_buffer(fs_info, start, len);
3631 	if (!eb)
3632 		return ERR_PTR(-ENOMEM);
3633 
3634 	/*
3635 	 * The reloc trees are just snapshots, so we need them to appear to be
3636 	 * just like any other fs tree WRT lockdep.
3637 	 */
3638 	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3639 		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3640 
3641 	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3642 
3643 	/*
3644 	 * Preallocate folio private for the subpage case, so that we won't
3645 	 * allocate memory while holding i_private_lock or the page lock.
3646 	 *
3647 	 * The memory will be freed by attach_extent_buffer_folio() or freed
3648 	 * manually if we exit earlier.
3649 	 */
3650 	if (fs_info->nodesize < PAGE_SIZE) {
3651 		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3652 		if (IS_ERR(prealloc)) {
3653 			ret = PTR_ERR(prealloc);
3654 			goto out;
3655 		}
3656 	}
3657 
3658 reallocate:
3659 	/* Allocate all pages first. */
3660 	ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
3661 	if (ret < 0) {
3662 		btrfs_free_subpage(prealloc);
3663 		goto out;
3664 	}
3665 
3666 	num_folios = num_extent_folios(eb);
3667 	/* Attach all pages to the filemap. */
3668 	for (int i = 0; i < num_folios; i++) {
3669 		struct folio *folio;
3670 
3671 		ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
3672 		if (ret > 0) {
3673 			ASSERT(existing_eb);
3674 			goto out;
3675 		}
3676 
3677 		/*
3678 		 * TODO: Special handling for a corner case where the order of
3679 		 * folios mismatches between the new eb and the filemap.
3680 		 *
3681 		 * This happens when:
3682 		 *
3683 		 * - the new eb is using higher order folio
3684 		 *
3685 		 * - the filemap is still using 0-order folios for the range
3686 		 *   This can happen when, at the previous eb allocation, no
3687 		 *   higher order folio was available for the call.
3688 		 *
3689 		 * - the existing eb has already been freed
3690 		 *
3691 		 * In this case, we have to free the existing folios first, and
3692 		 * re-allocate using the same order.
3693 		 * Thankfully this is not going to happen yet, as we're still
3694 		 * using 0-order folios.
3695 		 */
3696 		if (unlikely(ret == -EAGAIN)) {
3697 			ASSERT(0);
3698 			goto reallocate;
3699 		}
3700 		attached++;
3701 
3702 		/*
3703 		 * eb->folios[] is only reliable after attach_eb_folio_to_filemap(),
3704 		 * as we may choose to reuse the existing page cache
3705 		 * and free the allocated page.
3706 		 */
3707 		folio = eb->folios[i];
3708 		spin_lock(&mapping->i_private_lock);
3709 		/* Should not fail, as we have preallocated the memory */
3710 		ret = attach_extent_buffer_folio(eb, folio, prealloc);
3711 		ASSERT(!ret);
3712 		/*
3713 		 * To inform that we have an extra eb under allocation, so that
3714 		 * detach_extent_buffer_folio() won't release the folio private
3715 		 * when the eb hasn't yet been inserted into the radix tree.
3716 		 *
3717 		 * The ref will be decreased when the eb releases the page, in
3718 		 * detach_extent_buffer_folio().
3719 		 * Thus no special handling is needed in the error path.
3720 		 */
3721 		btrfs_folio_inc_eb_refs(fs_info, folio);
3722 		spin_unlock(&mapping->i_private_lock);
3723 
3724 		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3725 
3726 		/*
3727 		 * Check if the current page is physically contiguous with previous eb
3728 		 * page.
3729 		 * At this stage, either we allocated a large folio, thus @i
3730 		 * would only be 0, or we fall back to per-page allocation.
3731 		 */
3732 		if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3733 			page_contig = false;
3734 
3735 		if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3736 			uptodate = 0;
3737 
3738 		/*
3739 		 * We can't unlock the pages just yet since the extent buffer
3740 		 * hasn't been properly inserted in the radix tree; this
3741 		 * opens a race with btree_release_folio which can free a page
3742 		 * while we are still filling in all pages for the buffer and
3743 		 * we could crash.
3744 		 */
3745 	}
3746 	if (uptodate)
3747 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3748 	/* All pages are physically contiguous, can skip cross page handling. */
3749 	if (page_contig)
3750 		eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3751 again:
3752 	ret = radix_tree_preload(GFP_NOFS);
3753 	if (ret)
3754 		goto out;
3755 
3756 	spin_lock(&fs_info->buffer_lock);
3757 	ret = radix_tree_insert(&fs_info->buffer_radix,
3758 				start >> fs_info->sectorsize_bits, eb);
3759 	spin_unlock(&fs_info->buffer_lock);
3760 	radix_tree_preload_end();
3761 	if (ret == -EEXIST) {
3762 		ret = 0;
3763 		existing_eb = find_extent_buffer(fs_info, start);
3764 		if (existing_eb)
3765 			goto out;
3766 		else
3767 			goto again;
3768 	}
3769 	/* add one reference for the tree */
3770 	check_buffer_tree_ref(eb);
3771 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3772 
3773 	/*
3774 	 * Now it's safe to unlock the pages because any calls to
3775 	 * btree_release_folio will correctly detect that a page belongs to a
3776 	 * live buffer and won't free them prematurely.
3777 	 */
3778 	for (int i = 0; i < num_folios; i++)
3779 		unlock_page(folio_page(eb->folios[i], 0));
3780 	return eb;
3781 
3782 out:
3783 	WARN_ON(!atomic_dec_and_test(&eb->refs));
3784 
3785 	/*
3786 	 * Any attached folios need to be detached before we unlock them.  This
3787 	 * is because when we insert our new folios into the mapping, we then
3788 	 * attach our eb to each folio.  If we fail to insert a folio, we'll
3789 	 * look up the folio already at that index and grab its EB.  We do not
3790 	 * want that lookup to grab this eb, as we're getting ready to free it.
3791 	 * So we have to detach it first and then unlock it.
3792 	 *
3793 	 * We have to drop our reference and NULL it out here because in the
3794 	 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3795 	 * Below when we call btrfs_release_extent_buffer() we will call
3796 	 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3797 	 * case.  If we left eb->folios[i] populated in the subpage case we'd
3798 	 * double put our reference and be super sad.
3799 	 */
3800 	for (int i = 0; i < attached; i++) {
3801 		ASSERT(eb->folios[i]);
3802 		detach_extent_buffer_folio(eb, eb->folios[i]);
3803 		unlock_page(folio_page(eb->folios[i], 0));
3804 		folio_put(eb->folios[i]);
3805 		eb->folios[i] = NULL;
3806 	}
3807 	/*
3808 	 * Now all pages of that extent buffer are unmapped, set the UNMAPPED
3809 	 * flag, so it can be cleaned up without utilizing page->mapping.
3810 	 */
3811 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3812 
3813 	btrfs_release_extent_buffer(eb);
3814 	if (ret < 0)
3815 		return ERR_PTR(ret);
3816 	ASSERT(existing_eb);
3817 	return existing_eb;
3818 }
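
/*
 * Example usage (illustrative sketch, error handling trimmed): the typical
 * lookup-or-create plus read sequence of the tree block helpers, assuming a
 * btrfs_tree_parent_check has been prepared as in btrfs_readahead_tree_block()
 * at the end of this file:
 *
 *	eb = alloc_extent_buffer(fs_info, bytenr, owner_root, level);
 *	if (IS_ERR(eb))
 *		return ERR_CAST(eb);
 *	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, 0, &check);
 *	if (ret < 0) {
 *		free_extent_buffer_stale(eb);
 *		return ERR_PTR(ret);
 *	}
 */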
3819 
3820 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3821 {
3822 	struct extent_buffer *eb =
3823 			container_of(head, struct extent_buffer, rcu_head);
3824 
3825 	__free_extent_buffer(eb);
3826 }
3827 
3828 static int release_extent_buffer(struct extent_buffer *eb)
3829 	__releases(&eb->refs_lock)
3830 {
3831 	lockdep_assert_held(&eb->refs_lock);
3832 
3833 	WARN_ON(atomic_read(&eb->refs) == 0);
3834 	if (atomic_dec_and_test(&eb->refs)) {
3835 		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3836 			struct btrfs_fs_info *fs_info = eb->fs_info;
3837 
3838 			spin_unlock(&eb->refs_lock);
3839 
3840 			spin_lock(&fs_info->buffer_lock);
3841 			radix_tree_delete(&fs_info->buffer_radix,
3842 					  eb->start >> fs_info->sectorsize_bits);
3843 			spin_unlock(&fs_info->buffer_lock);
3844 		} else {
3845 			spin_unlock(&eb->refs_lock);
3846 		}
3847 
3848 		btrfs_leak_debug_del_eb(eb);
3849 		/* Should be safe to release our pages at this point */
3850 		btrfs_release_extent_buffer_pages(eb);
3851 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3852 		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3853 			__free_extent_buffer(eb);
3854 			return 1;
3855 		}
3856 #endif
3857 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3858 		return 1;
3859 	}
3860 	spin_unlock(&eb->refs_lock);
3861 
3862 	return 0;
3863 }
3864 
3865 void free_extent_buffer(struct extent_buffer *eb)
3866 {
3867 	int refs;
3868 	if (!eb)
3869 		return;
3870 
3871 	refs = atomic_read(&eb->refs);
3872 	while (1) {
3873 		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3874 		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3875 			refs == 1))
3876 			break;
3877 		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3878 			return;
3879 	}
3880 
3881 	spin_lock(&eb->refs_lock);
3882 	if (atomic_read(&eb->refs) == 2 &&
3883 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3884 	    !extent_buffer_under_io(eb) &&
3885 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3886 		atomic_dec(&eb->refs);
3887 
3888 	/*
3889 	 * I know this is terrible, but it's temporary until we stop tracking
3890 	 * the uptodate bits and such for the extent buffers.
3891 	 */
3892 	release_extent_buffer(eb);
3893 }
3894 
3895 void free_extent_buffer_stale(struct extent_buffer *eb)
3896 {
3897 	if (!eb)
3898 		return;
3899 
3900 	spin_lock(&eb->refs_lock);
3901 	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3902 
3903 	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3904 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3905 		atomic_dec(&eb->refs);
3906 	release_extent_buffer(eb);
3907 }
3908 
3909 static void btree_clear_folio_dirty(struct folio *folio)
3910 {
3911 	ASSERT(folio_test_dirty(folio));
3912 	ASSERT(folio_test_locked(folio));
3913 	folio_clear_dirty_for_io(folio);
3914 	xa_lock_irq(&folio->mapping->i_pages);
3915 	if (!folio_test_dirty(folio))
3916 		__xa_clear_mark(&folio->mapping->i_pages,
3917 				folio_index(folio), PAGECACHE_TAG_DIRTY);
3918 	xa_unlock_irq(&folio->mapping->i_pages);
3919 }
3920 
3921 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3922 {
3923 	struct btrfs_fs_info *fs_info = eb->fs_info;
3924 	struct folio *folio = eb->folios[0];
3925 	bool last;
3926 
3927 	/* btree_clear_folio_dirty() needs page locked. */
3928 	folio_lock(folio);
3929 	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
3930 	if (last)
3931 		btree_clear_folio_dirty(folio);
3932 	folio_unlock(folio);
3933 	WARN_ON(atomic_read(&eb->refs) == 0);
3934 }
3935 
3936 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3937 			      struct extent_buffer *eb)
3938 {
3939 	struct btrfs_fs_info *fs_info = eb->fs_info;
3940 	int num_folios;
3941 
3942 	btrfs_assert_tree_write_locked(eb);
3943 
3944 	if (trans && btrfs_header_generation(eb) != trans->transid)
3945 		return;
3946 
3947 	/*
3948 	 * Instead of clearing the dirty flag off of the buffer, mark it as
3949 	 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3950 	 * write-ordering in zoned mode, without the need to later re-dirty
3951 	 * the extent_buffer.
3952 	 *
3953 	 * The actual zeroout of the buffer will happen later in
3954 	 * btree_csum_one_bio.
3955 	 */
3956 	if (btrfs_is_zoned(fs_info)) {
3957 		set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3958 		return;
3959 	}
3960 
3961 	if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3962 		return;
3963 
3964 	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3965 				 fs_info->dirty_metadata_batch);
3966 
3967 	if (eb->fs_info->nodesize < PAGE_SIZE)
3968 		return clear_subpage_extent_buffer_dirty(eb);
3969 
3970 	num_folios = num_extent_folios(eb);
3971 	for (int i = 0; i < num_folios; i++) {
3972 		struct folio *folio = eb->folios[i];
3973 
3974 		if (!folio_test_dirty(folio))
3975 			continue;
3976 		folio_lock(folio);
3977 		btree_clear_folio_dirty(folio);
3978 		folio_unlock(folio);
3979 	}
3980 	WARN_ON(atomic_read(&eb->refs) == 0);
3981 }
3982 
3983 void set_extent_buffer_dirty(struct extent_buffer *eb)
3984 {
3985 	int num_folios;
3986 	bool was_dirty;
3987 
3988 	check_buffer_tree_ref(eb);
3989 
3990 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3991 
3992 	num_folios = num_extent_folios(eb);
3993 	WARN_ON(atomic_read(&eb->refs) == 0);
3994 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3995 
3996 	if (!was_dirty) {
3997 		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3998 
3999 		/*
4000 		 * For the subpage case, we can have other extent buffers in the
4001 		 * same page, and in clear_subpage_extent_buffer_dirty() we
4002 		 * have to clear the page dirty flag without the subpage lock
4003 		 * held.  This can cause a race where our page's dirty flag gets
4004 		 * cleared right after we set it.
4005 		 *
4006 		 * Thankfully, clear_subpage_extent_buffer_dirty() already locks
4007 		 * its page for other reasons, so we can use the page lock to
4008 		 * prevent the above race.
4009 		 */
4010 		if (subpage)
4011 			lock_page(folio_page(eb->folios[0], 0));
4012 		for (int i = 0; i < num_folios; i++)
4013 			btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
4014 					      eb->start, eb->len);
4015 		if (subpage)
4016 			unlock_page(folio_page(eb->folios[0], 0));
4017 		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
4018 					 eb->len,
4019 					 eb->fs_info->dirty_metadata_batch);
4020 	}
4021 #ifdef CONFIG_BTRFS_DEBUG
4022 	for (int i = 0; i < num_folios; i++)
4023 		ASSERT(folio_test_dirty(eb->folios[i]));
4024 #endif
4025 }
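
/*
 * Example (illustrative sketch): modifications take the write lock and then
 * dirty the buffer, typically via btrfs_mark_buffer_dirty(); "item" and
 * "offset" are hypothetical here:
 *
 *	btrfs_tree_lock(eb);
 *	write_extent_buffer(eb, &item, offset, sizeof(item));
 *	set_extent_buffer_dirty(eb);
 *	btrfs_tree_unlock(eb);
 */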
4026 
4027 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
4028 {
4029 	struct btrfs_fs_info *fs_info = eb->fs_info;
4030 	int num_folios = num_extent_folios(eb);
4031 
4032 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4033 	for (int i = 0; i < num_folios; i++) {
4034 		struct folio *folio = eb->folios[i];
4035 
4036 		if (!folio)
4037 			continue;
4038 
4039 		/*
4040 		 * This is special handling for metadata subpage, as regular
4041 		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
4042 		 */
4043 		if (fs_info->nodesize >= PAGE_SIZE)
4044 			folio_clear_uptodate(folio);
4045 		else
4046 			btrfs_subpage_clear_uptodate(fs_info, folio,
4047 						     eb->start, eb->len);
4048 	}
4049 }
4050 
4051 void set_extent_buffer_uptodate(struct extent_buffer *eb)
4052 {
4053 	struct btrfs_fs_info *fs_info = eb->fs_info;
4054 	int num_folios = num_extent_folios(eb);
4055 
4056 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4057 	for (int i = 0; i < num_folios; i++) {
4058 		struct folio *folio = eb->folios[i];
4059 
4060 		/*
4061 		 * This is special handling for metadata subpage, as regular
4062 		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
4063 		 */
4064 		if (fs_info->nodesize >= PAGE_SIZE)
4065 			folio_mark_uptodate(folio);
4066 		else
4067 			btrfs_subpage_set_uptodate(fs_info, folio,
4068 						   eb->start, eb->len);
4069 	}
4070 }
4071 
4072 static void end_bbio_meta_read(struct btrfs_bio *bbio)
4073 {
4074 	struct extent_buffer *eb = bbio->private;
4075 	struct btrfs_fs_info *fs_info = eb->fs_info;
4076 	bool uptodate = !bbio->bio.bi_status;
4077 	struct folio_iter fi;
4078 	u32 bio_offset = 0;
4079 
4080 	eb->read_mirror = bbio->mirror_num;
4081 
4082 	if (uptodate &&
4083 	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
4084 		uptodate = false;
4085 
4086 	if (uptodate) {
4087 		set_extent_buffer_uptodate(eb);
4088 	} else {
4089 		clear_extent_buffer_uptodate(eb);
4090 		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4091 	}
4092 
4093 	bio_for_each_folio_all(fi, &bbio->bio) {
4094 		struct folio *folio = fi.folio;
4095 		u64 start = eb->start + bio_offset;
4096 		u32 len = fi.length;
4097 
4098 		if (uptodate)
4099 			btrfs_folio_set_uptodate(fs_info, folio, start, len);
4100 		else
4101 			btrfs_folio_clear_uptodate(fs_info, folio, start, len);
4102 
4103 		bio_offset += len;
4104 	}
4105 
4106 	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4107 	smp_mb__after_atomic();
4108 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4109 	free_extent_buffer(eb);
4110 
4111 	bio_put(&bbio->bio);
4112 }
4113 
4114 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
4115 			     struct btrfs_tree_parent_check *check)
4116 {
4117 	struct btrfs_bio *bbio;
4118 	bool ret;
4119 
4120 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4121 		return 0;
4122 
4123 	/*
4124 	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4125 	 * operation, which could potentially still be in flight.  In this case
4126 	 * we simply want to return an error.
4127 	 */
4128 	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
4129 		return -EIO;
4130 
4131 	/* Someone else is already reading the buffer, just wait for it. */
4132 	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
4133 		goto done;
4134 
4135 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4136 	eb->read_mirror = 0;
4137 	check_buffer_tree_ref(eb);
4138 	atomic_inc(&eb->refs);
4139 
4140 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
4141 			       REQ_OP_READ | REQ_META, eb->fs_info,
4142 			       end_bbio_meta_read, eb);
4143 	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
4144 	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
4145 	bbio->file_offset = eb->start;
4146 	memcpy(&bbio->parent_check, check, sizeof(*check));
4147 	if (eb->fs_info->nodesize < PAGE_SIZE) {
4148 		ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
4149 				    eb->start - folio_pos(eb->folios[0]));
4150 		ASSERT(ret);
4151 	} else {
4152 		int num_folios = num_extent_folios(eb);
4153 
4154 		for (int i = 0; i < num_folios; i++) {
4155 			struct folio *folio = eb->folios[i];
4156 
4157 			ret = bio_add_folio(&bbio->bio, folio, folio_size(folio), 0);
4158 			ASSERT(ret);
4159 		}
4160 	}
4161 	btrfs_submit_bio(bbio, mirror_num);
4162 
4163 done:
4164 	if (wait == WAIT_COMPLETE) {
4165 		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
4166 		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4167 			return -EIO;
4168 	}
4169 
4170 	return 0;
4171 }
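
/*
 * Example usage (illustrative sketch): a synchronous read with a parent
 * check, mirroring btrfs_readahead_tree_block() below but waiting for
 * completion:
 *
 *	struct btrfs_tree_parent_check check = {
 *		.has_first_key = 0,
 *		.level = level,
 *		.transid = gen
 *	};
 *
 *	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, 0, &check);
 *	if (ret < 0)
 *		... the eb is not uptodate, don't touch its contents ...
 */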
4172 
4173 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
4174 			    unsigned long len)
4175 {
4176 	btrfs_warn(eb->fs_info,
4177 		"access to eb bytenr %llu len %lu out of range start %lu len %lu",
4178 		eb->start, eb->len, start, len);
4179 	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4180 
4181 	return true;
4182 }
4183 
4184 /*
4185  * Check if the [start, start + len) range is valid before reading/writing
4186  * the eb.
4187  * NOTE: @start and @len are offsets inside the eb, not logical addresses.
4188  *
4189  * Caller should not touch the dst/src memory if this function returns an error.
4190  */
4191 static inline int check_eb_range(const struct extent_buffer *eb,
4192 				 unsigned long start, unsigned long len)
4193 {
4194 	unsigned long offset;
4195 
4196 	/* start, start + len should not go beyond eb->len nor overflow */
4197 	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
4198 		return report_eb_range(eb, start, len);
4199 
4200 	return false;
4201 }
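
/*
 * For example, with eb->len == 16384, check_eb_range(eb, 16380, 8) reports a
 * violation since 16380 + 8 > eb->len; check_add_overflow() also catches
 * start + len wrapping around ULONG_MAX.
 */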
4202 
4203 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
4204 			unsigned long start, unsigned long len)
4205 {
4206 	const int unit_size = folio_size(eb->folios[0]);
4207 	size_t cur;
4208 	size_t offset;
4209 	char *dst = (char *)dstv;
4210 	unsigned long i = get_eb_folio_index(eb, start);
4211 
4212 	if (check_eb_range(eb, start, len)) {
4213 		/*
4214 		 * Invalid range hit, reset the memory, so callers won't get
4215 		 * some random garbage for their uninitialized memory.
4216 		 */
4217 		memset(dstv, 0, len);
4218 		return;
4219 	}
4220 
4221 	if (eb->addr) {
4222 		memcpy(dstv, eb->addr + start, len);
4223 		return;
4224 	}
4225 
4226 	offset = get_eb_offset_in_folio(eb, start);
4227 
4228 	while (len > 0) {
4229 		char *kaddr;
4230 
4231 		cur = min(len, unit_size - offset);
4232 		kaddr = folio_address(eb->folios[i]);
4233 		memcpy(dst, kaddr + offset, cur);
4234 
4235 		dst += cur;
4236 		len -= cur;
4237 		offset = 0;
4238 		i++;
4239 	}
4240 }
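
/*
 * Example (illustrative sketch): this is the primitive behind the item and
 * header accessors, e.g. copying a disk key out of an eb, where "keyoff" is
 * a hypothetical byte offset inside the eb:
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	read_extent_buffer(eb, &disk_key, keyoff, sizeof(disk_key));
 */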
4241 
4242 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4243 				       void __user *dstv,
4244 				       unsigned long start, unsigned long len)
4245 {
4246 	const int unit_size = folio_size(eb->folios[0]);
4247 	size_t cur;
4248 	size_t offset;
4249 	char __user *dst = (char __user *)dstv;
4250 	unsigned long i = get_eb_folio_index(eb, start);
4251 	int ret = 0;
4252 
4253 	WARN_ON(start > eb->len);
4254 	WARN_ON(start + len > eb->len);
4255 
4256 	if (eb->addr) {
4257 		if (copy_to_user_nofault(dstv, eb->addr + start, len))
4258 			ret = -EFAULT;
4259 		return ret;
4260 	}
4261 
4262 	offset = get_eb_offset_in_folio(eb, start);
4263 
4264 	while (len > 0) {
4265 		char *kaddr;
4266 
4267 		cur = min(len, unit_size - offset);
4268 		kaddr = folio_address(eb->folios[i]);
4269 		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4270 			ret = -EFAULT;
4271 			break;
4272 		}
4273 
4274 		dst += cur;
4275 		len -= cur;
4276 		offset = 0;
4277 		i++;
4278 	}
4279 
4280 	return ret;
4281 }
4282 
4283 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4284 			 unsigned long start, unsigned long len)
4285 {
4286 	const int unit_size = folio_size(eb->folios[0]);
4287 	size_t cur;
4288 	size_t offset;
4289 	char *kaddr;
4290 	char *ptr = (char *)ptrv;
4291 	unsigned long i = get_eb_folio_index(eb, start);
4292 	int ret = 0;
4293 
4294 	if (check_eb_range(eb, start, len))
4295 		return -EINVAL;
4296 
4297 	if (eb->addr)
4298 		return memcmp(ptrv, eb->addr + start, len);
4299 
4300 	offset = get_eb_offset_in_folio(eb, start);
4301 
4302 	while (len > 0) {
4303 		cur = min(len, unit_size - offset);
4304 		kaddr = folio_address(eb->folios[i]);
4305 		ret = memcmp(ptr, kaddr + offset, cur);
4306 		if (ret)
4307 			break;
4308 
4309 		ptr += cur;
4310 		len -= cur;
4311 		offset = 0;
4312 		i++;
4313 	}
4314 	return ret;
4315 }
4316 
4317 /*
4318  * Check that the extent buffer is uptodate.
4319  *
4320  * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
4321  * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4322  */
4323 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
4324 {
4325 	struct btrfs_fs_info *fs_info = eb->fs_info;
4326 	struct folio *folio = eb->folios[i];
4327 
4328 	ASSERT(folio);
4329 
4330 	/*
4331 	 * If we are using the commit root we could potentially clear a page
4332 	 * Uptodate while we're using the extent buffer that we've previously
4333 	 * looked up.  We don't want to complain in this case, as the page was
4334 	 * valid before, we just didn't write it out.  Instead we want to catch
4335 	 * the case where we didn't actually read the block properly, which
4336 	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
4337 	 */
4338 	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4339 		return;
4340 
4341 	if (fs_info->nodesize < PAGE_SIZE) {
4342 		struct folio *folio = eb->folios[0];
4343 
4344 		ASSERT(i == 0);
4345 		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
4346 							 eb->start, eb->len)))
4347 			btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
4348 	} else {
4349 		WARN_ON(!folio_test_uptodate(folio));
4350 	}
4351 }
4352 
4353 static void __write_extent_buffer(const struct extent_buffer *eb,
4354 				  const void *srcv, unsigned long start,
4355 				  unsigned long len, bool use_memmove)
4356 {
4357 	const int unit_size = folio_size(eb->folios[0]);
4358 	size_t cur;
4359 	size_t offset;
4360 	char *kaddr;
4361 	char *src = (char *)srcv;
4362 	unsigned long i = get_eb_folio_index(eb, start);
4363 	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
4364 	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4365 
4366 	if (check_eb_range(eb, start, len))
4367 		return;
4368 
4369 	if (eb->addr) {
4370 		if (use_memmove)
4371 			memmove(eb->addr + start, srcv, len);
4372 		else
4373 			memcpy(eb->addr + start, srcv, len);
4374 		return;
4375 	}
4376 
4377 	offset = get_eb_offset_in_folio(eb, start);
4378 
4379 	while (len > 0) {
4380 		if (check_uptodate)
4381 			assert_eb_folio_uptodate(eb, i);
4382 
4383 		cur = min(len, unit_size - offset);
4384 		kaddr = folio_address(eb->folios[i]);
4385 		if (use_memmove)
4386 			memmove(kaddr + offset, src, cur);
4387 		else
4388 			memcpy(kaddr + offset, src, cur);
4389 
4390 		src += cur;
4391 		len -= cur;
4392 		offset = 0;
4393 		i++;
4394 	}
4395 }
4396 
4397 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4398 			 unsigned long start, unsigned long len)
4399 {
4400 	return __write_extent_buffer(eb, srcv, start, len, false);
4401 }
4402 
4403 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4404 				 unsigned long start, unsigned long len)
4405 {
4406 	const int unit_size = folio_size(eb->folios[0]);
4407 	unsigned long cur = start;
4408 
4409 	if (eb->addr) {
4410 		memset(eb->addr + start, c, len);
4411 		return;
4412 	}
4413 
4414 	while (cur < start + len) {
4415 		unsigned long index = get_eb_folio_index(eb, cur);
4416 		unsigned int offset = get_eb_offset_in_folio(eb, cur);
4417 		unsigned int cur_len = min(start + len - cur, unit_size - offset);
4418 
4419 		assert_eb_folio_uptodate(eb, index);
4420 		memset(folio_address(eb->folios[index]) + offset, c, cur_len);
4421 
4422 		cur += cur_len;
4423 	}
4424 }
4425 
4426 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4427 			   unsigned long len)
4428 {
4429 	if (check_eb_range(eb, start, len))
4430 		return;
4431 	return memset_extent_buffer(eb, 0, start, len);
4432 }
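
/*
 * Example (illustrative sketch, hypothetical offsets): zeroing a range that
 * an item used to occupy in a leaf:
 *
 *	memzero_extent_buffer(leaf, data_start, item_size);
 */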
4433 
4434 void copy_extent_buffer_full(const struct extent_buffer *dst,
4435 			     const struct extent_buffer *src)
4436 {
4437 	const int unit_size = folio_size(src->folios[0]);
4438 	unsigned long cur = 0;
4439 
4440 	ASSERT(dst->len == src->len);
4441 
4442 	while (cur < src->len) {
4443 		unsigned long index = get_eb_folio_index(src, cur);
4444 		unsigned long offset = get_eb_offset_in_folio(src, cur);
4445 		unsigned long cur_len = min(src->len - cur, unit_size - offset);
4446 		void *addr = folio_address(src->folios[index]) + offset;
4447 
4448 		write_extent_buffer(dst, addr, cur, cur_len);
4449 
4450 		cur += cur_len;
4451 	}
4452 }
4453 
4454 void copy_extent_buffer(const struct extent_buffer *dst,
4455 			const struct extent_buffer *src,
4456 			unsigned long dst_offset, unsigned long src_offset,
4457 			unsigned long len)
4458 {
4459 	const int unit_size = folio_size(dst->folios[0]);
4460 	u64 dst_len = dst->len;
4461 	size_t cur;
4462 	size_t offset;
4463 	char *kaddr;
4464 	unsigned long i = get_eb_folio_index(dst, dst_offset);
4465 
4466 	if (check_eb_range(dst, dst_offset, len) ||
4467 	    check_eb_range(src, src_offset, len))
4468 		return;
4469 
4470 	WARN_ON(src->len != dst_len);
4471 
4472 	offset = get_eb_offset_in_folio(dst, dst_offset);
4473 
4474 	while (len > 0) {
4475 		assert_eb_folio_uptodate(dst, i);
4476 
4477 		cur = min(len, (unsigned long)(unit_size - offset));
4478 
4479 		kaddr = folio_address(dst->folios[i]);
4480 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
4481 
4482 		src_offset += cur;
4483 		len -= cur;
4484 		offset = 0;
4485 		i++;
4486 	}
4487 }
4488 
4489 /*
4490  * Calculate the folio and offset of the byte containing the given bit number.
4491  *
4492  * @eb:           the extent buffer
4493  * @start:        offset of the bitmap item in the extent buffer
4494  * @nr:           bit number
4495  * @folio_index:  return index of the folio in the extent buffer that contains
4496  *                the given bit number
4497  * @folio_offset: return offset into the folio given by folio_index
4498  *
4499  * This helper hides the ugliness of finding the byte in an extent buffer which
4500  * contains a given bit.
4501  */
4502 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4503 				    unsigned long start, unsigned long nr,
4504 				    unsigned long *folio_index,
4505 				    size_t *folio_offset)
4506 {
4507 	size_t byte_offset = BIT_BYTE(nr);
4508 	size_t offset;
4509 
4510 	/*
4511 	 * The byte we want is the offset of the extent buffer + the offset of
4512 	 * the bitmap item in the extent buffer + the offset of the byte in the
4513 	 * bitmap item.
4514 	 */
4515 	offset = start + offset_in_folio(eb->folios[0], eb->start) + byte_offset;
4516 
4517 	*folio_index = offset >> folio_shift(eb->folios[0]);
4518 	*folio_offset = offset_in_folio(eb->folios[0], offset);
4519 }
4520 
4521 /*
4522  * Determine whether a bit in a bitmap item is set.
4523  *
4524  * @eb:     the extent buffer
4525  * @start:  offset of the bitmap item in the extent buffer
4526  * @nr:     bit number to test
4527  */
4528 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4529 			   unsigned long nr)
4530 {
4531 	unsigned long i;
4532 	size_t offset;
4533 	u8 *kaddr;
4534 
4535 	eb_bitmap_offset(eb, start, nr, &i, &offset);
4536 	assert_eb_folio_uptodate(eb, i);
4537 	kaddr = folio_address(eb->folios[i]);
4538 	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4539 }
4540 
4541 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4542 {
4543 	unsigned long index = get_eb_folio_index(eb, bytenr);
4544 
4545 	if (check_eb_range(eb, bytenr, 1))
4546 		return NULL;
4547 	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4548 }
4549 
4550 /*
4551  * Set an area of a bitmap to 1.
4552  *
4553  * @eb:     the extent buffer
4554  * @start:  offset of the bitmap item in the extent buffer
4555  * @pos:    bit number of the first bit
4556  * @len:    number of bits to set
4557  */
4558 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4559 			      unsigned long pos, unsigned long len)
4560 {
4561 	unsigned int first_byte = start + BIT_BYTE(pos);
4562 	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4563 	const bool same_byte = (first_byte == last_byte);
4564 	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4565 	u8 *kaddr;
4566 
4567 	if (same_byte)
4568 		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4569 
4570 	/* Handle the first byte. */
4571 	kaddr = extent_buffer_get_byte(eb, first_byte);
4572 	*kaddr |= mask;
4573 	if (same_byte)
4574 		return;
4575 
4576 	/* Handle the byte aligned part. */
4577 	ASSERT(first_byte + 1 <= last_byte);
4578 	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4579 
4580 	/* Handle the last byte. */
4581 	kaddr = extent_buffer_get_byte(eb, last_byte);
4582 	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4583 }
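
/*
 * Worked example: pos = 3, len = 6 spans two bytes.  The first byte gets
 * BITMAP_FIRST_BYTE_MASK(3) == 0xf8 (bits 3-7), there is no byte aligned
 * middle part, and the last byte gets BITMAP_LAST_BYTE_MASK(9) == 0x01
 * (bit 8 of the bitmap, bit 0 of the second byte).
 */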
4584 
4586 /*
4587  * Clear an area of a bitmap.
4588  *
4589  * @eb:     the extent buffer
4590  * @start:  offset of the bitmap item in the extent buffer
4591  * @pos:    bit number of the first bit
4592  * @len:    number of bits to clear
4593  */
4594 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4595 				unsigned long start, unsigned long pos,
4596 				unsigned long len)
4597 {
4598 	unsigned int first_byte = start + BIT_BYTE(pos);
4599 	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4600 	const bool same_byte = (first_byte == last_byte);
4601 	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4602 	u8 *kaddr;
4603 
4604 	if (same_byte)
4605 		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4606 
4607 	/* Handle the first byte. */
4608 	kaddr = extent_buffer_get_byte(eb, first_byte);
4609 	*kaddr &= ~mask;
4610 	if (same_byte)
4611 		return;
4612 
4613 	/* Handle the byte aligned part. */
4614 	ASSERT(first_byte + 1 <= last_byte);
4615 	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4616 
4617 	/* Handle the last byte. */
4618 	kaddr = extent_buffer_get_byte(eb, last_byte);
4619 	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4620 }
4621 
4622 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4623 {
4624 	unsigned long distance = (src > dst) ? src - dst : dst - src;
4625 	return distance < len;
4626 }
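
/*
 * For example, src == 10, dst == 14, len == 8: the distance is 4, which is
 * less than 8, so the ranges overlap and the copy needs memmove semantics.
 */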
4627 
4628 void memcpy_extent_buffer(const struct extent_buffer *dst,
4629 			  unsigned long dst_offset, unsigned long src_offset,
4630 			  unsigned long len)
4631 {
4632 	const int unit_size = folio_size(dst->folios[0]);
4633 	unsigned long cur_off = 0;
4634 
4635 	if (check_eb_range(dst, dst_offset, len) ||
4636 	    check_eb_range(dst, src_offset, len))
4637 		return;
4638 
4639 	if (dst->addr) {
4640 		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4641 
4642 		if (use_memmove)
4643 			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4644 		else
4645 			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4646 		return;
4647 	}
4648 
4649 	while (cur_off < len) {
4650 		unsigned long cur_src = cur_off + src_offset;
4651 		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4652 		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4653 		unsigned long cur_len = min(src_offset + len - cur_src,
4654 					    unit_size - folio_off);
4655 		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4656 		const bool use_memmove = areas_overlap(src_offset + cur_off,
4657 						       dst_offset + cur_off, cur_len);
4658 
4659 		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4660 				      use_memmove);
4661 		cur_off += cur_len;
4662 	}
4663 }
4664 
4665 void memmove_extent_buffer(const struct extent_buffer *dst,
4666 			   unsigned long dst_offset, unsigned long src_offset,
4667 			   unsigned long len)
4668 {
4669 	unsigned long dst_end = dst_offset + len - 1;
4670 	unsigned long src_end = src_offset + len - 1;
4671 
4672 	if (check_eb_range(dst, dst_offset, len) ||
4673 	    check_eb_range(dst, src_offset, len))
4674 		return;
4675 
4676 	if (dst_offset < src_offset) {
4677 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4678 		return;
4679 	}
4680 
4681 	if (dst->addr) {
4682 		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4683 		return;
4684 	}
4685 
4686 	while (len > 0) {
4687 		unsigned long src_i;
4688 		size_t cur;
4689 		size_t dst_off_in_folio;
4690 		size_t src_off_in_folio;
4691 		void *src_addr;
4692 		bool use_memmove;
4693 
4694 		src_i = get_eb_folio_index(dst, src_end);
4695 
4696 		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4697 		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4698 
4699 		cur = min_t(unsigned long, len, src_off_in_folio + 1);
4700 		cur = min(cur, dst_off_in_folio + 1);
4701 
4702 		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4703 					 cur + 1;
4704 		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4705 					    cur);
4706 
4707 		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4708 				      use_memmove);
4709 
4710 		dst_end -= cur;
4711 		src_end -= cur;
4712 		len -= cur;
4713 	}
4714 }
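
/*
 * Example (illustrative sketch, hypothetical offsets): shifting a region of
 * a leaf to the right to make room for a new item, where the source and
 * destination ranges overlap:
 *
 *	memmove_extent_buffer(leaf, start + size, start, len);
 */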
4715 
4716 #define GANG_LOOKUP_SIZE	16
4717 static struct extent_buffer *get_next_extent_buffer(
4718 		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4719 {
4720 	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4721 	struct extent_buffer *found = NULL;
4722 	u64 page_start = page_offset(page);
4723 	u64 cur = page_start;
4724 
4725 	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4726 	lockdep_assert_held(&fs_info->buffer_lock);
4727 
4728 	while (cur < page_start + PAGE_SIZE) {
4729 		int ret;
4730 		int i;
4731 
4732 		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4733 				(void **)gang, cur >> fs_info->sectorsize_bits,
4734 				min_t(unsigned int, GANG_LOOKUP_SIZE,
4735 				      PAGE_SIZE / fs_info->nodesize));
4736 		if (ret == 0)
4737 			goto out;
4738 		for (i = 0; i < ret; i++) {
4739 			/* Already beyond page end */
4740 			if (gang[i]->start >= page_start + PAGE_SIZE)
4741 				goto out;
4742 			/* Found one */
4743 			if (gang[i]->start >= bytenr) {
4744 				found = gang[i];
4745 				goto out;
4746 			}
4747 		}
4748 		cur = gang[ret - 1]->start + gang[ret - 1]->len;
4749 	}
4750 out:
4751 	return found;
4752 }
4753 
4754 static int try_release_subpage_extent_buffer(struct page *page)
4755 {
4756 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4757 	u64 cur = page_offset(page);
4758 	const u64 end = page_offset(page) + PAGE_SIZE;
4759 	int ret;
4760 
4761 	while (cur < end) {
4762 		struct extent_buffer *eb = NULL;
4763 
4764 		/*
4765 		 * Unlike try_release_extent_buffer() which uses folio private
4766 		 * to grab the buffer, for the subpage case we rely on the radix
4767 		 * tree, thus we need to ensure radix tree consistency.
4768 		 *
4769 		 * We also want an atomic snapshot of the radix tree, thus go
4770 		 * with spinlock rather than RCU.
4771 		 */
4772 		spin_lock(&fs_info->buffer_lock);
4773 		eb = get_next_extent_buffer(fs_info, page, cur);
4774 		if (!eb) {
4775 			/* No more eb in the page range after or at cur */
4776 			spin_unlock(&fs_info->buffer_lock);
4777 			break;
4778 		}
4779 		cur = eb->start + eb->len;
4780 
4781 		/*
4782 		 * The same as try_release_extent_buffer(), to ensure the eb
4783 		 * won't disappear out from under us.
4784 		 */
4785 		spin_lock(&eb->refs_lock);
4786 		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4787 			spin_unlock(&eb->refs_lock);
4788 			spin_unlock(&fs_info->buffer_lock);
4789 			break;
4790 		}
4791 		spin_unlock(&fs_info->buffer_lock);
4792 
4793 		/*
4794 		 * If tree ref isn't set then we know the ref on this eb is a
4795 		 * real ref, so just return, this eb will likely be freed soon
4796 		 * anyway.
4797 		 */
4798 		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4799 			spin_unlock(&eb->refs_lock);
4800 			break;
4801 		}
4802 
4803 		/*
4804 		 * Here we don't care about the return value, we will always
4805 		 * check the folio private at the end.  And
4806 		 * release_extent_buffer() will release the refs_lock.
4807 		 */
4808 		release_extent_buffer(eb);
4809 	}
4810 	/*
4811 	 * Finally check whether we have cleared folio private: if we have
4812 	 * released all ebs in the page, the folio private should be cleared now.
4813 	 */
4814 	spin_lock(&page->mapping->i_private_lock);
4815 	if (!folio_test_private(page_folio(page)))
4816 		ret = 1;
4817 	else
4818 		ret = 0;
4819 	spin_unlock(&page->mapping->i_private_lock);
4820 	return ret;
4822 }
4823 
4824 int try_release_extent_buffer(struct page *page)
4825 {
4826 	struct folio *folio = page_folio(page);
4827 	struct extent_buffer *eb;
4828 
4829 	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
4830 		return try_release_subpage_extent_buffer(page);
4831 
4832 	/*
4833 	 * We need to make sure nobody is changing folio private, as we rely on
4834 	 * folio private as the pointer to extent buffer.
4835 	 */
4836 	spin_lock(&page->mapping->i_private_lock);
4837 	if (!folio_test_private(folio)) {
4838 		spin_unlock(&page->mapping->i_private_lock);
4839 		return 1;
4840 	}
4841 
4842 	eb = folio_get_private(folio);
4843 	BUG_ON(!eb);
4844 
4845 	/*
4846 	 * This is a little awful but should be ok, we need to make sure that
4847 	 * the eb doesn't disappear out from under us while we're looking at
4848 	 * this page.
4849 	 */
4850 	spin_lock(&eb->refs_lock);
4851 	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4852 		spin_unlock(&eb->refs_lock);
4853 		spin_unlock(&page->mapping->i_private_lock);
4854 		return 0;
4855 	}
4856 	spin_unlock(&page->mapping->i_private_lock);
4857 
4858 	/*
4859 	 * If tree ref isn't set then we know the ref on this eb is a real ref,
4860 	 * so just return, this page will likely be freed soon anyway.
4861 	 */
4862 	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4863 		spin_unlock(&eb->refs_lock);
4864 		return 0;
4865 	}
4866 
4867 	return release_extent_buffer(eb);
4868 }
4869 
4870 /*
4871  * Attempt to readahead a child block.
4872  *
4873  * @fs_info:	the fs_info
4874  * @bytenr:	bytenr to read
4875  * @owner_root: objectid of the root that owns this eb
4876  * @gen:	generation for the uptodate check, can be 0
4877  * @level:	level for the eb
4878  *
4879  * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
4880  * normal uptodate check of the eb, without checking the generation.  If we have
4881  * to read the block we will not block on anything.
4882  */
4883 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4884 				u64 bytenr, u64 owner_root, u64 gen, int level)
4885 {
4886 	struct btrfs_tree_parent_check check = {
4887 		.has_first_key = 0,
4888 		.level = level,
4889 		.transid = gen
4890 	};
4891 	struct extent_buffer *eb;
4892 	int ret;
4893 
4894 	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4895 	if (IS_ERR(eb))
4896 		return;
4897 
4898 	if (btrfs_buffer_uptodate(eb, gen, 1)) {
4899 		free_extent_buffer(eb);
4900 		return;
4901 	}
4902 
4903 	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4904 	if (ret < 0)
4905 		free_extent_buffer_stale(eb);
4906 	else
4907 		free_extent_buffer(eb);
4908 }
4909 
4910 /*
4911  * Readahead a node's child block.
4912  *
4913  * @node:	parent node we're reading from
4914  * @slot:	slot in the parent node for the child we want to read
4915  *
4916  * A helper for btrfs_readahead_tree_block, we simply read the bytenr pointed at
4917  * by the slot in the node provided.
4918  */
4919 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4920 {
4921 	btrfs_readahead_tree_block(node->fs_info,
4922 				   btrfs_node_blockptr(node, slot),
4923 				   btrfs_header_owner(node),
4924 				   btrfs_node_ptr_generation(node, slot),
4925 				   btrfs_header_level(node) - 1);
4926 }
4927