xref: /linux/fs/btrfs/extent_io.c (revision e28c5efc31397af17bc5a7d55b963f59bcde0166)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/sched/mm.h>
10 #include <linux/spinlock.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/prefetch.h>
16 #include <linux/fsverity.h>
17 #include "misc.h"
18 #include "extent_io.h"
19 #include "extent-io-tree.h"
20 #include "extent_map.h"
21 #include "ctree.h"
22 #include "btrfs_inode.h"
23 #include "bio.h"
24 #include "locking.h"
25 #include "rcu-string.h"
26 #include "backref.h"
27 #include "disk-io.h"
28 #include "subpage.h"
29 #include "zoned.h"
30 #include "block-group.h"
31 #include "compression.h"
32 #include "fs.h"
33 #include "accessors.h"
34 #include "file-item.h"
35 #include "file.h"
36 #include "dev-replace.h"
37 #include "super.h"
38 #include "transaction.h"
39 
40 static struct kmem_cache *extent_buffer_cache;
41 
42 #ifdef CONFIG_BTRFS_DEBUG
43 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
44 {
45 	struct btrfs_fs_info *fs_info = eb->fs_info;
46 	unsigned long flags;
47 
48 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
49 	list_add(&eb->leak_list, &fs_info->allocated_ebs);
50 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
51 }
52 
53 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
54 {
55 	struct btrfs_fs_info *fs_info = eb->fs_info;
56 	unsigned long flags;
57 
58 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
59 	list_del(&eb->leak_list);
60 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
61 }
62 
63 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
64 {
65 	struct extent_buffer *eb;
66 	unsigned long flags;
67 
68 	/*
69 	 * If we didn't get into open_ctree our allocated_ebs will not be
70 	 * initialized, so just skip this.
71 	 */
72 	if (!fs_info->allocated_ebs.next)
73 		return;
74 
75 	WARN_ON(!list_empty(&fs_info->allocated_ebs));
76 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
77 	while (!list_empty(&fs_info->allocated_ebs)) {
78 		eb = list_first_entry(&fs_info->allocated_ebs,
79 				      struct extent_buffer, leak_list);
80 		pr_err(
81 	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
82 		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
83 		       btrfs_header_owner(eb));
84 		list_del(&eb->leak_list);
85 		kmem_cache_free(extent_buffer_cache, eb);
86 	}
87 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
88 }
89 #else
90 #define btrfs_leak_debug_add_eb(eb)			do {} while (0)
91 #define btrfs_leak_debug_del_eb(eb)			do {} while (0)
92 #endif
93 
94 /*
95  * Structure to record info about the bio being assembled, and other info like
96  * how many bytes are there before stripe/ordered extent boundary.
97  */
98 struct btrfs_bio_ctrl {
99 	struct btrfs_bio *bbio;
100 	enum btrfs_compression_type compress_type;
101 	u32 len_to_oe_boundary;
102 	blk_opf_t opf;
103 	btrfs_bio_end_io_t end_io_func;
104 	struct writeback_control *wbc;
105 };
106 
107 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
108 {
109 	struct btrfs_bio *bbio = bio_ctrl->bbio;
110 
111 	if (!bbio)
112 		return;
113 
114 	/* Caller should ensure the bio has at least some range added */
115 	ASSERT(bbio->bio.bi_iter.bi_size);
116 
117 	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
118 	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
119 		btrfs_submit_compressed_read(bbio);
120 	else
121 		btrfs_submit_bio(bbio, 0);
122 
123 	/* The bbio is owned by the end_io handler now */
124 	bio_ctrl->bbio = NULL;
125 }
126 
127 /*
128  * Submit or fail the current bio in the bio_ctrl structure.
129  */
130 static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
131 {
132 	struct btrfs_bio *bbio = bio_ctrl->bbio;
133 
134 	if (!bbio)
135 		return;
136 
137 	if (ret) {
138 		ASSERT(ret < 0);
139 		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
140 		/* The bio is owned by the end_io handler now */
141 		bio_ctrl->bbio = NULL;
142 	} else {
143 		submit_one_bio(bio_ctrl);
144 	}
145 }
146 
147 int __init extent_buffer_init_cachep(void)
148 {
149 	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
150 			sizeof(struct extent_buffer), 0,
151 			SLAB_MEM_SPREAD, NULL);
152 	if (!extent_buffer_cache)
153 		return -ENOMEM;
154 
155 	return 0;
156 }
157 
158 void __cold extent_buffer_free_cachep(void)
159 {
160 	/*
161 	 * Make sure all delayed rcu free are flushed before we
162 	 * destroy caches.
163 	 */
164 	rcu_barrier();
165 	kmem_cache_destroy(extent_buffer_cache);
166 }
167 
168 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
169 {
170 	unsigned long index = start >> PAGE_SHIFT;
171 	unsigned long end_index = end >> PAGE_SHIFT;
172 	struct page *page;
173 
174 	while (index <= end_index) {
175 		page = find_get_page(inode->i_mapping, index);
176 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
177 		clear_page_dirty_for_io(page);
178 		put_page(page);
179 		index++;
180 	}
181 }
182 
183 static void process_one_page(struct btrfs_fs_info *fs_info,
184 			     struct page *page, struct page *locked_page,
185 			     unsigned long page_ops, u64 start, u64 end)
186 {
187 	struct folio *folio = page_folio(page);
188 	u32 len;
189 
190 	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
191 	len = end + 1 - start;
192 
193 	if (page_ops & PAGE_SET_ORDERED)
194 		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
195 	if (page_ops & PAGE_START_WRITEBACK) {
196 		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
197 		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
198 	}
199 	if (page_ops & PAGE_END_WRITEBACK)
200 		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
201 
202 	if (page != locked_page && (page_ops & PAGE_UNLOCK))
203 		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
204 }
205 
206 static void __process_pages_contig(struct address_space *mapping,
207 				   struct page *locked_page, u64 start, u64 end,
208 				   unsigned long page_ops)
209 {
210 	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
211 	pgoff_t start_index = start >> PAGE_SHIFT;
212 	pgoff_t end_index = end >> PAGE_SHIFT;
213 	pgoff_t index = start_index;
214 	struct folio_batch fbatch;
215 	int i;
216 
217 	folio_batch_init(&fbatch);
218 	while (index <= end_index) {
219 		int found_folios;
220 
221 		found_folios = filemap_get_folios_contig(mapping, &index,
222 				end_index, &fbatch);
223 		for (i = 0; i < found_folios; i++) {
224 			struct folio *folio = fbatch.folios[i];
225 
226 			process_one_page(fs_info, &folio->page, locked_page,
227 					 page_ops, start, end);
228 		}
229 		folio_batch_release(&fbatch);
230 		cond_resched();
231 	}
232 }
233 
234 static noinline void __unlock_for_delalloc(struct inode *inode,
235 					   struct page *locked_page,
236 					   u64 start, u64 end)
237 {
238 	unsigned long index = start >> PAGE_SHIFT;
239 	unsigned long end_index = end >> PAGE_SHIFT;
240 
241 	ASSERT(locked_page);
242 	if (index == locked_page->index && end_index == index)
243 		return;
244 
245 	__process_pages_contig(inode->i_mapping, locked_page, start, end,
246 			       PAGE_UNLOCK);
247 }
248 
249 static noinline int lock_delalloc_pages(struct inode *inode,
250 					struct page *locked_page,
251 					u64 start,
252 					u64 end)
253 {
254 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
255 	struct address_space *mapping = inode->i_mapping;
256 	pgoff_t start_index = start >> PAGE_SHIFT;
257 	pgoff_t end_index = end >> PAGE_SHIFT;
258 	pgoff_t index = start_index;
259 	u64 processed_end = start;
260 	struct folio_batch fbatch;
261 
262 	if (index == locked_page->index && index == end_index)
263 		return 0;
264 
265 	folio_batch_init(&fbatch);
266 	while (index <= end_index) {
267 		unsigned int found_folios, i;
268 
269 		found_folios = filemap_get_folios_contig(mapping, &index,
270 				end_index, &fbatch);
271 		if (found_folios == 0)
272 			goto out;
273 
274 		for (i = 0; i < found_folios; i++) {
275 			struct folio *folio = fbatch.folios[i];
276 			struct page *page = folio_page(folio, 0);
277 			u32 len = end + 1 - start;
278 
279 			if (page == locked_page)
280 				continue;
281 
282 			if (btrfs_folio_start_writer_lock(fs_info, folio, start,
283 							  len))
284 				goto out;
285 
286 			if (!PageDirty(page) || page->mapping != mapping) {
287 				btrfs_folio_end_writer_lock(fs_info, folio, start,
288 							    len);
289 				goto out;
290 			}
291 
292 			processed_end = page_offset(page) + PAGE_SIZE - 1;
293 		}
294 		folio_batch_release(&fbatch);
295 		cond_resched();
296 	}
297 
298 	return 0;
299 out:
300 	folio_batch_release(&fbatch);
301 	if (processed_end > start)
302 		__unlock_for_delalloc(inode, locked_page, start, processed_end);
303 	return -EAGAIN;
304 }
305 
306 /*
307  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
308  * more than @max_bytes.
309  *
310  * @start:	The original start bytenr to search.
311  *		Will store the extent range start bytenr.
312  * @end:	The original end bytenr of the search range
313  *		Will store the extent range end bytenr.
314  *
315  * Return true if we find a delalloc range which starts inside the original
316  * range, and @start/@end will store the delalloc range start/end.
317  *
318  * Return false if we can't find any delalloc range which starts inside the
319  * original range, and @start/@end will be the non-delalloc range start/end.
320  */
321 EXPORT_FOR_TESTS
322 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
323 				    struct page *locked_page, u64 *start,
324 				    u64 *end)
325 {
326 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
327 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
328 	const u64 orig_start = *start;
329 	const u64 orig_end = *end;
330 	/* The sanity tests may not set a valid fs_info. */
331 	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
332 	u64 delalloc_start;
333 	u64 delalloc_end;
334 	bool found;
335 	struct extent_state *cached_state = NULL;
336 	int ret;
337 	int loops = 0;
338 
339 	/* Caller should pass a valid @end to indicate the search range end */
340 	ASSERT(orig_end > orig_start);
341 
342 	/* The range should at least cover part of the page */
343 	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
344 		 orig_end <= page_offset(locked_page)));
345 again:
346 	/* step one, find a bunch of delalloc bytes starting at start */
347 	delalloc_start = *start;
348 	delalloc_end = 0;
349 	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
350 					  max_bytes, &cached_state);
351 	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
352 		*start = delalloc_start;
353 
354 		/* @delalloc_end can be -1, never go beyond @orig_end */
355 		*end = min(delalloc_end, orig_end);
356 		free_extent_state(cached_state);
357 		return false;
358 	}
359 
360 	/*
361 	 * start comes from the offset of locked_page.  We have to lock
362 	 * pages in order, so we can't process delalloc bytes before
363 	 * locked_page
364 	 */
365 	if (delalloc_start < *start)
366 		delalloc_start = *start;
367 
368 	/*
369 	 * make sure to limit the number of pages we try to lock down
370 	 */
371 	if (delalloc_end + 1 - delalloc_start > max_bytes)
372 		delalloc_end = delalloc_start + max_bytes - 1;
373 
374 	/* step two, lock all the pages after the page that has start */
375 	ret = lock_delalloc_pages(inode, locked_page,
376 				  delalloc_start, delalloc_end);
377 	ASSERT(!ret || ret == -EAGAIN);
378 	if (ret == -EAGAIN) {
379 		/* some of the pages are gone, lets avoid looping by
380 		 * shortening the size of the delalloc range we're searching
381 		 */
382 		free_extent_state(cached_state);
383 		cached_state = NULL;
384 		if (!loops) {
385 			max_bytes = PAGE_SIZE;
386 			loops = 1;
387 			goto again;
388 		} else {
389 			found = false;
390 			goto out_failed;
391 		}
392 	}
393 
394 	/* step three, lock the state bits for the whole range */
395 	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
396 
397 	/* then test to make sure it is all still delalloc */
398 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
399 			     EXTENT_DELALLOC, cached_state);
400 	if (!ret) {
401 		unlock_extent(tree, delalloc_start, delalloc_end,
402 			      &cached_state);
403 		__unlock_for_delalloc(inode, locked_page,
404 			      delalloc_start, delalloc_end);
405 		cond_resched();
406 		goto again;
407 	}
408 	free_extent_state(cached_state);
409 	*start = delalloc_start;
410 	*end = delalloc_end;
411 out_failed:
412 	return found;
413 }
414 
415 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
416 				  struct page *locked_page,
417 				  u32 clear_bits, unsigned long page_ops)
418 {
419 	clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
420 
421 	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
422 			       start, end, page_ops);
423 }
424 
425 static bool btrfs_verify_page(struct page *page, u64 start)
426 {
427 	if (!fsverity_active(page->mapping->host) ||
428 	    PageUptodate(page) ||
429 	    start >= i_size_read(page->mapping->host))
430 		return true;
431 	return fsverity_verify_page(page);
432 }
433 
434 static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
435 {
436 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
437 	struct folio *folio = page_folio(page);
438 
439 	ASSERT(page_offset(page) <= start &&
440 	       start + len <= page_offset(page) + PAGE_SIZE);
441 
442 	if (uptodate && btrfs_verify_page(page, start))
443 		btrfs_folio_set_uptodate(fs_info, folio, start, len);
444 	else
445 		btrfs_folio_clear_uptodate(fs_info, folio, start, len);
446 
447 	if (!btrfs_is_subpage(fs_info, page->mapping))
448 		unlock_page(page);
449 	else
450 		btrfs_subpage_end_reader(fs_info, folio, start, len);
451 }
452 
453 /*
454  * After a write IO is done, we need to:
455  *
456  * - clear the uptodate bits on error
457  * - clear the writeback bits in the extent tree for the range
458  * - filio_end_writeback()  if there is no more pending io for the folio
459  *
460  * Scheduling is not allowed, so the extent state tree is expected
461  * to have one and only one object corresponding to this IO.
462  */
463 static void end_bbio_data_write(struct btrfs_bio *bbio)
464 {
465 	struct bio *bio = &bbio->bio;
466 	int error = blk_status_to_errno(bio->bi_status);
467 	struct folio_iter fi;
468 
469 	ASSERT(!bio_flagged(bio, BIO_CLONED));
470 	bio_for_each_folio_all(fi, bio) {
471 		struct folio *folio = fi.folio;
472 		struct inode *inode = folio->mapping->host;
473 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
474 		const u32 sectorsize = fs_info->sectorsize;
475 		u64 start = folio_pos(folio) + fi.offset;
476 		u32 len = fi.length;
477 
478 		/* Only order 0 (single page) folios are allowed for data. */
479 		ASSERT(folio_order(folio) == 0);
480 
481 		/* Our read/write should always be sector aligned. */
482 		if (!IS_ALIGNED(fi.offset, sectorsize))
483 			btrfs_err(fs_info,
484 		"partial page write in btrfs with offset %zu and length %zu",
485 				  fi.offset, fi.length);
486 		else if (!IS_ALIGNED(fi.length, sectorsize))
487 			btrfs_info(fs_info,
488 		"incomplete page write with offset %zu and length %zu",
489 				   fi.offset, fi.length);
490 
491 		btrfs_finish_ordered_extent(bbio->ordered,
492 				folio_page(folio, 0), start, len, !error);
493 		if (error)
494 			mapping_set_error(folio->mapping, error);
495 		btrfs_folio_clear_writeback(fs_info, folio, start, len);
496 	}
497 
498 	bio_put(bio);
499 }
500 
501 /*
502  * Record previously processed extent range
503  *
504  * For endio_readpage_release_extent() to handle a full extent range, reducing
505  * the extent io operations.
506  */
507 struct processed_extent {
508 	struct btrfs_inode *inode;
509 	/* Start of the range in @inode */
510 	u64 start;
511 	/* End of the range in @inode */
512 	u64 end;
513 	bool uptodate;
514 };
515 
516 /*
517  * Try to release processed extent range
518  *
519  * May not release the extent range right now if the current range is
520  * contiguous to processed extent.
521  *
522  * Will release processed extent when any of @inode, @uptodate, the range is
523  * no longer contiguous to the processed range.
524  *
525  * Passing @inode == NULL will force processed extent to be released.
526  */
527 static void endio_readpage_release_extent(struct processed_extent *processed,
528 			      struct btrfs_inode *inode, u64 start, u64 end,
529 			      bool uptodate)
530 {
531 	struct extent_state *cached = NULL;
532 	struct extent_io_tree *tree;
533 
534 	/* The first extent, initialize @processed */
535 	if (!processed->inode)
536 		goto update;
537 
538 	/*
539 	 * Contiguous to processed extent, just uptodate the end.
540 	 *
541 	 * Several things to notice:
542 	 *
543 	 * - bio can be merged as long as on-disk bytenr is contiguous
544 	 *   This means we can have page belonging to other inodes, thus need to
545 	 *   check if the inode still matches.
546 	 * - bvec can contain range beyond current page for multi-page bvec
547 	 *   Thus we need to do processed->end + 1 >= start check
548 	 */
549 	if (processed->inode == inode && processed->uptodate == uptodate &&
550 	    processed->end + 1 >= start && end >= processed->end) {
551 		processed->end = end;
552 		return;
553 	}
554 
555 	tree = &processed->inode->io_tree;
556 	/*
557 	 * Now we don't have range contiguous to the processed range, release
558 	 * the processed range now.
559 	 */
560 	unlock_extent(tree, processed->start, processed->end, &cached);
561 
562 update:
563 	/* Update processed to current range */
564 	processed->inode = inode;
565 	processed->start = start;
566 	processed->end = end;
567 	processed->uptodate = uptodate;
568 }
569 
570 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
571 {
572 	struct folio *folio = page_folio(page);
573 
574 	ASSERT(folio_test_locked(folio));
575 	if (!btrfs_is_subpage(fs_info, folio->mapping))
576 		return;
577 
578 	ASSERT(folio_test_private(folio));
579 	btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
580 }
581 
582 /*
583  * After a data read IO is done, we need to:
584  *
585  * - clear the uptodate bits on error
586  * - set the uptodate bits if things worked
587  * - set the folio up to date if all extents in the tree are uptodate
588  * - clear the lock bit in the extent tree
589  * - unlock the folio if there are no other extents locked for it
590  *
591  * Scheduling is not allowed, so the extent state tree is expected
592  * to have one and only one object corresponding to this IO.
593  */
594 static void end_bbio_data_read(struct btrfs_bio *bbio)
595 {
596 	struct bio *bio = &bbio->bio;
597 	struct processed_extent processed = { 0 };
598 	struct folio_iter fi;
599 	/*
600 	 * The offset to the beginning of a bio, since one bio can never be
601 	 * larger than UINT_MAX, u32 here is enough.
602 	 */
603 	u32 bio_offset = 0;
604 
605 	ASSERT(!bio_flagged(bio, BIO_CLONED));
606 	bio_for_each_folio_all(fi, &bbio->bio) {
607 		bool uptodate = !bio->bi_status;
608 		struct folio *folio = fi.folio;
609 		struct inode *inode = folio->mapping->host;
610 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
611 		const u32 sectorsize = fs_info->sectorsize;
612 		u64 start;
613 		u64 end;
614 		u32 len;
615 
616 		/* For now only order 0 folios are supported for data. */
617 		ASSERT(folio_order(folio) == 0);
618 		btrfs_debug(fs_info,
619 			"%s: bi_sector=%llu, err=%d, mirror=%u",
620 			__func__, bio->bi_iter.bi_sector, bio->bi_status,
621 			bbio->mirror_num);
622 
623 		/*
624 		 * We always issue full-sector reads, but if some block in a
625 		 * folio fails to read, blk_update_request() will advance
626 		 * bv_offset and adjust bv_len to compensate.  Print a warning
627 		 * for unaligned offsets, and an error if they don't add up to
628 		 * a full sector.
629 		 */
630 		if (!IS_ALIGNED(fi.offset, sectorsize))
631 			btrfs_err(fs_info,
632 		"partial page read in btrfs with offset %zu and length %zu",
633 				  fi.offset, fi.length);
634 		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
635 			btrfs_info(fs_info,
636 		"incomplete page read with offset %zu and length %zu",
637 				   fi.offset, fi.length);
638 
639 		start = folio_pos(folio) + fi.offset;
640 		end = start + fi.length - 1;
641 		len = fi.length;
642 
643 		if (likely(uptodate)) {
644 			loff_t i_size = i_size_read(inode);
645 			pgoff_t end_index = i_size >> folio_shift(folio);
646 
647 			/*
648 			 * Zero out the remaining part if this range straddles
649 			 * i_size.
650 			 *
651 			 * Here we should only zero the range inside the folio,
652 			 * not touch anything else.
653 			 *
654 			 * NOTE: i_size is exclusive while end is inclusive.
655 			 */
656 			if (folio_index(folio) == end_index && i_size <= end) {
657 				u32 zero_start = max(offset_in_folio(folio, i_size),
658 						     offset_in_folio(folio, start));
659 				u32 zero_len = offset_in_folio(folio, end) + 1 -
660 					       zero_start;
661 
662 				folio_zero_range(folio, zero_start, zero_len);
663 			}
664 		}
665 
666 		/* Update page status and unlock. */
667 		end_page_read(folio_page(folio, 0), uptodate, start, len);
668 		endio_readpage_release_extent(&processed, BTRFS_I(inode),
669 					      start, end, uptodate);
670 
671 		ASSERT(bio_offset + len > bio_offset);
672 		bio_offset += len;
673 
674 	}
675 	/* Release the last extent */
676 	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
677 	bio_put(bio);
678 }
679 
680 /*
681  * Populate every free slot in a provided array with pages.
682  *
683  * @nr_pages:   number of pages to allocate
684  * @page_array: the array to fill with pages; any existing non-null entries in
685  * 		the array will be skipped
686  * @extra_gfp:	the extra GFP flags for the allocation.
687  *
688  * Return: 0        if all pages were able to be allocated;
689  *         -ENOMEM  otherwise, the partially allocated pages would be freed and
690  *                  the array slots zeroed
691  */
692 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
693 			   gfp_t extra_gfp)
694 {
695 	unsigned int allocated;
696 
697 	for (allocated = 0; allocated < nr_pages;) {
698 		unsigned int last = allocated;
699 
700 		allocated = alloc_pages_bulk_array(GFP_NOFS | extra_gfp,
701 						   nr_pages, page_array);
702 
703 		if (allocated == nr_pages)
704 			return 0;
705 
706 		/*
707 		 * During this iteration, no page could be allocated, even
708 		 * though alloc_pages_bulk_array() falls back to alloc_page()
709 		 * if  it could not bulk-allocate. So we must be out of memory.
710 		 */
711 		if (allocated == last) {
712 			for (int i = 0; i < allocated; i++) {
713 				__free_page(page_array[i]);
714 				page_array[i] = NULL;
715 			}
716 			return -ENOMEM;
717 		}
718 
719 		memalloc_retry_wait(GFP_NOFS);
720 	}
721 	return 0;
722 }
723 
724 /*
725  * Populate needed folios for the extent buffer.
726  *
727  * For now, the folios populated are always in order 0 (aka, single page).
728  */
729 static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
730 {
731 	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
732 	int num_pages = num_extent_pages(eb);
733 	int ret;
734 
735 	ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
736 	if (ret < 0)
737 		return ret;
738 
739 	for (int i = 0; i < num_pages; i++)
740 		eb->folios[i] = page_folio(page_array[i]);
741 	return 0;
742 }
743 
744 static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
745 				struct page *page, u64 disk_bytenr,
746 				unsigned int pg_offset)
747 {
748 	struct bio *bio = &bio_ctrl->bbio->bio;
749 	struct bio_vec *bvec = bio_last_bvec_all(bio);
750 	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
751 
752 	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
753 		/*
754 		 * For compression, all IO should have its logical bytenr set
755 		 * to the starting bytenr of the compressed extent.
756 		 */
757 		return bio->bi_iter.bi_sector == sector;
758 	}
759 
760 	/*
761 	 * The contig check requires the following conditions to be met:
762 	 *
763 	 * 1) The pages are belonging to the same inode
764 	 *    This is implied by the call chain.
765 	 *
766 	 * 2) The range has adjacent logical bytenr
767 	 *
768 	 * 3) The range has adjacent file offset
769 	 *    This is required for the usage of btrfs_bio->file_offset.
770 	 */
771 	return bio_end_sector(bio) == sector &&
772 		page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
773 		page_offset(page) + pg_offset;
774 }
775 
776 static void alloc_new_bio(struct btrfs_inode *inode,
777 			  struct btrfs_bio_ctrl *bio_ctrl,
778 			  u64 disk_bytenr, u64 file_offset)
779 {
780 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
781 	struct btrfs_bio *bbio;
782 
783 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
784 			       bio_ctrl->end_io_func, NULL);
785 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
786 	bbio->inode = inode;
787 	bbio->file_offset = file_offset;
788 	bio_ctrl->bbio = bbio;
789 	bio_ctrl->len_to_oe_boundary = U32_MAX;
790 
791 	/* Limit data write bios to the ordered boundary. */
792 	if (bio_ctrl->wbc) {
793 		struct btrfs_ordered_extent *ordered;
794 
795 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
796 		if (ordered) {
797 			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
798 					ordered->file_offset +
799 					ordered->disk_num_bytes - file_offset);
800 			bbio->ordered = ordered;
801 		}
802 
803 		/*
804 		 * Pick the last added device to support cgroup writeback.  For
805 		 * multi-device file systems this means blk-cgroup policies have
806 		 * to always be set on the last added/replaced device.
807 		 * This is a bit odd but has been like that for a long time.
808 		 */
809 		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
810 		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
811 	}
812 }
813 
814 /*
815  * @disk_bytenr: logical bytenr where the write will be
816  * @page:	page to add to the bio
817  * @size:	portion of page that we want to write to
818  * @pg_offset:	offset of the new bio or to check whether we are adding
819  *              a contiguous page to the previous one
820  *
821  * The will either add the page into the existing @bio_ctrl->bbio, or allocate a
822  * new one in @bio_ctrl->bbio.
823  * The mirror number for this IO should already be initizlied in
824  * @bio_ctrl->mirror_num.
825  */
826 static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
827 			       u64 disk_bytenr, struct page *page,
828 			       size_t size, unsigned long pg_offset)
829 {
830 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
831 
832 	ASSERT(pg_offset + size <= PAGE_SIZE);
833 	ASSERT(bio_ctrl->end_io_func);
834 
835 	if (bio_ctrl->bbio &&
836 	    !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
837 		submit_one_bio(bio_ctrl);
838 
839 	do {
840 		u32 len = size;
841 
842 		/* Allocate new bio if needed */
843 		if (!bio_ctrl->bbio) {
844 			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
845 				      page_offset(page) + pg_offset);
846 		}
847 
848 		/* Cap to the current ordered extent boundary if there is one. */
849 		if (len > bio_ctrl->len_to_oe_boundary) {
850 			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
851 			ASSERT(is_data_inode(&inode->vfs_inode));
852 			len = bio_ctrl->len_to_oe_boundary;
853 		}
854 
855 		if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
856 			/* bio full: move on to a new one */
857 			submit_one_bio(bio_ctrl);
858 			continue;
859 		}
860 
861 		if (bio_ctrl->wbc)
862 			wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
863 
864 		size -= len;
865 		pg_offset += len;
866 		disk_bytenr += len;
867 
868 		/*
869 		 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
870 		 * sector aligned.  alloc_new_bio() then sets it to the end of
871 		 * our ordered extent for writes into zoned devices.
872 		 *
873 		 * When len_to_oe_boundary is tracking an ordered extent, we
874 		 * trust the ordered extent code to align things properly, and
875 		 * the check above to cap our write to the ordered extent
876 		 * boundary is correct.
877 		 *
878 		 * When len_to_oe_boundary is U32_MAX, the cap above would
879 		 * result in a 4095 byte IO for the last page right before
880 		 * we hit the bio limit of UINT_MAX.  bio_add_page() has all
881 		 * the checks required to make sure we don't overflow the bio,
882 		 * and we should just ignore len_to_oe_boundary completely
883 		 * unless we're using it to track an ordered extent.
884 		 *
885 		 * It's pretty hard to make a bio sized U32_MAX, but it can
886 		 * happen when the page cache is able to feed us contiguous
887 		 * pages for large extents.
888 		 */
889 		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
890 			bio_ctrl->len_to_oe_boundary -= len;
891 
892 		/* Ordered extent boundary: move on to a new bio. */
893 		if (bio_ctrl->len_to_oe_boundary == 0)
894 			submit_one_bio(bio_ctrl);
895 	} while (size);
896 }
897 
898 static int attach_extent_buffer_folio(struct extent_buffer *eb,
899 				      struct folio *folio,
900 				      struct btrfs_subpage *prealloc)
901 {
902 	struct btrfs_fs_info *fs_info = eb->fs_info;
903 	int ret = 0;
904 
905 	/*
906 	 * If the page is mapped to btree inode, we should hold the private
907 	 * lock to prevent race.
908 	 * For cloned or dummy extent buffers, their pages are not mapped and
909 	 * will not race with any other ebs.
910 	 */
911 	if (folio->mapping)
912 		lockdep_assert_held(&folio->mapping->i_private_lock);
913 
914 	if (fs_info->nodesize >= PAGE_SIZE) {
915 		if (!folio_test_private(folio))
916 			folio_attach_private(folio, eb);
917 		else
918 			WARN_ON(folio_get_private(folio) != eb);
919 		return 0;
920 	}
921 
922 	/* Already mapped, just free prealloc */
923 	if (folio_test_private(folio)) {
924 		btrfs_free_subpage(prealloc);
925 		return 0;
926 	}
927 
928 	if (prealloc)
929 		/* Has preallocated memory for subpage */
930 		folio_attach_private(folio, prealloc);
931 	else
932 		/* Do new allocation to attach subpage */
933 		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
934 	return ret;
935 }
936 
937 int set_page_extent_mapped(struct page *page)
938 {
939 	struct folio *folio = page_folio(page);
940 	struct btrfs_fs_info *fs_info;
941 
942 	ASSERT(page->mapping);
943 
944 	if (folio_test_private(folio))
945 		return 0;
946 
947 	fs_info = btrfs_sb(page->mapping->host->i_sb);
948 
949 	if (btrfs_is_subpage(fs_info, page->mapping))
950 		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
951 
952 	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
953 	return 0;
954 }
955 
956 void clear_page_extent_mapped(struct page *page)
957 {
958 	struct folio *folio = page_folio(page);
959 	struct btrfs_fs_info *fs_info;
960 
961 	ASSERT(page->mapping);
962 
963 	if (!folio_test_private(folio))
964 		return;
965 
966 	fs_info = btrfs_sb(page->mapping->host->i_sb);
967 	if (btrfs_is_subpage(fs_info, page->mapping))
968 		return btrfs_detach_subpage(fs_info, folio);
969 
970 	folio_detach_private(folio);
971 }
972 
973 static struct extent_map *
974 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
975 		 u64 start, u64 len, struct extent_map **em_cached)
976 {
977 	struct extent_map *em;
978 
979 	if (em_cached && *em_cached) {
980 		em = *em_cached;
981 		if (extent_map_in_tree(em) && start >= em->start &&
982 		    start < extent_map_end(em)) {
983 			refcount_inc(&em->refs);
984 			return em;
985 		}
986 
987 		free_extent_map(em);
988 		*em_cached = NULL;
989 	}
990 
991 	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
992 	if (em_cached && !IS_ERR(em)) {
993 		BUG_ON(*em_cached);
994 		refcount_inc(&em->refs);
995 		*em_cached = em;
996 	}
997 	return em;
998 }
999 /*
1000  * basic readpage implementation.  Locked extent state structs are inserted
1001  * into the tree that are removed when the IO is done (by the end_io
1002  * handlers)
1003  * XXX JDM: This needs looking at to ensure proper page locking
1004  * return 0 on success, otherwise return error
1005  */
1006 static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
1007 		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
1008 {
1009 	struct inode *inode = page->mapping->host;
1010 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1011 	u64 start = page_offset(page);
1012 	const u64 end = start + PAGE_SIZE - 1;
1013 	u64 cur = start;
1014 	u64 extent_offset;
1015 	u64 last_byte = i_size_read(inode);
1016 	u64 block_start;
1017 	struct extent_map *em;
1018 	int ret = 0;
1019 	size_t pg_offset = 0;
1020 	size_t iosize;
1021 	size_t blocksize = inode->i_sb->s_blocksize;
1022 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1023 
1024 	ret = set_page_extent_mapped(page);
1025 	if (ret < 0) {
1026 		unlock_extent(tree, start, end, NULL);
1027 		unlock_page(page);
1028 		return ret;
1029 	}
1030 
1031 	if (page->index == last_byte >> PAGE_SHIFT) {
1032 		size_t zero_offset = offset_in_page(last_byte);
1033 
1034 		if (zero_offset) {
1035 			iosize = PAGE_SIZE - zero_offset;
1036 			memzero_page(page, zero_offset, iosize);
1037 		}
1038 	}
1039 	bio_ctrl->end_io_func = end_bbio_data_read;
1040 	begin_page_read(fs_info, page);
1041 	while (cur <= end) {
1042 		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1043 		bool force_bio_submit = false;
1044 		u64 disk_bytenr;
1045 
1046 		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1047 		if (cur >= last_byte) {
1048 			iosize = PAGE_SIZE - pg_offset;
1049 			memzero_page(page, pg_offset, iosize);
1050 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1051 			end_page_read(page, true, cur, iosize);
1052 			break;
1053 		}
1054 		em = __get_extent_map(inode, page, pg_offset, cur,
1055 				      end - cur + 1, em_cached);
1056 		if (IS_ERR(em)) {
1057 			unlock_extent(tree, cur, end, NULL);
1058 			end_page_read(page, false, cur, end + 1 - cur);
1059 			return PTR_ERR(em);
1060 		}
1061 		extent_offset = cur - em->start;
1062 		BUG_ON(extent_map_end(em) <= cur);
1063 		BUG_ON(end < cur);
1064 
1065 		compress_type = extent_map_compression(em);
1066 
1067 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1068 		iosize = ALIGN(iosize, blocksize);
1069 		if (compress_type != BTRFS_COMPRESS_NONE)
1070 			disk_bytenr = em->block_start;
1071 		else
1072 			disk_bytenr = em->block_start + extent_offset;
1073 		block_start = em->block_start;
1074 		if (em->flags & EXTENT_FLAG_PREALLOC)
1075 			block_start = EXTENT_MAP_HOLE;
1076 
1077 		/*
1078 		 * If we have a file range that points to a compressed extent
1079 		 * and it's followed by a consecutive file range that points
1080 		 * to the same compressed extent (possibly with a different
1081 		 * offset and/or length, so it either points to the whole extent
1082 		 * or only part of it), we must make sure we do not submit a
1083 		 * single bio to populate the pages for the 2 ranges because
1084 		 * this makes the compressed extent read zero out the pages
1085 		 * belonging to the 2nd range. Imagine the following scenario:
1086 		 *
1087 		 *  File layout
1088 		 *  [0 - 8K]                     [8K - 24K]
1089 		 *    |                               |
1090 		 *    |                               |
1091 		 * points to extent X,         points to extent X,
1092 		 * offset 4K, length of 8K     offset 0, length 16K
1093 		 *
1094 		 * [extent X, compressed length = 4K uncompressed length = 16K]
1095 		 *
1096 		 * If the bio to read the compressed extent covers both ranges,
1097 		 * it will decompress extent X into the pages belonging to the
1098 		 * first range and then it will stop, zeroing out the remaining
1099 		 * pages that belong to the other range that points to extent X.
1100 		 * So here we make sure we submit 2 bios, one for the first
1101 		 * range and another one for the third range. Both will target
1102 		 * the same physical extent from disk, but we can't currently
1103 		 * make the compressed bio endio callback populate the pages
1104 		 * for both ranges because each compressed bio is tightly
1105 		 * coupled with a single extent map, and each range can have
1106 		 * an extent map with a different offset value relative to the
1107 		 * uncompressed data of our extent and different lengths. This
1108 		 * is a corner case so we prioritize correctness over
1109 		 * non-optimal behavior (submitting 2 bios for the same extent).
1110 		 */
1111 		if (compress_type != BTRFS_COMPRESS_NONE &&
1112 		    prev_em_start && *prev_em_start != (u64)-1 &&
1113 		    *prev_em_start != em->start)
1114 			force_bio_submit = true;
1115 
1116 		if (prev_em_start)
1117 			*prev_em_start = em->start;
1118 
1119 		free_extent_map(em);
1120 		em = NULL;
1121 
1122 		/* we've found a hole, just zero and go on */
1123 		if (block_start == EXTENT_MAP_HOLE) {
1124 			memzero_page(page, pg_offset, iosize);
1125 
1126 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1127 			end_page_read(page, true, cur, iosize);
1128 			cur = cur + iosize;
1129 			pg_offset += iosize;
1130 			continue;
1131 		}
1132 		/* the get_extent function already copied into the page */
1133 		if (block_start == EXTENT_MAP_INLINE) {
1134 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1135 			end_page_read(page, true, cur, iosize);
1136 			cur = cur + iosize;
1137 			pg_offset += iosize;
1138 			continue;
1139 		}
1140 
1141 		if (bio_ctrl->compress_type != compress_type) {
1142 			submit_one_bio(bio_ctrl);
1143 			bio_ctrl->compress_type = compress_type;
1144 		}
1145 
1146 		if (force_bio_submit)
1147 			submit_one_bio(bio_ctrl);
1148 		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1149 				   pg_offset);
1150 		cur = cur + iosize;
1151 		pg_offset += iosize;
1152 	}
1153 
1154 	return 0;
1155 }
1156 
1157 int btrfs_read_folio(struct file *file, struct folio *folio)
1158 {
1159 	struct page *page = &folio->page;
1160 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
1161 	u64 start = page_offset(page);
1162 	u64 end = start + PAGE_SIZE - 1;
1163 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1164 	int ret;
1165 
1166 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1167 
1168 	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
1169 	/*
1170 	 * If btrfs_do_readpage() failed we will want to submit the assembled
1171 	 * bio to do the cleanup.
1172 	 */
1173 	submit_one_bio(&bio_ctrl);
1174 	return ret;
1175 }
1176 
1177 static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1178 					u64 start, u64 end,
1179 					struct extent_map **em_cached,
1180 					struct btrfs_bio_ctrl *bio_ctrl,
1181 					u64 *prev_em_start)
1182 {
1183 	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
1184 	int index;
1185 
1186 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1187 
1188 	for (index = 0; index < nr_pages; index++) {
1189 		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1190 				  prev_em_start);
1191 		put_page(pages[index]);
1192 	}
1193 }
1194 
1195 /*
1196  * helper for __extent_writepage, doing all of the delayed allocation setup.
1197  *
1198  * This returns 1 if btrfs_run_delalloc_range function did all the work required
1199  * to write the page (copy into inline extent).  In this case the IO has
1200  * been started and the page is already unlocked.
1201  *
1202  * This returns 0 if all went well (page still locked)
1203  * This returns < 0 if there were errors (page still locked)
1204  */
1205 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1206 		struct page *page, struct writeback_control *wbc)
1207 {
1208 	const u64 page_start = page_offset(page);
1209 	const u64 page_end = page_start + PAGE_SIZE - 1;
1210 	u64 delalloc_start = page_start;
1211 	u64 delalloc_end = page_end;
1212 	u64 delalloc_to_write = 0;
1213 	int ret = 0;
1214 
1215 	while (delalloc_start < page_end) {
1216 		delalloc_end = page_end;
1217 		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1218 					      &delalloc_start, &delalloc_end)) {
1219 			delalloc_start = delalloc_end + 1;
1220 			continue;
1221 		}
1222 
1223 		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1224 					       delalloc_end, wbc);
1225 		if (ret < 0)
1226 			return ret;
1227 
1228 		delalloc_start = delalloc_end + 1;
1229 	}
1230 
1231 	/*
1232 	 * delalloc_end is already one less than the total length, so
1233 	 * we don't subtract one from PAGE_SIZE
1234 	 */
1235 	delalloc_to_write +=
1236 		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1237 
1238 	/*
1239 	 * If btrfs_run_dealloc_range() already started I/O and unlocked
1240 	 * the pages, we just need to account for them here.
1241 	 */
1242 	if (ret == 1) {
1243 		wbc->nr_to_write -= delalloc_to_write;
1244 		return 1;
1245 	}
1246 
1247 	if (wbc->nr_to_write < delalloc_to_write) {
1248 		int thresh = 8192;
1249 
1250 		if (delalloc_to_write < thresh * 2)
1251 			thresh = delalloc_to_write;
1252 		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1253 					 thresh);
1254 	}
1255 
1256 	return 0;
1257 }
1258 
1259 /*
1260  * Find the first byte we need to write.
1261  *
1262  * For subpage, one page can contain several sectors, and
1263  * __extent_writepage_io() will just grab all extent maps in the page
1264  * range and try to submit all non-inline/non-compressed extents.
1265  *
1266  * This is a big problem for subpage, we shouldn't re-submit already written
1267  * data at all.
1268  * This function will lookup subpage dirty bit to find which range we really
1269  * need to submit.
1270  *
1271  * Return the next dirty range in [@start, @end).
1272  * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1273  */
1274 static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1275 				 struct page *page, u64 *start, u64 *end)
1276 {
1277 	struct folio *folio = page_folio(page);
1278 	struct btrfs_subpage *subpage = folio_get_private(folio);
1279 	struct btrfs_subpage_info *spi = fs_info->subpage_info;
1280 	u64 orig_start = *start;
1281 	/* Declare as unsigned long so we can use bitmap ops */
1282 	unsigned long flags;
1283 	int range_start_bit;
1284 	int range_end_bit;
1285 
1286 	/*
1287 	 * For regular sector size == page size case, since one page only
1288 	 * contains one sector, we return the page offset directly.
1289 	 */
1290 	if (!btrfs_is_subpage(fs_info, page->mapping)) {
1291 		*start = page_offset(page);
1292 		*end = page_offset(page) + PAGE_SIZE;
1293 		return;
1294 	}
1295 
1296 	range_start_bit = spi->dirty_offset +
1297 			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1298 
1299 	/* We should have the page locked, but just in case */
1300 	spin_lock_irqsave(&subpage->lock, flags);
1301 	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1302 			       spi->dirty_offset + spi->bitmap_nr_bits);
1303 	spin_unlock_irqrestore(&subpage->lock, flags);
1304 
1305 	range_start_bit -= spi->dirty_offset;
1306 	range_end_bit -= spi->dirty_offset;
1307 
1308 	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1309 	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1310 }
1311 
1312 /*
1313  * helper for __extent_writepage.  This calls the writepage start hooks,
1314  * and does the loop to map the page into extents and bios.
1315  *
1316  * We return 1 if the IO is started and the page is unlocked,
1317  * 0 if all went well (page still locked)
1318  * < 0 if there were errors (page still locked)
1319  */
1320 static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1321 				 struct page *page,
1322 				 struct btrfs_bio_ctrl *bio_ctrl,
1323 				 loff_t i_size,
1324 				 int *nr_ret)
1325 {
1326 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1327 	u64 cur = page_offset(page);
1328 	u64 end = cur + PAGE_SIZE - 1;
1329 	u64 extent_offset;
1330 	u64 block_start;
1331 	struct extent_map *em;
1332 	int ret = 0;
1333 	int nr = 0;
1334 
1335 	ret = btrfs_writepage_cow_fixup(page);
1336 	if (ret) {
1337 		/* Fixup worker will requeue */
1338 		redirty_page_for_writepage(bio_ctrl->wbc, page);
1339 		unlock_page(page);
1340 		return 1;
1341 	}
1342 
1343 	bio_ctrl->end_io_func = end_bbio_data_write;
1344 	while (cur <= end) {
1345 		u32 len = end - cur + 1;
1346 		u64 disk_bytenr;
1347 		u64 em_end;
1348 		u64 dirty_range_start = cur;
1349 		u64 dirty_range_end;
1350 		u32 iosize;
1351 
1352 		if (cur >= i_size) {
1353 			btrfs_mark_ordered_io_finished(inode, page, cur, len,
1354 						       true);
1355 			/*
1356 			 * This range is beyond i_size, thus we don't need to
1357 			 * bother writing back.
1358 			 * But we still need to clear the dirty subpage bit, or
1359 			 * the next time the page gets dirtied, we will try to
1360 			 * writeback the sectors with subpage dirty bits,
1361 			 * causing writeback without ordered extent.
1362 			 */
1363 			btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
1364 			break;
1365 		}
1366 
1367 		find_next_dirty_byte(fs_info, page, &dirty_range_start,
1368 				     &dirty_range_end);
1369 		if (cur < dirty_range_start) {
1370 			cur = dirty_range_start;
1371 			continue;
1372 		}
1373 
1374 		em = btrfs_get_extent(inode, NULL, 0, cur, len);
1375 		if (IS_ERR(em)) {
1376 			ret = PTR_ERR_OR_ZERO(em);
1377 			goto out_error;
1378 		}
1379 
1380 		extent_offset = cur - em->start;
1381 		em_end = extent_map_end(em);
1382 		ASSERT(cur <= em_end);
1383 		ASSERT(cur < end);
1384 		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1385 		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1386 
1387 		block_start = em->block_start;
1388 		disk_bytenr = em->block_start + extent_offset;
1389 
1390 		ASSERT(!extent_map_is_compressed(em));
1391 		ASSERT(block_start != EXTENT_MAP_HOLE);
1392 		ASSERT(block_start != EXTENT_MAP_INLINE);
1393 
1394 		/*
1395 		 * Note that em_end from extent_map_end() and dirty_range_end from
1396 		 * find_next_dirty_byte() are all exclusive
1397 		 */
1398 		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1399 		free_extent_map(em);
1400 		em = NULL;
1401 
1402 		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1403 		if (!PageWriteback(page)) {
1404 			btrfs_err(inode->root->fs_info,
1405 				   "page %lu not writeback, cur %llu end %llu",
1406 			       page->index, cur, end);
1407 		}
1408 
1409 		/*
1410 		 * Although the PageDirty bit is cleared before entering this
1411 		 * function, subpage dirty bit is not cleared.
1412 		 * So clear subpage dirty bit here so next time we won't submit
1413 		 * page for range already written to disk.
1414 		 */
1415 		btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
1416 
1417 		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1418 				   cur - page_offset(page));
1419 		cur += iosize;
1420 		nr++;
1421 	}
1422 
1423 	btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
1424 	*nr_ret = nr;
1425 	return 0;
1426 
1427 out_error:
1428 	/*
1429 	 * If we finish without problem, we should not only clear page dirty,
1430 	 * but also empty subpage dirty bits
1431 	 */
1432 	*nr_ret = nr;
1433 	return ret;
1434 }
1435 
1436 /*
1437  * the writepage semantics are similar to regular writepage.  extent
1438  * records are inserted to lock ranges in the tree, and as dirty areas
1439  * are found, they are marked writeback.  Then the lock bits are removed
1440  * and the end_io handler clears the writeback ranges
1441  *
1442  * Return 0 if everything goes well.
1443  * Return <0 for error.
1444  */
1445 static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
1446 {
1447 	struct folio *folio = page_folio(page);
1448 	struct inode *inode = page->mapping->host;
1449 	const u64 page_start = page_offset(page);
1450 	int ret;
1451 	int nr = 0;
1452 	size_t pg_offset;
1453 	loff_t i_size = i_size_read(inode);
1454 	unsigned long end_index = i_size >> PAGE_SHIFT;
1455 
1456 	trace___extent_writepage(page, inode, bio_ctrl->wbc);
1457 
1458 	WARN_ON(!PageLocked(page));
1459 
1460 	pg_offset = offset_in_page(i_size);
1461 	if (page->index > end_index ||
1462 	   (page->index == end_index && !pg_offset)) {
1463 		folio_invalidate(folio, 0, folio_size(folio));
1464 		folio_unlock(folio);
1465 		return 0;
1466 	}
1467 
1468 	if (page->index == end_index)
1469 		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1470 
1471 	ret = set_page_extent_mapped(page);
1472 	if (ret < 0)
1473 		goto done;
1474 
1475 	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1476 	if (ret == 1)
1477 		return 0;
1478 	if (ret)
1479 		goto done;
1480 
1481 	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
1482 	if (ret == 1)
1483 		return 0;
1484 
1485 	bio_ctrl->wbc->nr_to_write--;
1486 
1487 done:
1488 	if (nr == 0) {
1489 		/* make sure the mapping tag for page dirty gets cleared */
1490 		set_page_writeback(page);
1491 		end_page_writeback(page);
1492 	}
1493 	if (ret) {
1494 		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1495 					       PAGE_SIZE, !ret);
1496 		mapping_set_error(page->mapping, ret);
1497 	}
1498 	unlock_page(page);
1499 	ASSERT(ret <= 0);
1500 	return ret;
1501 }
1502 
1503 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1504 {
1505 	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1506 		       TASK_UNINTERRUPTIBLE);
1507 }
1508 
1509 /*
1510  * Lock extent buffer status and pages for writeback.
1511  *
1512  * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1513  * extent buffer is not dirty)
1514  * Return %true is the extent buffer is submitted to bio.
1515  */
1516 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1517 			  struct writeback_control *wbc)
1518 {
1519 	struct btrfs_fs_info *fs_info = eb->fs_info;
1520 	bool ret = false;
1521 
1522 	btrfs_tree_lock(eb);
1523 	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1524 		btrfs_tree_unlock(eb);
1525 		if (wbc->sync_mode != WB_SYNC_ALL)
1526 			return false;
1527 		wait_on_extent_buffer_writeback(eb);
1528 		btrfs_tree_lock(eb);
1529 	}
1530 
1531 	/*
1532 	 * We need to do this to prevent races in people who check if the eb is
1533 	 * under IO since we can end up having no IO bits set for a short period
1534 	 * of time.
1535 	 */
1536 	spin_lock(&eb->refs_lock);
1537 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1538 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1539 		spin_unlock(&eb->refs_lock);
1540 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1541 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1542 					 -eb->len,
1543 					 fs_info->dirty_metadata_batch);
1544 		ret = true;
1545 	} else {
1546 		spin_unlock(&eb->refs_lock);
1547 	}
1548 	btrfs_tree_unlock(eb);
1549 	return ret;
1550 }
1551 
1552 static void set_btree_ioerr(struct extent_buffer *eb)
1553 {
1554 	struct btrfs_fs_info *fs_info = eb->fs_info;
1555 
1556 	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1557 
1558 	/*
1559 	 * A read may stumble upon this buffer later, make sure that it gets an
1560 	 * error and knows there was an error.
1561 	 */
1562 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1563 
1564 	/*
1565 	 * We need to set the mapping with the io error as well because a write
1566 	 * error will flip the file system readonly, and then syncfs() will
1567 	 * return a 0 because we are readonly if we don't modify the err seq for
1568 	 * the superblock.
1569 	 */
1570 	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1571 
1572 	/*
1573 	 * If writeback for a btree extent that doesn't belong to a log tree
1574 	 * failed, increment the counter transaction->eb_write_errors.
1575 	 * We do this because while the transaction is running and before it's
1576 	 * committing (when we call filemap_fdata[write|wait]_range against
1577 	 * the btree inode), we might have
1578 	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1579 	 * returns an error or an error happens during writeback, when we're
1580 	 * committing the transaction we wouldn't know about it, since the pages
1581 	 * can be no longer dirty nor marked anymore for writeback (if a
1582 	 * subsequent modification to the extent buffer didn't happen before the
1583 	 * transaction commit), which makes filemap_fdata[write|wait]_range not
1584 	 * able to find the pages tagged with SetPageError at transaction
1585 	 * commit time. So if this happens we must abort the transaction,
1586 	 * otherwise we commit a super block with btree roots that point to
1587 	 * btree nodes/leafs whose content on disk is invalid - either garbage
1588 	 * or the content of some node/leaf from a past generation that got
1589 	 * cowed or deleted and is no longer valid.
1590 	 *
1591 	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1592 	 * not be enough - we need to distinguish between log tree extents vs
1593 	 * non-log tree extents, and the next filemap_fdatawait_range() call
1594 	 * will catch and clear such errors in the mapping - and that call might
1595 	 * be from a log sync and not from a transaction commit. Also, checking
1596 	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1597 	 * not done and would not be reliable - the eb might have been released
1598 	 * from memory and reading it back again means that flag would not be
1599 	 * set (since it's a runtime flag, not persisted on disk).
1600 	 *
1601 	 * Using the flags below in the btree inode also makes us achieve the
1602 	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1603 	 * writeback for all dirty pages and before filemap_fdatawait_range()
1604 	 * is called, the writeback for all dirty pages had already finished
1605 	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1606 	 * filemap_fdatawait_range() would return success, as it could not know
1607 	 * that writeback errors happened (the pages were no longer tagged for
1608 	 * writeback).
1609 	 */
1610 	switch (eb->log_index) {
1611 	case -1:
1612 		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1613 		break;
1614 	case 0:
1615 		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1616 		break;
1617 	case 1:
1618 		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1619 		break;
1620 	default:
1621 		BUG(); /* unexpected, logic error */
1622 	}
1623 }
1624 
1625 /*
1626  * The endio specific version which won't touch any unsafe spinlock in endio
1627  * context.
1628  */
1629 static struct extent_buffer *find_extent_buffer_nolock(
1630 		struct btrfs_fs_info *fs_info, u64 start)
1631 {
1632 	struct extent_buffer *eb;
1633 
1634 	rcu_read_lock();
1635 	eb = radix_tree_lookup(&fs_info->buffer_radix,
1636 			       start >> fs_info->sectorsize_bits);
1637 	if (eb && atomic_inc_not_zero(&eb->refs)) {
1638 		rcu_read_unlock();
1639 		return eb;
1640 	}
1641 	rcu_read_unlock();
1642 	return NULL;
1643 }
1644 
1645 static void end_bbio_meta_write(struct btrfs_bio *bbio)
1646 {
1647 	struct extent_buffer *eb = bbio->private;
1648 	struct btrfs_fs_info *fs_info = eb->fs_info;
1649 	bool uptodate = !bbio->bio.bi_status;
1650 	struct folio_iter fi;
1651 	u32 bio_offset = 0;
1652 
1653 	if (!uptodate)
1654 		set_btree_ioerr(eb);
1655 
1656 	bio_for_each_folio_all(fi, &bbio->bio) {
1657 		u64 start = eb->start + bio_offset;
1658 		struct folio *folio = fi.folio;
1659 		u32 len = fi.length;
1660 
1661 		btrfs_folio_clear_writeback(fs_info, folio, start, len);
1662 		bio_offset += len;
1663 	}
1664 
1665 	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1666 	smp_mb__after_atomic();
1667 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1668 
1669 	bio_put(&bbio->bio);
1670 }
1671 
1672 static void prepare_eb_write(struct extent_buffer *eb)
1673 {
1674 	u32 nritems;
1675 	unsigned long start;
1676 	unsigned long end;
1677 
1678 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1679 
1680 	/* Set btree blocks beyond nritems with 0 to avoid stale content */
1681 	nritems = btrfs_header_nritems(eb);
1682 	if (btrfs_header_level(eb) > 0) {
1683 		end = btrfs_node_key_ptr_offset(eb, nritems);
1684 		memzero_extent_buffer(eb, end, eb->len - end);
1685 	} else {
1686 		/*
1687 		 * Leaf:
1688 		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1689 		 */
1690 		start = btrfs_item_nr_offset(eb, nritems);
1691 		end = btrfs_item_nr_offset(eb, 0);
1692 		if (nritems == 0)
1693 			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1694 		else
1695 			end += btrfs_item_offset(eb, nritems - 1);
1696 		memzero_extent_buffer(eb, start, end - start);
1697 	}
1698 }
1699 
1700 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1701 					    struct writeback_control *wbc)
1702 {
1703 	struct btrfs_fs_info *fs_info = eb->fs_info;
1704 	struct btrfs_bio *bbio;
1705 
1706 	prepare_eb_write(eb);
1707 
1708 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1709 			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1710 			       eb->fs_info, end_bbio_meta_write, eb);
1711 	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1712 	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1713 	wbc_init_bio(wbc, &bbio->bio);
1714 	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1715 	bbio->file_offset = eb->start;
1716 	if (fs_info->nodesize < PAGE_SIZE) {
1717 		struct folio *folio = eb->folios[0];
1718 		bool ret;
1719 
1720 		folio_lock(folio);
1721 		btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1722 		if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1723 						       eb->len)) {
1724 			folio_clear_dirty_for_io(folio);
1725 			wbc->nr_to_write--;
1726 		}
1727 		ret = bio_add_folio(&bbio->bio, folio, eb->len,
1728 				    eb->start - folio_pos(folio));
1729 		ASSERT(ret);
1730 		wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1731 		folio_unlock(folio);
1732 	} else {
1733 		int num_folios = num_extent_folios(eb);
1734 
1735 		for (int i = 0; i < num_folios; i++) {
1736 			struct folio *folio = eb->folios[i];
1737 			bool ret;
1738 
1739 			folio_lock(folio);
1740 			folio_clear_dirty_for_io(folio);
1741 			folio_start_writeback(folio);
1742 			ret = bio_add_folio(&bbio->bio, folio, folio_size(folio), 0);
1743 			ASSERT(ret);
1744 			wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1745 						 folio_size(folio));
1746 			wbc->nr_to_write -= folio_nr_pages(folio);
1747 			folio_unlock(folio);
1748 		}
1749 	}
1750 	btrfs_submit_bio(bbio, 0);
1751 }
1752 
1753 /*
1754  * Submit one subpage btree page.
1755  *
1756  * The main difference to submit_eb_page() is:
1757  * - Page locking
1758  *   For subpage, we don't rely on page locking at all.
1759  *
1760  * - Flush write bio
1761  *   We only flush bio if we may be unable to fit current extent buffers into
1762  *   current bio.
1763  *
1764  * Return >=0 for the number of submitted extent buffers.
1765  * Return <0 for fatal error.
1766  */
1767 static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1768 {
1769 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
1770 	struct folio *folio = page_folio(page);
1771 	int submitted = 0;
1772 	u64 page_start = page_offset(page);
1773 	int bit_start = 0;
1774 	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1775 
1776 	/* Lock and write each dirty extent buffers in the range */
1777 	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1778 		struct btrfs_subpage *subpage = folio_get_private(folio);
1779 		struct extent_buffer *eb;
1780 		unsigned long flags;
1781 		u64 start;
1782 
1783 		/*
1784 		 * Take private lock to ensure the subpage won't be detached
1785 		 * in the meantime.
1786 		 */
1787 		spin_lock(&page->mapping->i_private_lock);
1788 		if (!folio_test_private(folio)) {
1789 			spin_unlock(&page->mapping->i_private_lock);
1790 			break;
1791 		}
1792 		spin_lock_irqsave(&subpage->lock, flags);
1793 		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1794 			      subpage->bitmaps)) {
1795 			spin_unlock_irqrestore(&subpage->lock, flags);
1796 			spin_unlock(&page->mapping->i_private_lock);
1797 			bit_start++;
1798 			continue;
1799 		}
1800 
1801 		start = page_start + bit_start * fs_info->sectorsize;
1802 		bit_start += sectors_per_node;
1803 
1804 		/*
1805 		 * Here we just want to grab the eb without touching extra
1806 		 * spin locks, so call find_extent_buffer_nolock().
1807 		 */
1808 		eb = find_extent_buffer_nolock(fs_info, start);
1809 		spin_unlock_irqrestore(&subpage->lock, flags);
1810 		spin_unlock(&page->mapping->i_private_lock);
1811 
1812 		/*
1813 		 * The eb has already reached 0 refs thus find_extent_buffer()
1814 		 * doesn't return it. We don't need to write back such eb
1815 		 * anyway.
1816 		 */
1817 		if (!eb)
1818 			continue;
1819 
1820 		if (lock_extent_buffer_for_io(eb, wbc)) {
1821 			write_one_eb(eb, wbc);
1822 			submitted++;
1823 		}
1824 		free_extent_buffer(eb);
1825 	}
1826 	return submitted;
1827 }
1828 
1829 /*
1830  * Submit all page(s) of one extent buffer.
1831  *
1832  * @page:	the page of one extent buffer
1833  * @eb_context:	to determine if we need to submit this page, if current page
1834  *		belongs to this eb, we don't need to submit
1835  *
1836  * The caller should pass each page in their bytenr order, and here we use
1837  * @eb_context to determine if we have submitted pages of one extent buffer.
1838  *
1839  * If we have, we just skip until we hit a new page that doesn't belong to
1840  * current @eb_context.
1841  *
1842  * If not, we submit all the page(s) of the extent buffer.
1843  *
1844  * Return >0 if we have submitted the extent buffer successfully.
1845  * Return 0 if we don't need to submit the page, as it's already submitted by
1846  * previous call.
1847  * Return <0 for fatal error.
1848  */
1849 static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1850 {
1851 	struct writeback_control *wbc = ctx->wbc;
1852 	struct address_space *mapping = page->mapping;
1853 	struct folio *folio = page_folio(page);
1854 	struct extent_buffer *eb;
1855 	int ret;
1856 
1857 	if (!folio_test_private(folio))
1858 		return 0;
1859 
1860 	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
1861 		return submit_eb_subpage(page, wbc);
1862 
1863 	spin_lock(&mapping->i_private_lock);
1864 	if (!folio_test_private(folio)) {
1865 		spin_unlock(&mapping->i_private_lock);
1866 		return 0;
1867 	}
1868 
1869 	eb = folio_get_private(folio);
1870 
1871 	/*
1872 	 * Shouldn't happen and normally this would be a BUG_ON but no point
1873 	 * crashing the machine for something we can survive anyway.
1874 	 */
1875 	if (WARN_ON(!eb)) {
1876 		spin_unlock(&mapping->i_private_lock);
1877 		return 0;
1878 	}
1879 
1880 	if (eb == ctx->eb) {
1881 		spin_unlock(&mapping->i_private_lock);
1882 		return 0;
1883 	}
1884 	ret = atomic_inc_not_zero(&eb->refs);
1885 	spin_unlock(&mapping->i_private_lock);
1886 	if (!ret)
1887 		return 0;
1888 
1889 	ctx->eb = eb;
1890 
1891 	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1892 	if (ret) {
1893 		if (ret == -EBUSY)
1894 			ret = 0;
1895 		free_extent_buffer(eb);
1896 		return ret;
1897 	}
1898 
1899 	if (!lock_extent_buffer_for_io(eb, wbc)) {
1900 		free_extent_buffer(eb);
1901 		return 0;
1902 	}
1903 	/* Implies write in zoned mode. */
1904 	if (ctx->zoned_bg) {
1905 		/* Mark the last eb in the block group. */
1906 		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1907 		ctx->zoned_bg->meta_write_pointer += eb->len;
1908 	}
1909 	write_one_eb(eb, wbc);
1910 	free_extent_buffer(eb);
1911 	return 1;
1912 }
1913 
1914 int btree_write_cache_pages(struct address_space *mapping,
1915 				   struct writeback_control *wbc)
1916 {
1917 	struct btrfs_eb_write_context ctx = { .wbc = wbc };
1918 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
1919 	int ret = 0;
1920 	int done = 0;
1921 	int nr_to_write_done = 0;
1922 	struct folio_batch fbatch;
1923 	unsigned int nr_folios;
1924 	pgoff_t index;
1925 	pgoff_t end;		/* Inclusive */
1926 	int scanned = 0;
1927 	xa_mark_t tag;
1928 
1929 	folio_batch_init(&fbatch);
1930 	if (wbc->range_cyclic) {
1931 		index = mapping->writeback_index; /* Start from prev offset */
1932 		end = -1;
1933 		/*
1934 		 * Start from the beginning does not need to cycle over the
1935 		 * range, mark it as scanned.
1936 		 */
1937 		scanned = (index == 0);
1938 	} else {
1939 		index = wbc->range_start >> PAGE_SHIFT;
1940 		end = wbc->range_end >> PAGE_SHIFT;
1941 		scanned = 1;
1942 	}
1943 	if (wbc->sync_mode == WB_SYNC_ALL)
1944 		tag = PAGECACHE_TAG_TOWRITE;
1945 	else
1946 		tag = PAGECACHE_TAG_DIRTY;
1947 	btrfs_zoned_meta_io_lock(fs_info);
1948 retry:
1949 	if (wbc->sync_mode == WB_SYNC_ALL)
1950 		tag_pages_for_writeback(mapping, index, end);
1951 	while (!done && !nr_to_write_done && (index <= end) &&
1952 	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1953 					    tag, &fbatch))) {
1954 		unsigned i;
1955 
1956 		for (i = 0; i < nr_folios; i++) {
1957 			struct folio *folio = fbatch.folios[i];
1958 
1959 			ret = submit_eb_page(&folio->page, &ctx);
1960 			if (ret == 0)
1961 				continue;
1962 			if (ret < 0) {
1963 				done = 1;
1964 				break;
1965 			}
1966 
1967 			/*
1968 			 * the filesystem may choose to bump up nr_to_write.
1969 			 * We have to make sure to honor the new nr_to_write
1970 			 * at any time
1971 			 */
1972 			nr_to_write_done = wbc->nr_to_write <= 0;
1973 		}
1974 		folio_batch_release(&fbatch);
1975 		cond_resched();
1976 	}
1977 	if (!scanned && !done) {
1978 		/*
1979 		 * We hit the last page and there is more work to be done: wrap
1980 		 * back to the start of the file
1981 		 */
1982 		scanned = 1;
1983 		index = 0;
1984 		goto retry;
1985 	}
1986 	/*
1987 	 * If something went wrong, don't allow any metadata write bio to be
1988 	 * submitted.
1989 	 *
1990 	 * This would prevent use-after-free if we had dirty pages not
1991 	 * cleaned up, which can still happen by fuzzed images.
1992 	 *
1993 	 * - Bad extent tree
1994 	 *   Allowing existing tree block to be allocated for other trees.
1995 	 *
1996 	 * - Log tree operations
1997 	 *   Exiting tree blocks get allocated to log tree, bumps its
1998 	 *   generation, then get cleaned in tree re-balance.
1999 	 *   Such tree block will not be written back, since it's clean,
2000 	 *   thus no WRITTEN flag set.
2001 	 *   And after log writes back, this tree block is not traced by
2002 	 *   any dirty extent_io_tree.
2003 	 *
2004 	 * - Offending tree block gets re-dirtied from its original owner
2005 	 *   Since it has bumped generation, no WRITTEN flag, it can be
2006 	 *   reused without COWing. This tree block will not be traced
2007 	 *   by btrfs_transaction::dirty_pages.
2008 	 *
2009 	 *   Now such dirty tree block will not be cleaned by any dirty
2010 	 *   extent io tree. Thus we don't want to submit such wild eb
2011 	 *   if the fs already has error.
2012 	 *
2013 	 * We can get ret > 0 from submit_extent_page() indicating how many ebs
2014 	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2015 	 */
2016 	if (ret > 0)
2017 		ret = 0;
2018 	if (!ret && BTRFS_FS_ERROR(fs_info))
2019 		ret = -EROFS;
2020 
2021 	if (ctx.zoned_bg)
2022 		btrfs_put_block_group(ctx.zoned_bg);
2023 	btrfs_zoned_meta_io_unlock(fs_info);
2024 	return ret;
2025 }
2026 
2027 /*
2028  * Walk the list of dirty pages of the given address space and write all of them.
2029  *
2030  * @mapping:   address space structure to write
2031  * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
2032  * @bio_ctrl:  holds context for the write, namely the bio
2033  *
2034  * If a page is already under I/O, write_cache_pages() skips it, even
2035  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2036  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2037  * and msync() need to guarantee that all the data which was dirty at the time
2038  * the call was made get new I/O started against them.  If wbc->sync_mode is
2039  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2040  * existing IO to complete.
2041  */
2042 static int extent_write_cache_pages(struct address_space *mapping,
2043 			     struct btrfs_bio_ctrl *bio_ctrl)
2044 {
2045 	struct writeback_control *wbc = bio_ctrl->wbc;
2046 	struct inode *inode = mapping->host;
2047 	int ret = 0;
2048 	int done = 0;
2049 	int nr_to_write_done = 0;
2050 	struct folio_batch fbatch;
2051 	unsigned int nr_folios;
2052 	pgoff_t index;
2053 	pgoff_t end;		/* Inclusive */
2054 	pgoff_t done_index;
2055 	int range_whole = 0;
2056 	int scanned = 0;
2057 	xa_mark_t tag;
2058 
2059 	/*
2060 	 * We have to hold onto the inode so that ordered extents can do their
2061 	 * work when the IO finishes.  The alternative to this is failing to add
2062 	 * an ordered extent if the igrab() fails there and that is a huge pain
2063 	 * to deal with, so instead just hold onto the inode throughout the
2064 	 * writepages operation.  If it fails here we are freeing up the inode
2065 	 * anyway and we'd rather not waste our time writing out stuff that is
2066 	 * going to be truncated anyway.
2067 	 */
2068 	if (!igrab(inode))
2069 		return 0;
2070 
2071 	folio_batch_init(&fbatch);
2072 	if (wbc->range_cyclic) {
2073 		index = mapping->writeback_index; /* Start from prev offset */
2074 		end = -1;
2075 		/*
2076 		 * Start from the beginning does not need to cycle over the
2077 		 * range, mark it as scanned.
2078 		 */
2079 		scanned = (index == 0);
2080 	} else {
2081 		index = wbc->range_start >> PAGE_SHIFT;
2082 		end = wbc->range_end >> PAGE_SHIFT;
2083 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2084 			range_whole = 1;
2085 		scanned = 1;
2086 	}
2087 
2088 	/*
2089 	 * We do the tagged writepage as long as the snapshot flush bit is set
2090 	 * and we are the first one who do the filemap_flush() on this inode.
2091 	 *
2092 	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2093 	 * not race in and drop the bit.
2094 	 */
2095 	if (range_whole && wbc->nr_to_write == LONG_MAX &&
2096 	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2097 			       &BTRFS_I(inode)->runtime_flags))
2098 		wbc->tagged_writepages = 1;
2099 
2100 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2101 		tag = PAGECACHE_TAG_TOWRITE;
2102 	else
2103 		tag = PAGECACHE_TAG_DIRTY;
2104 retry:
2105 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2106 		tag_pages_for_writeback(mapping, index, end);
2107 	done_index = index;
2108 	while (!done && !nr_to_write_done && (index <= end) &&
2109 			(nr_folios = filemap_get_folios_tag(mapping, &index,
2110 							end, tag, &fbatch))) {
2111 		unsigned i;
2112 
2113 		for (i = 0; i < nr_folios; i++) {
2114 			struct folio *folio = fbatch.folios[i];
2115 
2116 			done_index = folio_next_index(folio);
2117 			/*
2118 			 * At this point we hold neither the i_pages lock nor
2119 			 * the page lock: the page may be truncated or
2120 			 * invalidated (changing page->mapping to NULL),
2121 			 * or even swizzled back from swapper_space to
2122 			 * tmpfs file mapping
2123 			 */
2124 			if (!folio_trylock(folio)) {
2125 				submit_write_bio(bio_ctrl, 0);
2126 				folio_lock(folio);
2127 			}
2128 
2129 			if (unlikely(folio->mapping != mapping)) {
2130 				folio_unlock(folio);
2131 				continue;
2132 			}
2133 
2134 			if (!folio_test_dirty(folio)) {
2135 				/* Someone wrote it for us. */
2136 				folio_unlock(folio);
2137 				continue;
2138 			}
2139 
2140 			if (wbc->sync_mode != WB_SYNC_NONE) {
2141 				if (folio_test_writeback(folio))
2142 					submit_write_bio(bio_ctrl, 0);
2143 				folio_wait_writeback(folio);
2144 			}
2145 
2146 			if (folio_test_writeback(folio) ||
2147 			    !folio_clear_dirty_for_io(folio)) {
2148 				folio_unlock(folio);
2149 				continue;
2150 			}
2151 
2152 			ret = __extent_writepage(&folio->page, bio_ctrl);
2153 			if (ret < 0) {
2154 				done = 1;
2155 				break;
2156 			}
2157 
2158 			/*
2159 			 * The filesystem may choose to bump up nr_to_write.
2160 			 * We have to make sure to honor the new nr_to_write
2161 			 * at any time.
2162 			 */
2163 			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2164 					    wbc->nr_to_write <= 0);
2165 		}
2166 		folio_batch_release(&fbatch);
2167 		cond_resched();
2168 	}
2169 	if (!scanned && !done) {
2170 		/*
2171 		 * We hit the last page and there is more work to be done: wrap
2172 		 * back to the start of the file
2173 		 */
2174 		scanned = 1;
2175 		index = 0;
2176 
2177 		/*
2178 		 * If we're looping we could run into a page that is locked by a
2179 		 * writer and that writer could be waiting on writeback for a
2180 		 * page in our current bio, and thus deadlock, so flush the
2181 		 * write bio here.
2182 		 */
2183 		submit_write_bio(bio_ctrl, 0);
2184 		goto retry;
2185 	}
2186 
2187 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2188 		mapping->writeback_index = done_index;
2189 
2190 	btrfs_add_delayed_iput(BTRFS_I(inode));
2191 	return ret;
2192 }
2193 
2194 /*
2195  * Submit the pages in the range to bio for call sites which delalloc range has
2196  * already been ran (aka, ordered extent inserted) and all pages are still
2197  * locked.
2198  */
2199 void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2200 			       u64 start, u64 end, struct writeback_control *wbc,
2201 			       bool pages_dirty)
2202 {
2203 	bool found_error = false;
2204 	int ret = 0;
2205 	struct address_space *mapping = inode->i_mapping;
2206 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2207 	const u32 sectorsize = fs_info->sectorsize;
2208 	loff_t i_size = i_size_read(inode);
2209 	u64 cur = start;
2210 	struct btrfs_bio_ctrl bio_ctrl = {
2211 		.wbc = wbc,
2212 		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2213 	};
2214 
2215 	if (wbc->no_cgroup_owner)
2216 		bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2217 
2218 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2219 
2220 	while (cur <= end) {
2221 		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2222 		u32 cur_len = cur_end + 1 - cur;
2223 		struct page *page;
2224 		int nr = 0;
2225 
2226 		page = find_get_page(mapping, cur >> PAGE_SHIFT);
2227 		ASSERT(PageLocked(page));
2228 		if (pages_dirty && page != locked_page) {
2229 			ASSERT(PageDirty(page));
2230 			clear_page_dirty_for_io(page);
2231 		}
2232 
2233 		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2234 					    i_size, &nr);
2235 		if (ret == 1)
2236 			goto next_page;
2237 
2238 		/* Make sure the mapping tag for page dirty gets cleared. */
2239 		if (nr == 0) {
2240 			set_page_writeback(page);
2241 			end_page_writeback(page);
2242 		}
2243 		if (ret) {
2244 			btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2245 						       cur, cur_len, !ret);
2246 			mapping_set_error(page->mapping, ret);
2247 		}
2248 		btrfs_folio_unlock_writer(fs_info, page_folio(page), cur, cur_len);
2249 		if (ret < 0)
2250 			found_error = true;
2251 next_page:
2252 		put_page(page);
2253 		cur = cur_end + 1;
2254 	}
2255 
2256 	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2257 }
2258 
2259 int extent_writepages(struct address_space *mapping,
2260 		      struct writeback_control *wbc)
2261 {
2262 	struct inode *inode = mapping->host;
2263 	int ret = 0;
2264 	struct btrfs_bio_ctrl bio_ctrl = {
2265 		.wbc = wbc,
2266 		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2267 	};
2268 
2269 	/*
2270 	 * Allow only a single thread to do the reloc work in zoned mode to
2271 	 * protect the write pointer updates.
2272 	 */
2273 	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2274 	ret = extent_write_cache_pages(mapping, &bio_ctrl);
2275 	submit_write_bio(&bio_ctrl, ret);
2276 	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2277 	return ret;
2278 }
2279 
2280 void extent_readahead(struct readahead_control *rac)
2281 {
2282 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2283 	struct page *pagepool[16];
2284 	struct extent_map *em_cached = NULL;
2285 	u64 prev_em_start = (u64)-1;
2286 	int nr;
2287 
2288 	while ((nr = readahead_page_batch(rac, pagepool))) {
2289 		u64 contig_start = readahead_pos(rac);
2290 		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2291 
2292 		contiguous_readpages(pagepool, nr, contig_start, contig_end,
2293 				&em_cached, &bio_ctrl, &prev_em_start);
2294 	}
2295 
2296 	if (em_cached)
2297 		free_extent_map(em_cached);
2298 	submit_one_bio(&bio_ctrl);
2299 }
2300 
2301 /*
2302  * basic invalidate_folio code, this waits on any locked or writeback
2303  * ranges corresponding to the folio, and then deletes any extent state
2304  * records from the tree
2305  */
2306 int extent_invalidate_folio(struct extent_io_tree *tree,
2307 			  struct folio *folio, size_t offset)
2308 {
2309 	struct extent_state *cached_state = NULL;
2310 	u64 start = folio_pos(folio);
2311 	u64 end = start + folio_size(folio) - 1;
2312 	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
2313 
2314 	/* This function is only called for the btree inode */
2315 	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2316 
2317 	start += ALIGN(offset, blocksize);
2318 	if (start > end)
2319 		return 0;
2320 
2321 	lock_extent(tree, start, end, &cached_state);
2322 	folio_wait_writeback(folio);
2323 
2324 	/*
2325 	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2326 	 * so here we only need to unlock the extent range to free any
2327 	 * existing extent state.
2328 	 */
2329 	unlock_extent(tree, start, end, &cached_state);
2330 	return 0;
2331 }
2332 
2333 /*
2334  * a helper for release_folio, this tests for areas of the page that
2335  * are locked or under IO and drops the related state bits if it is safe
2336  * to drop the page.
2337  */
2338 static int try_release_extent_state(struct extent_io_tree *tree,
2339 				    struct page *page, gfp_t mask)
2340 {
2341 	u64 start = page_offset(page);
2342 	u64 end = start + PAGE_SIZE - 1;
2343 	int ret = 1;
2344 
2345 	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2346 		ret = 0;
2347 	} else {
2348 		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2349 				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2350 				   EXTENT_QGROUP_RESERVED);
2351 
2352 		/*
2353 		 * At this point we can safely clear everything except the
2354 		 * locked bit, the nodatasum bit and the delalloc new bit.
2355 		 * The delalloc new bit will be cleared by ordered extent
2356 		 * completion.
2357 		 */
2358 		ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2359 
2360 		/* if clear_extent_bit failed for enomem reasons,
2361 		 * we can't allow the release to continue.
2362 		 */
2363 		if (ret < 0)
2364 			ret = 0;
2365 		else
2366 			ret = 1;
2367 	}
2368 	return ret;
2369 }
2370 
2371 /*
2372  * a helper for release_folio.  As long as there are no locked extents
2373  * in the range corresponding to the page, both state records and extent
2374  * map records are removed
2375  */
2376 int try_release_extent_mapping(struct page *page, gfp_t mask)
2377 {
2378 	struct extent_map *em;
2379 	u64 start = page_offset(page);
2380 	u64 end = start + PAGE_SIZE - 1;
2381 	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
2382 	struct extent_io_tree *tree = &btrfs_inode->io_tree;
2383 	struct extent_map_tree *map = &btrfs_inode->extent_tree;
2384 
2385 	if (gfpflags_allow_blocking(mask) &&
2386 	    page->mapping->host->i_size > SZ_16M) {
2387 		u64 len;
2388 		while (start <= end) {
2389 			struct btrfs_fs_info *fs_info;
2390 			u64 cur_gen;
2391 
2392 			len = end - start + 1;
2393 			write_lock(&map->lock);
2394 			em = lookup_extent_mapping(map, start, len);
2395 			if (!em) {
2396 				write_unlock(&map->lock);
2397 				break;
2398 			}
2399 			if ((em->flags & EXTENT_FLAG_PINNED) ||
2400 			    em->start != start) {
2401 				write_unlock(&map->lock);
2402 				free_extent_map(em);
2403 				break;
2404 			}
2405 			if (test_range_bit_exists(tree, em->start,
2406 						  extent_map_end(em) - 1,
2407 						  EXTENT_LOCKED))
2408 				goto next;
2409 			/*
2410 			 * If it's not in the list of modified extents, used
2411 			 * by a fast fsync, we can remove it. If it's being
2412 			 * logged we can safely remove it since fsync took an
2413 			 * extra reference on the em.
2414 			 */
2415 			if (list_empty(&em->list) ||
2416 			    (em->flags & EXTENT_FLAG_LOGGING))
2417 				goto remove_em;
2418 			/*
2419 			 * If it's in the list of modified extents, remove it
2420 			 * only if its generation is older then the current one,
2421 			 * in which case we don't need it for a fast fsync.
2422 			 * Otherwise don't remove it, we could be racing with an
2423 			 * ongoing fast fsync that could miss the new extent.
2424 			 */
2425 			fs_info = btrfs_inode->root->fs_info;
2426 			spin_lock(&fs_info->trans_lock);
2427 			cur_gen = fs_info->generation;
2428 			spin_unlock(&fs_info->trans_lock);
2429 			if (em->generation >= cur_gen)
2430 				goto next;
2431 remove_em:
2432 			/*
2433 			 * We only remove extent maps that are not in the list of
2434 			 * modified extents or that are in the list but with a
2435 			 * generation lower then the current generation, so there
2436 			 * is no need to set the full fsync flag on the inode (it
2437 			 * hurts the fsync performance for workloads with a data
2438 			 * size that exceeds or is close to the system's memory).
2439 			 */
2440 			remove_extent_mapping(map, em);
2441 			/* once for the rb tree */
2442 			free_extent_map(em);
2443 next:
2444 			start = extent_map_end(em);
2445 			write_unlock(&map->lock);
2446 
2447 			/* once for us */
2448 			free_extent_map(em);
2449 
2450 			cond_resched(); /* Allow large-extent preemption. */
2451 		}
2452 	}
2453 	return try_release_extent_state(tree, page, mask);
2454 }
2455 
2456 /*
2457  * To cache previous fiemap extent
2458  *
2459  * Will be used for merging fiemap extent
2460  */
2461 struct fiemap_cache {
2462 	u64 offset;
2463 	u64 phys;
2464 	u64 len;
2465 	u32 flags;
2466 	bool cached;
2467 };
2468 
2469 /*
2470  * Helper to submit fiemap extent.
2471  *
2472  * Will try to merge current fiemap extent specified by @offset, @phys,
2473  * @len and @flags with cached one.
2474  * And only when we fails to merge, cached one will be submitted as
2475  * fiemap extent.
2476  *
2477  * Return value is the same as fiemap_fill_next_extent().
2478  */
2479 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2480 				struct fiemap_cache *cache,
2481 				u64 offset, u64 phys, u64 len, u32 flags)
2482 {
2483 	u64 cache_end;
2484 	int ret = 0;
2485 
2486 	/* Set at the end of extent_fiemap(). */
2487 	ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2488 
2489 	if (!cache->cached)
2490 		goto assign;
2491 
2492 	/*
2493 	 * When iterating the extents of the inode, at extent_fiemap(), we may
2494 	 * find an extent that starts at an offset behind the end offset of the
2495 	 * previous extent we processed. This happens if fiemap is called
2496 	 * without FIEMAP_FLAG_SYNC and there are ordered extents completing
2497 	 * while we call btrfs_next_leaf() (through fiemap_next_leaf_item()).
2498 	 *
2499 	 * For example we are in leaf X processing its last item, which is the
2500 	 * file extent item for file range [512K, 1M[, and after
2501 	 * btrfs_next_leaf() releases the path, there's an ordered extent that
2502 	 * completes for the file range [768K, 2M[, and that results in trimming
2503 	 * the file extent item so that it now corresponds to the file range
2504 	 * [512K, 768K[ and a new file extent item is inserted for the file
2505 	 * range [768K, 2M[, which may end up as the last item of leaf X or as
2506 	 * the first item of the next leaf - in either case btrfs_next_leaf()
2507 	 * will leave us with a path pointing to the new extent item, for the
2508 	 * file range [768K, 2M[, since that's the first key that follows the
2509 	 * last one we processed. So in order not to report overlapping extents
2510 	 * to user space, we trim the length of the previously cached extent and
2511 	 * emit it.
2512 	 *
2513 	 * Upon calling btrfs_next_leaf() we may also find an extent with an
2514 	 * offset smaller than or equals to cache->offset, and this happens
2515 	 * when we had a hole or prealloc extent with several delalloc ranges in
2516 	 * it, but after btrfs_next_leaf() released the path, delalloc was
2517 	 * flushed and the resulting ordered extents were completed, so we can
2518 	 * now have found a file extent item for an offset that is smaller than
2519 	 * or equals to what we have in cache->offset. We deal with this as
2520 	 * described below.
2521 	 */
2522 	cache_end = cache->offset + cache->len;
2523 	if (cache_end > offset) {
2524 		if (offset == cache->offset) {
2525 			/*
2526 			 * We cached a dealloc range (found in the io tree) for
2527 			 * a hole or prealloc extent and we have now found a
2528 			 * file extent item for the same offset. What we have
2529 			 * now is more recent and up to date, so discard what
2530 			 * we had in the cache and use what we have just found.
2531 			 */
2532 			goto assign;
2533 		} else if (offset > cache->offset) {
2534 			/*
2535 			 * The extent range we previously found ends after the
2536 			 * offset of the file extent item we found and that
2537 			 * offset falls somewhere in the middle of that previous
2538 			 * extent range. So adjust the range we previously found
2539 			 * to end at the offset of the file extent item we have
2540 			 * just found, since this extent is more up to date.
2541 			 * Emit that adjusted range and cache the file extent
2542 			 * item we have just found. This corresponds to the case
2543 			 * where a previously found file extent item was split
2544 			 * due to an ordered extent completing.
2545 			 */
2546 			cache->len = offset - cache->offset;
2547 			goto emit;
2548 		} else {
2549 			const u64 range_end = offset + len;
2550 
2551 			/*
2552 			 * The offset of the file extent item we have just found
2553 			 * is behind the cached offset. This means we were
2554 			 * processing a hole or prealloc extent for which we
2555 			 * have found delalloc ranges (in the io tree), so what
2556 			 * we have in the cache is the last delalloc range we
2557 			 * found while the file extent item we found can be
2558 			 * either for a whole delalloc range we previously
2559 			 * emmitted or only a part of that range.
2560 			 *
2561 			 * We have two cases here:
2562 			 *
2563 			 * 1) The file extent item's range ends at or behind the
2564 			 *    cached extent's end. In this case just ignore the
2565 			 *    current file extent item because we don't want to
2566 			 *    overlap with previous ranges that may have been
2567 			 *    emmitted already;
2568 			 *
2569 			 * 2) The file extent item starts behind the currently
2570 			 *    cached extent but its end offset goes beyond the
2571 			 *    end offset of the cached extent. We don't want to
2572 			 *    overlap with a previous range that may have been
2573 			 *    emmitted already, so we emit the currently cached
2574 			 *    extent and then partially store the current file
2575 			 *    extent item's range in the cache, for the subrange
2576 			 *    going the cached extent's end to the end of the
2577 			 *    file extent item.
2578 			 */
2579 			if (range_end <= cache_end)
2580 				return 0;
2581 
2582 			if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
2583 				phys += cache_end - offset;
2584 
2585 			offset = cache_end;
2586 			len = range_end - cache_end;
2587 			goto emit;
2588 		}
2589 	}
2590 
2591 	/*
2592 	 * Only merges fiemap extents if
2593 	 * 1) Their logical addresses are continuous
2594 	 *
2595 	 * 2) Their physical addresses are continuous
2596 	 *    So truly compressed (physical size smaller than logical size)
2597 	 *    extents won't get merged with each other
2598 	 *
2599 	 * 3) Share same flags
2600 	 */
2601 	if (cache->offset + cache->len  == offset &&
2602 	    cache->phys + cache->len == phys  &&
2603 	    cache->flags == flags) {
2604 		cache->len += len;
2605 		return 0;
2606 	}
2607 
2608 emit:
2609 	/* Not mergeable, need to submit cached one */
2610 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2611 				      cache->len, cache->flags);
2612 	cache->cached = false;
2613 	if (ret)
2614 		return ret;
2615 assign:
2616 	cache->cached = true;
2617 	cache->offset = offset;
2618 	cache->phys = phys;
2619 	cache->len = len;
2620 	cache->flags = flags;
2621 
2622 	return 0;
2623 }
2624 
2625 /*
2626  * Emit last fiemap cache
2627  *
2628  * The last fiemap cache may still be cached in the following case:
2629  * 0		      4k		    8k
2630  * |<- Fiemap range ->|
2631  * |<------------  First extent ----------->|
2632  *
2633  * In this case, the first extent range will be cached but not emitted.
2634  * So we must emit it before ending extent_fiemap().
2635  */
2636 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
2637 				  struct fiemap_cache *cache)
2638 {
2639 	int ret;
2640 
2641 	if (!cache->cached)
2642 		return 0;
2643 
2644 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2645 				      cache->len, cache->flags);
2646 	cache->cached = false;
2647 	if (ret > 0)
2648 		ret = 0;
2649 	return ret;
2650 }
2651 
2652 static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
2653 {
2654 	struct extent_buffer *clone;
2655 	struct btrfs_key key;
2656 	int slot;
2657 	int ret;
2658 
2659 	path->slots[0]++;
2660 	if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2661 		return 0;
2662 
2663 	ret = btrfs_next_leaf(inode->root, path);
2664 	if (ret != 0)
2665 		return ret;
2666 
2667 	/*
2668 	 * Don't bother with cloning if there are no more file extent items for
2669 	 * our inode.
2670 	 */
2671 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2672 	if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY)
2673 		return 1;
2674 
2675 	/* See the comment at fiemap_search_slot() about why we clone. */
2676 	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2677 	if (!clone)
2678 		return -ENOMEM;
2679 
2680 	slot = path->slots[0];
2681 	btrfs_release_path(path);
2682 	path->nodes[0] = clone;
2683 	path->slots[0] = slot;
2684 
2685 	return 0;
2686 }
2687 
2688 /*
2689  * Search for the first file extent item that starts at a given file offset or
2690  * the one that starts immediately before that offset.
2691  * Returns: 0 on success, < 0 on error, 1 if not found.
2692  */
2693 static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2694 			      u64 file_offset)
2695 {
2696 	const u64 ino = btrfs_ino(inode);
2697 	struct btrfs_root *root = inode->root;
2698 	struct extent_buffer *clone;
2699 	struct btrfs_key key;
2700 	int slot;
2701 	int ret;
2702 
2703 	key.objectid = ino;
2704 	key.type = BTRFS_EXTENT_DATA_KEY;
2705 	key.offset = file_offset;
2706 
2707 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2708 	if (ret < 0)
2709 		return ret;
2710 
2711 	if (ret > 0 && path->slots[0] > 0) {
2712 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2713 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2714 			path->slots[0]--;
2715 	}
2716 
2717 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2718 		ret = btrfs_next_leaf(root, path);
2719 		if (ret != 0)
2720 			return ret;
2721 
2722 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2723 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2724 			return 1;
2725 	}
2726 
2727 	/*
2728 	 * We clone the leaf and use it during fiemap. This is because while
2729 	 * using the leaf we do expensive things like checking if an extent is
2730 	 * shared, which can take a long time. In order to prevent blocking
2731 	 * other tasks for too long, we use a clone of the leaf. We have locked
2732 	 * the file range in the inode's io tree, so we know none of our file
2733 	 * extent items can change. This way we avoid blocking other tasks that
2734 	 * want to insert items for other inodes in the same leaf or b+tree
2735 	 * rebalance operations (triggered for example when someone is trying
2736 	 * to push items into this leaf when trying to insert an item in a
2737 	 * neighbour leaf).
2738 	 * We also need the private clone because holding a read lock on an
2739 	 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2740 	 * when we call fiemap_fill_next_extent(), because that may cause a page
2741 	 * fault when filling the user space buffer with fiemap data.
2742 	 */
2743 	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2744 	if (!clone)
2745 		return -ENOMEM;
2746 
2747 	slot = path->slots[0];
2748 	btrfs_release_path(path);
2749 	path->nodes[0] = clone;
2750 	path->slots[0] = slot;
2751 
2752 	return 0;
2753 }
2754 
2755 /*
2756  * Process a range which is a hole or a prealloc extent in the inode's subvolume
2757  * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2758  * extent. The end offset (@end) is inclusive.
2759  */
2760 static int fiemap_process_hole(struct btrfs_inode *inode,
2761 			       struct fiemap_extent_info *fieinfo,
2762 			       struct fiemap_cache *cache,
2763 			       struct extent_state **delalloc_cached_state,
2764 			       struct btrfs_backref_share_check_ctx *backref_ctx,
2765 			       u64 disk_bytenr, u64 extent_offset,
2766 			       u64 extent_gen,
2767 			       u64 start, u64 end)
2768 {
2769 	const u64 i_size = i_size_read(&inode->vfs_inode);
2770 	u64 cur_offset = start;
2771 	u64 last_delalloc_end = 0;
2772 	u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2773 	bool checked_extent_shared = false;
2774 	int ret;
2775 
2776 	/*
2777 	 * There can be no delalloc past i_size, so don't waste time looking for
2778 	 * it beyond i_size.
2779 	 */
2780 	while (cur_offset < end && cur_offset < i_size) {
2781 		struct extent_state *cached_state = NULL;
2782 		u64 delalloc_start;
2783 		u64 delalloc_end;
2784 		u64 prealloc_start;
2785 		u64 lockstart;
2786 		u64 lockend;
2787 		u64 prealloc_len = 0;
2788 		bool delalloc;
2789 
2790 		lockstart = round_down(cur_offset, inode->root->fs_info->sectorsize);
2791 		lockend = round_up(end, inode->root->fs_info->sectorsize);
2792 
2793 		/*
2794 		 * We are only locking for the delalloc range because that's the
2795 		 * only thing that can change here.  With fiemap we have a lock
2796 		 * on the inode, so no buffered or direct writes can happen.
2797 		 *
2798 		 * However mmaps and normal page writeback will cause this to
2799 		 * change arbitrarily.  We have to lock the extent lock here to
2800 		 * make sure that nobody messes with the tree while we're doing
2801 		 * btrfs_find_delalloc_in_range.
2802 		 */
2803 		lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
2804 		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2805 							delalloc_cached_state,
2806 							&delalloc_start,
2807 							&delalloc_end);
2808 		unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
2809 		if (!delalloc)
2810 			break;
2811 
2812 		/*
2813 		 * If this is a prealloc extent we have to report every section
2814 		 * of it that has no delalloc.
2815 		 */
2816 		if (disk_bytenr != 0) {
2817 			if (last_delalloc_end == 0) {
2818 				prealloc_start = start;
2819 				prealloc_len = delalloc_start - start;
2820 			} else {
2821 				prealloc_start = last_delalloc_end + 1;
2822 				prealloc_len = delalloc_start - prealloc_start;
2823 			}
2824 		}
2825 
2826 		if (prealloc_len > 0) {
2827 			if (!checked_extent_shared && fieinfo->fi_extents_max) {
2828 				ret = btrfs_is_data_extent_shared(inode,
2829 								  disk_bytenr,
2830 								  extent_gen,
2831 								  backref_ctx);
2832 				if (ret < 0)
2833 					return ret;
2834 				else if (ret > 0)
2835 					prealloc_flags |= FIEMAP_EXTENT_SHARED;
2836 
2837 				checked_extent_shared = true;
2838 			}
2839 			ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2840 						 disk_bytenr + extent_offset,
2841 						 prealloc_len, prealloc_flags);
2842 			if (ret)
2843 				return ret;
2844 			extent_offset += prealloc_len;
2845 		}
2846 
2847 		ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2848 					 delalloc_end + 1 - delalloc_start,
2849 					 FIEMAP_EXTENT_DELALLOC |
2850 					 FIEMAP_EXTENT_UNKNOWN);
2851 		if (ret)
2852 			return ret;
2853 
2854 		last_delalloc_end = delalloc_end;
2855 		cur_offset = delalloc_end + 1;
2856 		extent_offset += cur_offset - delalloc_start;
2857 		cond_resched();
2858 	}
2859 
2860 	/*
2861 	 * Either we found no delalloc for the whole prealloc extent or we have
2862 	 * a prealloc extent that spans i_size or starts at or after i_size.
2863 	 */
2864 	if (disk_bytenr != 0 && last_delalloc_end < end) {
2865 		u64 prealloc_start;
2866 		u64 prealloc_len;
2867 
2868 		if (last_delalloc_end == 0) {
2869 			prealloc_start = start;
2870 			prealloc_len = end + 1 - start;
2871 		} else {
2872 			prealloc_start = last_delalloc_end + 1;
2873 			prealloc_len = end + 1 - prealloc_start;
2874 		}
2875 
2876 		if (!checked_extent_shared && fieinfo->fi_extents_max) {
2877 			ret = btrfs_is_data_extent_shared(inode,
2878 							  disk_bytenr,
2879 							  extent_gen,
2880 							  backref_ctx);
2881 			if (ret < 0)
2882 				return ret;
2883 			else if (ret > 0)
2884 				prealloc_flags |= FIEMAP_EXTENT_SHARED;
2885 		}
2886 		ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2887 					 disk_bytenr + extent_offset,
2888 					 prealloc_len, prealloc_flags);
2889 		if (ret)
2890 			return ret;
2891 	}
2892 
2893 	return 0;
2894 }
2895 
2896 static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
2897 					  struct btrfs_path *path,
2898 					  u64 *last_extent_end_ret)
2899 {
2900 	const u64 ino = btrfs_ino(inode);
2901 	struct btrfs_root *root = inode->root;
2902 	struct extent_buffer *leaf;
2903 	struct btrfs_file_extent_item *ei;
2904 	struct btrfs_key key;
2905 	u64 disk_bytenr;
2906 	int ret;
2907 
2908 	/*
2909 	 * Lookup the last file extent. We're not using i_size here because
2910 	 * there might be preallocation past i_size.
2911 	 */
2912 	ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
2913 	/* There can't be a file extent item at offset (u64)-1 */
2914 	ASSERT(ret != 0);
2915 	if (ret < 0)
2916 		return ret;
2917 
2918 	/*
2919 	 * For a non-existing key, btrfs_search_slot() always leaves us at a
2920 	 * slot > 0, except if the btree is empty, which is impossible because
2921 	 * at least it has the inode item for this inode and all the items for
2922 	 * the root inode 256.
2923 	 */
2924 	ASSERT(path->slots[0] > 0);
2925 	path->slots[0]--;
2926 	leaf = path->nodes[0];
2927 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2928 	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
2929 		/* No file extent items in the subvolume tree. */
2930 		*last_extent_end_ret = 0;
2931 		return 0;
2932 	}
2933 
2934 	/*
2935 	 * For an inline extent, the disk_bytenr is where inline data starts at,
2936 	 * so first check if we have an inline extent item before checking if we
2937 	 * have an implicit hole (disk_bytenr == 0).
2938 	 */
2939 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
2940 	if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
2941 		*last_extent_end_ret = btrfs_file_extent_end(path);
2942 		return 0;
2943 	}
2944 
2945 	/*
2946 	 * Find the last file extent item that is not a hole (when NO_HOLES is
2947 	 * not enabled). This should take at most 2 iterations in the worst
2948 	 * case: we have one hole file extent item at slot 0 of a leaf and
2949 	 * another hole file extent item as the last item in the previous leaf.
2950 	 * This is because we merge file extent items that represent holes.
2951 	 */
2952 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2953 	while (disk_bytenr == 0) {
2954 		ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
2955 		if (ret < 0) {
2956 			return ret;
2957 		} else if (ret > 0) {
2958 			/* No file extent items that are not holes. */
2959 			*last_extent_end_ret = 0;
2960 			return 0;
2961 		}
2962 		leaf = path->nodes[0];
2963 		ei = btrfs_item_ptr(leaf, path->slots[0],
2964 				    struct btrfs_file_extent_item);
2965 		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2966 	}
2967 
2968 	*last_extent_end_ret = btrfs_file_extent_end(path);
2969 	return 0;
2970 }
2971 
2972 int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
2973 		  u64 start, u64 len)
2974 {
2975 	const u64 ino = btrfs_ino(inode);
2976 	struct extent_state *delalloc_cached_state = NULL;
2977 	struct btrfs_path *path;
2978 	struct fiemap_cache cache = { 0 };
2979 	struct btrfs_backref_share_check_ctx *backref_ctx;
2980 	u64 last_extent_end;
2981 	u64 prev_extent_end;
2982 	u64 range_start;
2983 	u64 range_end;
2984 	const u64 sectorsize = inode->root->fs_info->sectorsize;
2985 	bool stopped = false;
2986 	int ret;
2987 
2988 	backref_ctx = btrfs_alloc_backref_share_check_ctx();
2989 	path = btrfs_alloc_path();
2990 	if (!backref_ctx || !path) {
2991 		ret = -ENOMEM;
2992 		goto out;
2993 	}
2994 
2995 	range_start = round_down(start, sectorsize);
2996 	range_end = round_up(start + len, sectorsize);
2997 	prev_extent_end = range_start;
2998 
2999 	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
3000 	if (ret < 0)
3001 		goto out;
3002 	btrfs_release_path(path);
3003 
3004 	path->reada = READA_FORWARD;
3005 	ret = fiemap_search_slot(inode, path, range_start);
3006 	if (ret < 0) {
3007 		goto out;
3008 	} else if (ret > 0) {
3009 		/*
3010 		 * No file extent item found, but we may have delalloc between
3011 		 * the current offset and i_size. So check for that.
3012 		 */
3013 		ret = 0;
3014 		goto check_eof_delalloc;
3015 	}
3016 
3017 	while (prev_extent_end < range_end) {
3018 		struct extent_buffer *leaf = path->nodes[0];
3019 		struct btrfs_file_extent_item *ei;
3020 		struct btrfs_key key;
3021 		u64 extent_end;
3022 		u64 extent_len;
3023 		u64 extent_offset = 0;
3024 		u64 extent_gen;
3025 		u64 disk_bytenr = 0;
3026 		u64 flags = 0;
3027 		int extent_type;
3028 		u8 compression;
3029 
3030 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3031 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3032 			break;
3033 
3034 		extent_end = btrfs_file_extent_end(path);
3035 
3036 		/*
3037 		 * The first iteration can leave us at an extent item that ends
3038 		 * before our range's start. Move to the next item.
3039 		 */
3040 		if (extent_end <= range_start)
3041 			goto next_item;
3042 
3043 		backref_ctx->curr_leaf_bytenr = leaf->start;
3044 
3045 		/* We have in implicit hole (NO_HOLES feature enabled). */
3046 		if (prev_extent_end < key.offset) {
3047 			const u64 hole_end = min(key.offset, range_end) - 1;
3048 
3049 			ret = fiemap_process_hole(inode, fieinfo, &cache,
3050 						  &delalloc_cached_state,
3051 						  backref_ctx, 0, 0, 0,
3052 						  prev_extent_end, hole_end);
3053 			if (ret < 0) {
3054 				goto out;
3055 			} else if (ret > 0) {
3056 				/* fiemap_fill_next_extent() told us to stop. */
3057 				stopped = true;
3058 				break;
3059 			}
3060 
3061 			/* We've reached the end of the fiemap range, stop. */
3062 			if (key.offset >= range_end) {
3063 				stopped = true;
3064 				break;
3065 			}
3066 		}
3067 
3068 		extent_len = extent_end - key.offset;
3069 		ei = btrfs_item_ptr(leaf, path->slots[0],
3070 				    struct btrfs_file_extent_item);
3071 		compression = btrfs_file_extent_compression(leaf, ei);
3072 		extent_type = btrfs_file_extent_type(leaf, ei);
3073 		extent_gen = btrfs_file_extent_generation(leaf, ei);
3074 
3075 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3076 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3077 			if (compression == BTRFS_COMPRESS_NONE)
3078 				extent_offset = btrfs_file_extent_offset(leaf, ei);
3079 		}
3080 
3081 		if (compression != BTRFS_COMPRESS_NONE)
3082 			flags |= FIEMAP_EXTENT_ENCODED;
3083 
3084 		if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3085 			flags |= FIEMAP_EXTENT_DATA_INLINE;
3086 			flags |= FIEMAP_EXTENT_NOT_ALIGNED;
3087 			ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
3088 						 extent_len, flags);
3089 		} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3090 			ret = fiemap_process_hole(inode, fieinfo, &cache,
3091 						  &delalloc_cached_state,
3092 						  backref_ctx,
3093 						  disk_bytenr, extent_offset,
3094 						  extent_gen, key.offset,
3095 						  extent_end - 1);
3096 		} else if (disk_bytenr == 0) {
3097 			/* We have an explicit hole. */
3098 			ret = fiemap_process_hole(inode, fieinfo, &cache,
3099 						  &delalloc_cached_state,
3100 						  backref_ctx, 0, 0, 0,
3101 						  key.offset, extent_end - 1);
3102 		} else {
3103 			/* We have a regular extent. */
3104 			if (fieinfo->fi_extents_max) {
3105 				ret = btrfs_is_data_extent_shared(inode,
3106 								  disk_bytenr,
3107 								  extent_gen,
3108 								  backref_ctx);
3109 				if (ret < 0)
3110 					goto out;
3111 				else if (ret > 0)
3112 					flags |= FIEMAP_EXTENT_SHARED;
3113 			}
3114 
3115 			ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
3116 						 disk_bytenr + extent_offset,
3117 						 extent_len, flags);
3118 		}
3119 
3120 		if (ret < 0) {
3121 			goto out;
3122 		} else if (ret > 0) {
3123 			/* fiemap_fill_next_extent() told us to stop. */
3124 			stopped = true;
3125 			break;
3126 		}
3127 
3128 		prev_extent_end = extent_end;
3129 next_item:
3130 		if (fatal_signal_pending(current)) {
3131 			ret = -EINTR;
3132 			goto out;
3133 		}
3134 
3135 		ret = fiemap_next_leaf_item(inode, path);
3136 		if (ret < 0) {
3137 			goto out;
3138 		} else if (ret > 0) {
3139 			/* No more file extent items for this inode. */
3140 			break;
3141 		}
3142 		cond_resched();
3143 	}
3144 
3145 check_eof_delalloc:
3146 	/*
3147 	 * Release (and free) the path before emitting any final entries to
3148 	 * fiemap_fill_next_extent() to keep lockdep happy. This is because
3149 	 * once we find no more file extent items exist, we may have a
3150 	 * non-cloned leaf, and fiemap_fill_next_extent() can trigger page
3151 	 * faults when copying data to the user space buffer.
3152 	 */
3153 	btrfs_free_path(path);
3154 	path = NULL;
3155 
3156 	if (!stopped && prev_extent_end < range_end) {
3157 		ret = fiemap_process_hole(inode, fieinfo, &cache,
3158 					  &delalloc_cached_state, backref_ctx,
3159 					  0, 0, 0, prev_extent_end, range_end - 1);
3160 		if (ret < 0)
3161 			goto out;
3162 		prev_extent_end = range_end;
3163 	}
3164 
3165 	if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3166 		const u64 i_size = i_size_read(&inode->vfs_inode);
3167 
3168 		if (prev_extent_end < i_size) {
3169 			struct extent_state *cached_state = NULL;
3170 			u64 delalloc_start;
3171 			u64 delalloc_end;
3172 			u64 lockstart;
3173 			u64 lockend;
3174 			bool delalloc;
3175 
3176 			lockstart = round_down(prev_extent_end, sectorsize);
3177 			lockend = round_up(i_size, sectorsize);
3178 
3179 			/*
3180 			 * See the comment in fiemap_process_hole as to why
3181 			 * we're doing the locking here.
3182 			 */
3183 			lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3184 			delalloc = btrfs_find_delalloc_in_range(inode,
3185 								prev_extent_end,
3186 								i_size - 1,
3187 								&delalloc_cached_state,
3188 								&delalloc_start,
3189 								&delalloc_end);
3190 			unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3191 			if (!delalloc)
3192 				cache.flags |= FIEMAP_EXTENT_LAST;
3193 		} else {
3194 			cache.flags |= FIEMAP_EXTENT_LAST;
3195 		}
3196 	}
3197 
3198 	ret = emit_last_fiemap_cache(fieinfo, &cache);
3199 out:
3200 	free_extent_state(delalloc_cached_state);
3201 	btrfs_free_backref_share_ctx(backref_ctx);
3202 	btrfs_free_path(path);
3203 	return ret;
3204 }
3205 
3206 static void __free_extent_buffer(struct extent_buffer *eb)
3207 {
3208 	kmem_cache_free(extent_buffer_cache, eb);
3209 }
3210 
3211 static int extent_buffer_under_io(const struct extent_buffer *eb)
3212 {
3213 	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3214 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3215 }
3216 
3217 static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
3218 {
3219 	struct btrfs_subpage *subpage;
3220 
3221 	lockdep_assert_held(&folio->mapping->i_private_lock);
3222 
3223 	if (folio_test_private(folio)) {
3224 		subpage = folio_get_private(folio);
3225 		if (atomic_read(&subpage->eb_refs))
3226 			return true;
3227 		/*
3228 		 * Even there is no eb refs here, we may still have
3229 		 * end_page_read() call relying on page::private.
3230 		 */
3231 		if (atomic_read(&subpage->readers))
3232 			return true;
3233 	}
3234 	return false;
3235 }
3236 
3237 static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio)
3238 {
3239 	struct btrfs_fs_info *fs_info = eb->fs_info;
3240 	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3241 
3242 	/*
3243 	 * For mapped eb, we're going to change the folio private, which should
3244 	 * be done under the i_private_lock.
3245 	 */
3246 	if (mapped)
3247 		spin_lock(&folio->mapping->i_private_lock);
3248 
3249 	if (!folio_test_private(folio)) {
3250 		if (mapped)
3251 			spin_unlock(&folio->mapping->i_private_lock);
3252 		return;
3253 	}
3254 
3255 	if (fs_info->nodesize >= PAGE_SIZE) {
3256 		/*
3257 		 * We do this since we'll remove the pages after we've
3258 		 * removed the eb from the radix tree, so we could race
3259 		 * and have this page now attached to the new eb.  So
3260 		 * only clear folio if it's still connected to
3261 		 * this eb.
3262 		 */
3263 		if (folio_test_private(folio) && folio_get_private(folio) == eb) {
3264 			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3265 			BUG_ON(folio_test_dirty(folio));
3266 			BUG_ON(folio_test_writeback(folio));
3267 			/* We need to make sure we haven't be attached to a new eb. */
3268 			folio_detach_private(folio);
3269 		}
3270 		if (mapped)
3271 			spin_unlock(&folio->mapping->i_private_lock);
3272 		return;
3273 	}
3274 
3275 	/*
3276 	 * For subpage, we can have dummy eb with folio private attached.  In
3277 	 * this case, we can directly detach the private as such folio is only
3278 	 * attached to one dummy eb, no sharing.
3279 	 */
3280 	if (!mapped) {
3281 		btrfs_detach_subpage(fs_info, folio);
3282 		return;
3283 	}
3284 
3285 	btrfs_folio_dec_eb_refs(fs_info, folio);
3286 
3287 	/*
3288 	 * We can only detach the folio private if there are no other ebs in the
3289 	 * page range and no unfinished IO.
3290 	 */
3291 	if (!folio_range_has_eb(fs_info, folio))
3292 		btrfs_detach_subpage(fs_info, folio);
3293 
3294 	spin_unlock(&folio->mapping->i_private_lock);
3295 }
3296 
3297 /* Release all pages attached to the extent buffer */
3298 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3299 {
3300 	ASSERT(!extent_buffer_under_io(eb));
3301 
3302 	for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
3303 		struct folio *folio = eb->folios[i];
3304 
3305 		if (!folio)
3306 			continue;
3307 
3308 		detach_extent_buffer_folio(eb, folio);
3309 
3310 		/* One for when we allocated the folio. */
3311 		folio_put(folio);
3312 	}
3313 }
3314 
3315 /*
3316  * Helper for releasing the extent buffer.
3317  */
3318 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3319 {
3320 	btrfs_release_extent_buffer_pages(eb);
3321 	btrfs_leak_debug_del_eb(eb);
3322 	__free_extent_buffer(eb);
3323 }
3324 
3325 static struct extent_buffer *
3326 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
3327 		      unsigned long len)
3328 {
3329 	struct extent_buffer *eb = NULL;
3330 
3331 	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
3332 	eb->start = start;
3333 	eb->len = len;
3334 	eb->fs_info = fs_info;
3335 	init_rwsem(&eb->lock);
3336 
3337 	btrfs_leak_debug_add_eb(eb);
3338 
3339 	spin_lock_init(&eb->refs_lock);
3340 	atomic_set(&eb->refs, 1);
3341 
3342 	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
3343 
3344 	return eb;
3345 }
3346 
3347 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
3348 {
3349 	struct extent_buffer *new;
3350 	int num_folios = num_extent_folios(src);
3351 	int ret;
3352 
3353 	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
3354 	if (new == NULL)
3355 		return NULL;
3356 
3357 	/*
3358 	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3359 	 * btrfs_release_extent_buffer() have different behavior for
3360 	 * UNMAPPED subpage extent buffer.
3361 	 */
3362 	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3363 
3364 	ret = alloc_eb_folio_array(new, 0);
3365 	if (ret) {
3366 		btrfs_release_extent_buffer(new);
3367 		return NULL;
3368 	}
3369 
3370 	for (int i = 0; i < num_folios; i++) {
3371 		struct folio *folio = new->folios[i];
3372 		int ret;
3373 
3374 		ret = attach_extent_buffer_folio(new, folio, NULL);
3375 		if (ret < 0) {
3376 			btrfs_release_extent_buffer(new);
3377 			return NULL;
3378 		}
3379 		WARN_ON(folio_test_dirty(folio));
3380 	}
3381 	copy_extent_buffer_full(new, src);
3382 	set_extent_buffer_uptodate(new);
3383 
3384 	return new;
3385 }
3386 
3387 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3388 						  u64 start, unsigned long len)
3389 {
3390 	struct extent_buffer *eb;
3391 	int num_folios = 0;
3392 	int ret;
3393 
3394 	eb = __alloc_extent_buffer(fs_info, start, len);
3395 	if (!eb)
3396 		return NULL;
3397 
3398 	ret = alloc_eb_folio_array(eb, 0);
3399 	if (ret)
3400 		goto err;
3401 
3402 	num_folios = num_extent_folios(eb);
3403 	for (int i = 0; i < num_folios; i++) {
3404 		ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
3405 		if (ret < 0)
3406 			goto err;
3407 	}
3408 
3409 	set_extent_buffer_uptodate(eb);
3410 	btrfs_set_header_nritems(eb, 0);
3411 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3412 
3413 	return eb;
3414 err:
3415 	for (int i = 0; i < num_folios; i++) {
3416 		if (eb->folios[i]) {
3417 			detach_extent_buffer_folio(eb, eb->folios[i]);
3418 			__folio_put(eb->folios[i]);
3419 		}
3420 	}
3421 	__free_extent_buffer(eb);
3422 	return NULL;
3423 }
3424 
3425 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3426 						u64 start)
3427 {
3428 	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
3429 }
3430 
3431 static void check_buffer_tree_ref(struct extent_buffer *eb)
3432 {
3433 	int refs;
3434 	/*
3435 	 * The TREE_REF bit is first set when the extent_buffer is added
3436 	 * to the radix tree. It is also reset, if unset, when a new reference
3437 	 * is created by find_extent_buffer.
3438 	 *
3439 	 * It is only cleared in two cases: freeing the last non-tree
3440 	 * reference to the extent_buffer when its STALE bit is set or
3441 	 * calling release_folio when the tree reference is the only reference.
3442 	 *
3443 	 * In both cases, care is taken to ensure that the extent_buffer's
3444 	 * pages are not under io. However, release_folio can be concurrently
3445 	 * called with creating new references, which is prone to race
3446 	 * conditions between the calls to check_buffer_tree_ref in those
3447 	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3448 	 *
3449 	 * The actual lifetime of the extent_buffer in the radix tree is
3450 	 * adequately protected by the refcount, but the TREE_REF bit and
3451 	 * its corresponding reference are not. To protect against this
3452 	 * class of races, we call check_buffer_tree_ref from the codepaths
3453 	 * which trigger io. Note that once io is initiated, TREE_REF can no
3454 	 * longer be cleared, so that is the moment at which any such race is
3455 	 * best fixed.
3456 	 */
3457 	refs = atomic_read(&eb->refs);
3458 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3459 		return;
3460 
3461 	spin_lock(&eb->refs_lock);
3462 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3463 		atomic_inc(&eb->refs);
3464 	spin_unlock(&eb->refs_lock);
3465 }
3466 
3467 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
3468 {
3469 	int num_folios= num_extent_folios(eb);
3470 
3471 	check_buffer_tree_ref(eb);
3472 
3473 	for (int i = 0; i < num_folios; i++)
3474 		folio_mark_accessed(eb->folios[i]);
3475 }
3476 
3477 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3478 					 u64 start)
3479 {
3480 	struct extent_buffer *eb;
3481 
3482 	eb = find_extent_buffer_nolock(fs_info, start);
3483 	if (!eb)
3484 		return NULL;
3485 	/*
3486 	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3487 	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3488 	 * another task running free_extent_buffer() might have seen that flag
3489 	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3490 	 * writeback flags not set) and it's still in the tree (flag
3491 	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3492 	 * decrementing the extent buffer's reference count twice.  So here we
3493 	 * could race and increment the eb's reference count, clear its stale
3494 	 * flag, mark it as dirty and drop our reference before the other task
3495 	 * finishes executing free_extent_buffer, which would later result in
3496 	 * an attempt to free an extent buffer that is dirty.
3497 	 */
3498 	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3499 		spin_lock(&eb->refs_lock);
3500 		spin_unlock(&eb->refs_lock);
3501 	}
3502 	mark_extent_buffer_accessed(eb);
3503 	return eb;
3504 }
3505 
3506 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3507 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3508 					u64 start)
3509 {
3510 	struct extent_buffer *eb, *exists = NULL;
3511 	int ret;
3512 
3513 	eb = find_extent_buffer(fs_info, start);
3514 	if (eb)
3515 		return eb;
3516 	eb = alloc_dummy_extent_buffer(fs_info, start);
3517 	if (!eb)
3518 		return ERR_PTR(-ENOMEM);
3519 	eb->fs_info = fs_info;
3520 again:
3521 	ret = radix_tree_preload(GFP_NOFS);
3522 	if (ret) {
3523 		exists = ERR_PTR(ret);
3524 		goto free_eb;
3525 	}
3526 	spin_lock(&fs_info->buffer_lock);
3527 	ret = radix_tree_insert(&fs_info->buffer_radix,
3528 				start >> fs_info->sectorsize_bits, eb);
3529 	spin_unlock(&fs_info->buffer_lock);
3530 	radix_tree_preload_end();
3531 	if (ret == -EEXIST) {
3532 		exists = find_extent_buffer(fs_info, start);
3533 		if (exists)
3534 			goto free_eb;
3535 		else
3536 			goto again;
3537 	}
3538 	check_buffer_tree_ref(eb);
3539 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3540 
3541 	return eb;
3542 free_eb:
3543 	btrfs_release_extent_buffer(eb);
3544 	return exists;
3545 }
3546 #endif
3547 
3548 static struct extent_buffer *grab_extent_buffer(
3549 		struct btrfs_fs_info *fs_info, struct page *page)
3550 {
3551 	struct folio *folio = page_folio(page);
3552 	struct extent_buffer *exists;
3553 
3554 	/*
3555 	 * For subpage case, we completely rely on radix tree to ensure we
3556 	 * don't try to insert two ebs for the same bytenr.  So here we always
3557 	 * return NULL and just continue.
3558 	 */
3559 	if (fs_info->nodesize < PAGE_SIZE)
3560 		return NULL;
3561 
3562 	/* Page not yet attached to an extent buffer */
3563 	if (!folio_test_private(folio))
3564 		return NULL;
3565 
3566 	/*
3567 	 * We could have already allocated an eb for this page and attached one
3568 	 * so lets see if we can get a ref on the existing eb, and if we can we
3569 	 * know it's good and we can just return that one, else we know we can
3570 	 * just overwrite folio private.
3571 	 */
3572 	exists = folio_get_private(folio);
3573 	if (atomic_inc_not_zero(&exists->refs))
3574 		return exists;
3575 
3576 	WARN_ON(PageDirty(page));
3577 	folio_detach_private(folio);
3578 	return NULL;
3579 }
3580 
3581 static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3582 {
3583 	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3584 		btrfs_err(fs_info, "bad tree block start %llu", start);
3585 		return -EINVAL;
3586 	}
3587 
3588 	if (fs_info->nodesize < PAGE_SIZE &&
3589 	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3590 		btrfs_err(fs_info,
3591 		"tree block crosses page boundary, start %llu nodesize %u",
3592 			  start, fs_info->nodesize);
3593 		return -EINVAL;
3594 	}
3595 	if (fs_info->nodesize >= PAGE_SIZE &&
3596 	    !PAGE_ALIGNED(start)) {
3597 		btrfs_err(fs_info,
3598 		"tree block is not page aligned, start %llu nodesize %u",
3599 			  start, fs_info->nodesize);
3600 		return -EINVAL;
3601 	}
3602 	if (!IS_ALIGNED(start, fs_info->nodesize) &&
3603 	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
3604 		btrfs_warn(fs_info,
3605 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
3606 			      start, fs_info->nodesize);
3607 	}
3608 	return 0;
3609 }
3610 
3611 
3612 /*
3613  * Return 0 if eb->folios[i] is attached to btree inode successfully.
3614  * Return >0 if there is already another extent buffer for the range,
3615  * and @found_eb_ret would be updated.
3616  * Return -EAGAIN if the filemap has an existing folio but with different size
3617  * than @eb.
3618  * The caller needs to free the existing folios and retry using the same order.
3619  */
3620 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
3621 				      struct extent_buffer **found_eb_ret)
3622 {
3623 
3624 	struct btrfs_fs_info *fs_info = eb->fs_info;
3625 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3626 	const unsigned long index = eb->start >> PAGE_SHIFT;
3627 	struct folio *existing_folio;
3628 	int ret;
3629 
3630 	ASSERT(found_eb_ret);
3631 
3632 	/* Caller should ensure the folio exists. */
3633 	ASSERT(eb->folios[i]);
3634 
3635 retry:
3636 	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
3637 				GFP_NOFS | __GFP_NOFAIL);
3638 	if (!ret)
3639 		return 0;
3640 
3641 	existing_folio = filemap_lock_folio(mapping, index + i);
3642 	/* The page cache only exists for a very short time, just retry. */
3643 	if (IS_ERR(existing_folio))
3644 		goto retry;
3645 
3646 	/* For now, we should only have single-page folios for btree inode. */
3647 	ASSERT(folio_nr_pages(existing_folio) == 1);
3648 
3649 	if (folio_size(existing_folio) != folio_size(eb->folios[0])) {
3650 		folio_unlock(existing_folio);
3651 		folio_put(existing_folio);
3652 		return -EAGAIN;
3653 	}
3654 
3655 	if (fs_info->nodesize < PAGE_SIZE) {
3656 		/*
3657 		 * We're going to reuse the existing page, can drop our page
3658 		 * and subpage structure now.
3659 		 */
3660 		__free_page(folio_page(eb->folios[i], 0));
3661 		eb->folios[i] = existing_folio;
3662 	} else {
3663 		struct extent_buffer *existing_eb;
3664 
3665 		existing_eb = grab_extent_buffer(fs_info,
3666 						 folio_page(existing_folio, 0));
3667 		if (existing_eb) {
3668 			/* The extent buffer still exists, we can use it directly. */
3669 			*found_eb_ret = existing_eb;
3670 			folio_unlock(existing_folio);
3671 			folio_put(existing_folio);
3672 			return 1;
3673 		}
3674 		/* The extent buffer no longer exists, we can reuse the folio. */
3675 		__free_page(folio_page(eb->folios[i], 0));
3676 		eb->folios[i] = existing_folio;
3677 	}
3678 	return 0;
3679 }
3680 
3681 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3682 					  u64 start, u64 owner_root, int level)
3683 {
3684 	unsigned long len = fs_info->nodesize;
3685 	int num_folios;
3686 	int attached = 0;
3687 	struct extent_buffer *eb;
3688 	struct extent_buffer *existing_eb = NULL;
3689 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3690 	struct btrfs_subpage *prealloc = NULL;
3691 	u64 lockdep_owner = owner_root;
3692 	bool page_contig = true;
3693 	int uptodate = 1;
3694 	int ret;
3695 
3696 	if (check_eb_alignment(fs_info, start))
3697 		return ERR_PTR(-EINVAL);
3698 
3699 #if BITS_PER_LONG == 32
3700 	if (start >= MAX_LFS_FILESIZE) {
3701 		btrfs_err_rl(fs_info,
3702 		"extent buffer %llu is beyond 32bit page cache limit", start);
3703 		btrfs_err_32bit_limit(fs_info);
3704 		return ERR_PTR(-EOVERFLOW);
3705 	}
3706 	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3707 		btrfs_warn_32bit_limit(fs_info);
3708 #endif
3709 
3710 	eb = find_extent_buffer(fs_info, start);
3711 	if (eb)
3712 		return eb;
3713 
3714 	eb = __alloc_extent_buffer(fs_info, start, len);
3715 	if (!eb)
3716 		return ERR_PTR(-ENOMEM);
3717 
3718 	/*
3719 	 * The reloc trees are just snapshots, so we need them to appear to be
3720 	 * just like any other fs tree WRT lockdep.
3721 	 */
3722 	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3723 		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3724 
3725 	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3726 
3727 	/*
3728 	 * Preallocate folio private for subpage case, so that we won't
3729 	 * allocate memory with i_private_lock nor page lock hold.
3730 	 *
3731 	 * The memory will be freed by attach_extent_buffer_page() or freed
3732 	 * manually if we exit earlier.
3733 	 */
3734 	if (fs_info->nodesize < PAGE_SIZE) {
3735 		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3736 		if (IS_ERR(prealloc)) {
3737 			ret = PTR_ERR(prealloc);
3738 			goto out;
3739 		}
3740 	}
3741 
3742 reallocate:
3743 	/* Allocate all pages first. */
3744 	ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
3745 	if (ret < 0) {
3746 		btrfs_free_subpage(prealloc);
3747 		goto out;
3748 	}
3749 
3750 	num_folios = num_extent_folios(eb);
3751 	/* Attach all pages to the filemap. */
3752 	for (int i = 0; i < num_folios; i++) {
3753 		struct folio *folio;
3754 
3755 		ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
3756 		if (ret > 0) {
3757 			ASSERT(existing_eb);
3758 			goto out;
3759 		}
3760 
3761 		/*
3762 		 * TODO: Special handling for a corner case where the order of
3763 		 * folios mismatch between the new eb and filemap.
3764 		 *
3765 		 * This happens when:
3766 		 *
3767 		 * - the new eb is using higher order folio
3768 		 *
3769 		 * - the filemap is still using 0-order folios for the range
3770 		 *   This can happen at the previous eb allocation, and we don't
3771 		 *   have higher order folio for the call.
3772 		 *
3773 		 * - the existing eb has already been freed
3774 		 *
3775 		 * In this case, we have to free the existing folios first, and
3776 		 * re-allocate using the same order.
3777 		 * Thankfully this is not going to happen yet, as we're still
3778 		 * using 0-order folios.
3779 		 */
3780 		if (unlikely(ret == -EAGAIN)) {
3781 			ASSERT(0);
3782 			goto reallocate;
3783 		}
3784 		attached++;
3785 
3786 		/*
3787 		 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3788 		 * reliable, as we may choose to reuse the existing page cache
3789 		 * and free the allocated page.
3790 		 */
3791 		folio = eb->folios[i];
3792 		spin_lock(&mapping->i_private_lock);
3793 		/* Should not fail, as we have preallocated the memory */
3794 		ret = attach_extent_buffer_folio(eb, folio, prealloc);
3795 		ASSERT(!ret);
3796 		/*
3797 		 * To inform we have extra eb under allocation, so that
3798 		 * detach_extent_buffer_page() won't release the folio private
3799 		 * when the eb hasn't yet been inserted into radix tree.
3800 		 *
3801 		 * The ref will be decreased when the eb released the page, in
3802 		 * detach_extent_buffer_page().
3803 		 * Thus needs no special handling in error path.
3804 		 */
3805 		btrfs_folio_inc_eb_refs(fs_info, folio);
3806 		spin_unlock(&mapping->i_private_lock);
3807 
3808 		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3809 
3810 		/*
3811 		 * Check if the current page is physically contiguous with previous eb
3812 		 * page.
3813 		 * At this stage, either we allocated a large folio, thus @i
3814 		 * would only be 0, or we fall back to per-page allocation.
3815 		 */
3816 		if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3817 			page_contig = false;
3818 
3819 		if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3820 			uptodate = 0;
3821 
3822 		/*
3823 		 * We can't unlock the pages just yet since the extent buffer
3824 		 * hasn't been properly inserted in the radix tree, this
3825 		 * opens a race with btree_release_folio which can free a page
3826 		 * while we are still filling in all pages for the buffer and
3827 		 * we could crash.
3828 		 */
3829 	}
3830 	if (uptodate)
3831 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3832 	/* All pages are physically contiguous, can skip cross page handling. */
3833 	if (page_contig)
3834 		eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3835 again:
3836 	ret = radix_tree_preload(GFP_NOFS);
3837 	if (ret)
3838 		goto out;
3839 
3840 	spin_lock(&fs_info->buffer_lock);
3841 	ret = radix_tree_insert(&fs_info->buffer_radix,
3842 				start >> fs_info->sectorsize_bits, eb);
3843 	spin_unlock(&fs_info->buffer_lock);
3844 	radix_tree_preload_end();
3845 	if (ret == -EEXIST) {
3846 		ret = 0;
3847 		existing_eb = find_extent_buffer(fs_info, start);
3848 		if (existing_eb)
3849 			goto out;
3850 		else
3851 			goto again;
3852 	}
3853 	/* add one reference for the tree */
3854 	check_buffer_tree_ref(eb);
3855 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3856 
3857 	/*
3858 	 * Now it's safe to unlock the pages because any calls to
3859 	 * btree_release_folio will correctly detect that a page belongs to a
3860 	 * live buffer and won't free them prematurely.
3861 	 */
3862 	for (int i = 0; i < num_folios; i++)
3863 		unlock_page(folio_page(eb->folios[i], 0));
3864 	return eb;
3865 
3866 out:
3867 	WARN_ON(!atomic_dec_and_test(&eb->refs));
3868 
3869 	/*
3870 	 * Any attached folios need to be detached before we unlock them.  This
3871 	 * is because when we're inserting our new folios into the mapping, and
3872 	 * then attaching our eb to that folio.  If we fail to insert our folio
3873 	 * we'll lookup the folio for that index, and grab that EB.  We do not
3874 	 * want that to grab this eb, as we're getting ready to free it.  So we
3875 	 * have to detach it first and then unlock it.
3876 	 *
3877 	 * We have to drop our reference and NULL it out here because in the
3878 	 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3879 	 * Below when we call btrfs_release_extent_buffer() we will call
3880 	 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3881 	 * case.  If we left eb->folios[i] populated in the subpage case we'd
3882 	 * double put our reference and be super sad.
3883 	 */
3884 	for (int i = 0; i < attached; i++) {
3885 		ASSERT(eb->folios[i]);
3886 		detach_extent_buffer_folio(eb, eb->folios[i]);
3887 		unlock_page(folio_page(eb->folios[i], 0));
3888 		folio_put(eb->folios[i]);
3889 		eb->folios[i] = NULL;
3890 	}
3891 	/*
3892 	 * Now all pages of that extent buffer is unmapped, set UNMAPPED flag,
3893 	 * so it can be cleaned up without utlizing page->mapping.
3894 	 */
3895 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3896 
3897 	btrfs_release_extent_buffer(eb);
3898 	if (ret < 0)
3899 		return ERR_PTR(ret);
3900 	ASSERT(existing_eb);
3901 	return existing_eb;
3902 }
3903 
3904 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3905 {
3906 	struct extent_buffer *eb =
3907 			container_of(head, struct extent_buffer, rcu_head);
3908 
3909 	__free_extent_buffer(eb);
3910 }
3911 
3912 static int release_extent_buffer(struct extent_buffer *eb)
3913 	__releases(&eb->refs_lock)
3914 {
3915 	lockdep_assert_held(&eb->refs_lock);
3916 
3917 	WARN_ON(atomic_read(&eb->refs) == 0);
3918 	if (atomic_dec_and_test(&eb->refs)) {
3919 		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3920 			struct btrfs_fs_info *fs_info = eb->fs_info;
3921 
3922 			spin_unlock(&eb->refs_lock);
3923 
3924 			spin_lock(&fs_info->buffer_lock);
3925 			radix_tree_delete(&fs_info->buffer_radix,
3926 					  eb->start >> fs_info->sectorsize_bits);
3927 			spin_unlock(&fs_info->buffer_lock);
3928 		} else {
3929 			spin_unlock(&eb->refs_lock);
3930 		}
3931 
3932 		btrfs_leak_debug_del_eb(eb);
3933 		/* Should be safe to release our pages at this point */
3934 		btrfs_release_extent_buffer_pages(eb);
3935 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3936 		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3937 			__free_extent_buffer(eb);
3938 			return 1;
3939 		}
3940 #endif
3941 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3942 		return 1;
3943 	}
3944 	spin_unlock(&eb->refs_lock);
3945 
3946 	return 0;
3947 }
3948 
3949 void free_extent_buffer(struct extent_buffer *eb)
3950 {
3951 	int refs;
3952 	if (!eb)
3953 		return;
3954 
3955 	refs = atomic_read(&eb->refs);
3956 	while (1) {
3957 		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3958 		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3959 			refs == 1))
3960 			break;
3961 		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3962 			return;
3963 	}
3964 
3965 	spin_lock(&eb->refs_lock);
3966 	if (atomic_read(&eb->refs) == 2 &&
3967 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3968 	    !extent_buffer_under_io(eb) &&
3969 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3970 		atomic_dec(&eb->refs);
3971 
3972 	/*
3973 	 * I know this is terrible, but it's temporary until we stop tracking
3974 	 * the uptodate bits and such for the extent buffers.
3975 	 */
3976 	release_extent_buffer(eb);
3977 }
3978 
3979 void free_extent_buffer_stale(struct extent_buffer *eb)
3980 {
3981 	if (!eb)
3982 		return;
3983 
3984 	spin_lock(&eb->refs_lock);
3985 	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3986 
3987 	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3988 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3989 		atomic_dec(&eb->refs);
3990 	release_extent_buffer(eb);
3991 }
3992 
3993 static void btree_clear_folio_dirty(struct folio *folio)
3994 {
3995 	ASSERT(folio_test_dirty(folio));
3996 	ASSERT(folio_test_locked(folio));
3997 	folio_clear_dirty_for_io(folio);
3998 	xa_lock_irq(&folio->mapping->i_pages);
3999 	if (!folio_test_dirty(folio))
4000 		__xa_clear_mark(&folio->mapping->i_pages,
4001 				folio_index(folio), PAGECACHE_TAG_DIRTY);
4002 	xa_unlock_irq(&folio->mapping->i_pages);
4003 }
4004 
4005 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
4006 {
4007 	struct btrfs_fs_info *fs_info = eb->fs_info;
4008 	struct folio *folio = eb->folios[0];
4009 	bool last;
4010 
4011 	/* btree_clear_folio_dirty() needs page locked. */
4012 	folio_lock(folio);
4013 	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
4014 	if (last)
4015 		btree_clear_folio_dirty(folio);
4016 	folio_unlock(folio);
4017 	WARN_ON(atomic_read(&eb->refs) == 0);
4018 }
4019 
4020 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
4021 			      struct extent_buffer *eb)
4022 {
4023 	struct btrfs_fs_info *fs_info = eb->fs_info;
4024 	int num_folios;
4025 
4026 	btrfs_assert_tree_write_locked(eb);
4027 
4028 	if (trans && btrfs_header_generation(eb) != trans->transid)
4029 		return;
4030 
4031 	/*
4032 	 * Instead of clearing the dirty flag off of the buffer, mark it as
4033 	 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
4034 	 * write-ordering in zoned mode, without the need to later re-dirty
4035 	 * the extent_buffer.
4036 	 *
4037 	 * The actual zeroout of the buffer will happen later in
4038 	 * btree_csum_one_bio.
4039 	 */
4040 	if (btrfs_is_zoned(fs_info)) {
4041 		set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
4042 		return;
4043 	}
4044 
4045 	if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
4046 		return;
4047 
4048 	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
4049 				 fs_info->dirty_metadata_batch);
4050 
4051 	if (eb->fs_info->nodesize < PAGE_SIZE)
4052 		return clear_subpage_extent_buffer_dirty(eb);
4053 
4054 	num_folios = num_extent_folios(eb);
4055 	for (int i = 0; i < num_folios; i++) {
4056 		struct folio *folio = eb->folios[i];
4057 
4058 		if (!folio_test_dirty(folio))
4059 			continue;
4060 		folio_lock(folio);
4061 		btree_clear_folio_dirty(folio);
4062 		folio_unlock(folio);
4063 	}
4064 	WARN_ON(atomic_read(&eb->refs) == 0);
4065 }
4066 
4067 void set_extent_buffer_dirty(struct extent_buffer *eb)
4068 {
4069 	int num_folios;
4070 	bool was_dirty;
4071 
4072 	check_buffer_tree_ref(eb);
4073 
4074 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4075 
4076 	num_folios = num_extent_folios(eb);
4077 	WARN_ON(atomic_read(&eb->refs) == 0);
4078 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4079 
4080 	if (!was_dirty) {
4081 		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
4082 
4083 		/*
4084 		 * For subpage case, we can have other extent buffers in the
4085 		 * same page, and in clear_subpage_extent_buffer_dirty() we
4086 		 * have to clear page dirty without subpage lock held.
4087 		 * This can cause race where our page gets dirty cleared after
4088 		 * we just set it.
4089 		 *
4090 		 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
4091 		 * its page for other reasons, we can use page lock to prevent
4092 		 * the above race.
4093 		 */
4094 		if (subpage)
4095 			lock_page(folio_page(eb->folios[0], 0));
4096 		for (int i = 0; i < num_folios; i++)
4097 			btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
4098 					      eb->start, eb->len);
4099 		if (subpage)
4100 			unlock_page(folio_page(eb->folios[0], 0));
4101 		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
4102 					 eb->len,
4103 					 eb->fs_info->dirty_metadata_batch);
4104 	}
4105 #ifdef CONFIG_BTRFS_DEBUG
4106 	for (int i = 0; i < num_folios; i++)
4107 		ASSERT(folio_test_dirty(eb->folios[i]));
4108 #endif
4109 }
4110 
4111 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
4112 {
4113 	struct btrfs_fs_info *fs_info = eb->fs_info;
4114 	int num_folios = num_extent_folios(eb);
4115 
4116 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4117 	for (int i = 0; i < num_folios; i++) {
4118 		struct folio *folio = eb->folios[i];
4119 
4120 		if (!folio)
4121 			continue;
4122 
4123 		/*
4124 		 * This is special handling for metadata subpage, as regular
4125 		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4126 		 */
4127 		if (fs_info->nodesize >= PAGE_SIZE)
4128 			folio_clear_uptodate(folio);
4129 		else
4130 			btrfs_subpage_clear_uptodate(fs_info, folio,
4131 						     eb->start, eb->len);
4132 	}
4133 }
4134 
4135 void set_extent_buffer_uptodate(struct extent_buffer *eb)
4136 {
4137 	struct btrfs_fs_info *fs_info = eb->fs_info;
4138 	int num_folios = num_extent_folios(eb);
4139 
4140 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4141 	for (int i = 0; i < num_folios; i++) {
4142 		struct folio *folio = eb->folios[i];
4143 
4144 		/*
4145 		 * This is special handling for metadata subpage, as regular
4146 		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4147 		 */
4148 		if (fs_info->nodesize >= PAGE_SIZE)
4149 			folio_mark_uptodate(folio);
4150 		else
4151 			btrfs_subpage_set_uptodate(fs_info, folio,
4152 						   eb->start, eb->len);
4153 	}
4154 }
4155 
4156 static void end_bbio_meta_read(struct btrfs_bio *bbio)
4157 {
4158 	struct extent_buffer *eb = bbio->private;
4159 	struct btrfs_fs_info *fs_info = eb->fs_info;
4160 	bool uptodate = !bbio->bio.bi_status;
4161 	struct folio_iter fi;
4162 	u32 bio_offset = 0;
4163 
4164 	eb->read_mirror = bbio->mirror_num;
4165 
4166 	if (uptodate &&
4167 	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
4168 		uptodate = false;
4169 
4170 	if (uptodate) {
4171 		set_extent_buffer_uptodate(eb);
4172 	} else {
4173 		clear_extent_buffer_uptodate(eb);
4174 		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4175 	}
4176 
4177 	bio_for_each_folio_all(fi, &bbio->bio) {
4178 		struct folio *folio = fi.folio;
4179 		u64 start = eb->start + bio_offset;
4180 		u32 len = fi.length;
4181 
4182 		if (uptodate)
4183 			btrfs_folio_set_uptodate(fs_info, folio, start, len);
4184 		else
4185 			btrfs_folio_clear_uptodate(fs_info, folio, start, len);
4186 
4187 		bio_offset += len;
4188 	}
4189 
4190 	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4191 	smp_mb__after_atomic();
4192 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4193 	free_extent_buffer(eb);
4194 
4195 	bio_put(&bbio->bio);
4196 }
4197 
4198 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
4199 			     struct btrfs_tree_parent_check *check)
4200 {
4201 	struct btrfs_bio *bbio;
4202 	bool ret;
4203 
4204 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4205 		return 0;
4206 
4207 	/*
4208 	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4209 	 * operation, which could potentially still be in flight.  In this case
4210 	 * we simply want to return an error.
4211 	 */
4212 	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
4213 		return -EIO;
4214 
4215 	/* Someone else is already reading the buffer, just wait for it. */
4216 	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
4217 		goto done;
4218 
4219 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4220 	eb->read_mirror = 0;
4221 	check_buffer_tree_ref(eb);
4222 	atomic_inc(&eb->refs);
4223 
4224 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
4225 			       REQ_OP_READ | REQ_META, eb->fs_info,
4226 			       end_bbio_meta_read, eb);
4227 	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
4228 	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
4229 	bbio->file_offset = eb->start;
4230 	memcpy(&bbio->parent_check, check, sizeof(*check));
4231 	if (eb->fs_info->nodesize < PAGE_SIZE) {
4232 		ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
4233 				    eb->start - folio_pos(eb->folios[0]));
4234 		ASSERT(ret);
4235 	} else {
4236 		int num_folios = num_extent_folios(eb);
4237 
4238 		for (int i = 0; i < num_folios; i++) {
4239 			struct folio *folio = eb->folios[i];
4240 
4241 			ret = bio_add_folio(&bbio->bio, folio, folio_size(folio), 0);
4242 			ASSERT(ret);
4243 		}
4244 	}
4245 	btrfs_submit_bio(bbio, mirror_num);
4246 
4247 done:
4248 	if (wait == WAIT_COMPLETE) {
4249 		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
4250 		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4251 			return -EIO;
4252 	}
4253 
4254 	return 0;
4255 }
4256 
4257 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
4258 			    unsigned long len)
4259 {
4260 	btrfs_warn(eb->fs_info,
4261 		"access to eb bytenr %llu len %lu out of range start %lu len %lu",
4262 		eb->start, eb->len, start, len);
4263 	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4264 
4265 	return true;
4266 }
4267 
4268 /*
4269  * Check if the [start, start + len) range is valid before reading/writing
4270  * the eb.
4271  * NOTE: @start and @len are offset inside the eb, not logical address.
4272  *
4273  * Caller should not touch the dst/src memory if this function returns error.
4274  */
4275 static inline int check_eb_range(const struct extent_buffer *eb,
4276 				 unsigned long start, unsigned long len)
4277 {
4278 	unsigned long offset;
4279 
4280 	/* start, start + len should not go beyond eb->len nor overflow */
4281 	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
4282 		return report_eb_range(eb, start, len);
4283 
4284 	return false;
4285 }
4286 
4287 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
4288 			unsigned long start, unsigned long len)
4289 {
4290 	const int unit_size = folio_size(eb->folios[0]);
4291 	size_t cur;
4292 	size_t offset;
4293 	char *dst = (char *)dstv;
4294 	unsigned long i = get_eb_folio_index(eb, start);
4295 
4296 	if (check_eb_range(eb, start, len)) {
4297 		/*
4298 		 * Invalid range hit, reset the memory, so callers won't get
4299 		 * some random garbage for their uninitialized memory.
4300 		 */
4301 		memset(dstv, 0, len);
4302 		return;
4303 	}
4304 
4305 	if (eb->addr) {
4306 		memcpy(dstv, eb->addr + start, len);
4307 		return;
4308 	}
4309 
4310 	offset = get_eb_offset_in_folio(eb, start);
4311 
4312 	while (len > 0) {
4313 		char *kaddr;
4314 
4315 		cur = min(len, unit_size - offset);
4316 		kaddr = folio_address(eb->folios[i]);
4317 		memcpy(dst, kaddr + offset, cur);
4318 
4319 		dst += cur;
4320 		len -= cur;
4321 		offset = 0;
4322 		i++;
4323 	}
4324 }
4325 
4326 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4327 				       void __user *dstv,
4328 				       unsigned long start, unsigned long len)
4329 {
4330 	const int unit_size = folio_size(eb->folios[0]);
4331 	size_t cur;
4332 	size_t offset;
4333 	char __user *dst = (char __user *)dstv;
4334 	unsigned long i = get_eb_folio_index(eb, start);
4335 	int ret = 0;
4336 
4337 	WARN_ON(start > eb->len);
4338 	WARN_ON(start + len > eb->start + eb->len);
4339 
4340 	if (eb->addr) {
4341 		if (copy_to_user_nofault(dstv, eb->addr + start, len))
4342 			ret = -EFAULT;
4343 		return ret;
4344 	}
4345 
4346 	offset = get_eb_offset_in_folio(eb, start);
4347 
4348 	while (len > 0) {
4349 		char *kaddr;
4350 
4351 		cur = min(len, unit_size - offset);
4352 		kaddr = folio_address(eb->folios[i]);
4353 		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4354 			ret = -EFAULT;
4355 			break;
4356 		}
4357 
4358 		dst += cur;
4359 		len -= cur;
4360 		offset = 0;
4361 		i++;
4362 	}
4363 
4364 	return ret;
4365 }
4366 
4367 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4368 			 unsigned long start, unsigned long len)
4369 {
4370 	const int unit_size = folio_size(eb->folios[0]);
4371 	size_t cur;
4372 	size_t offset;
4373 	char *kaddr;
4374 	char *ptr = (char *)ptrv;
4375 	unsigned long i = get_eb_folio_index(eb, start);
4376 	int ret = 0;
4377 
4378 	if (check_eb_range(eb, start, len))
4379 		return -EINVAL;
4380 
4381 	if (eb->addr)
4382 		return memcmp(ptrv, eb->addr + start, len);
4383 
4384 	offset = get_eb_offset_in_folio(eb, start);
4385 
4386 	while (len > 0) {
4387 		cur = min(len, unit_size - offset);
4388 		kaddr = folio_address(eb->folios[i]);
4389 		ret = memcmp(ptr, kaddr + offset, cur);
4390 		if (ret)
4391 			break;
4392 
4393 		ptr += cur;
4394 		len -= cur;
4395 		offset = 0;
4396 		i++;
4397 	}
4398 	return ret;
4399 }
4400 
4401 /*
4402  * Check that the extent buffer is uptodate.
4403  *
4404  * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
4405  * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4406  */
4407 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
4408 {
4409 	struct btrfs_fs_info *fs_info = eb->fs_info;
4410 	struct folio *folio = eb->folios[i];
4411 
4412 	ASSERT(folio);
4413 
4414 	/*
4415 	 * If we are using the commit root we could potentially clear a page
4416 	 * Uptodate while we're using the extent buffer that we've previously
4417 	 * looked up.  We don't want to complain in this case, as the page was
4418 	 * valid before, we just didn't write it out.  Instead we want to catch
4419 	 * the case where we didn't actually read the block properly, which
4420 	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
4421 	 */
4422 	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4423 		return;
4424 
4425 	if (fs_info->nodesize < PAGE_SIZE) {
4426 		struct folio *folio = eb->folios[0];
4427 
4428 		ASSERT(i == 0);
4429 		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
4430 							 eb->start, eb->len)))
4431 			btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
4432 	} else {
4433 		WARN_ON(!folio_test_uptodate(folio));
4434 	}
4435 }
4436 
4437 static void __write_extent_buffer(const struct extent_buffer *eb,
4438 				  const void *srcv, unsigned long start,
4439 				  unsigned long len, bool use_memmove)
4440 {
4441 	const int unit_size = folio_size(eb->folios[0]);
4442 	size_t cur;
4443 	size_t offset;
4444 	char *kaddr;
4445 	char *src = (char *)srcv;
4446 	unsigned long i = get_eb_folio_index(eb, start);
4447 	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
4448 	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4449 
4450 	if (check_eb_range(eb, start, len))
4451 		return;
4452 
4453 	if (eb->addr) {
4454 		if (use_memmove)
4455 			memmove(eb->addr + start, srcv, len);
4456 		else
4457 			memcpy(eb->addr + start, srcv, len);
4458 		return;
4459 	}
4460 
4461 	offset = get_eb_offset_in_folio(eb, start);
4462 
4463 	while (len > 0) {
4464 		if (check_uptodate)
4465 			assert_eb_folio_uptodate(eb, i);
4466 
4467 		cur = min(len, unit_size - offset);
4468 		kaddr = folio_address(eb->folios[i]);
4469 		if (use_memmove)
4470 			memmove(kaddr + offset, src, cur);
4471 		else
4472 			memcpy(kaddr + offset, src, cur);
4473 
4474 		src += cur;
4475 		len -= cur;
4476 		offset = 0;
4477 		i++;
4478 	}
4479 }
4480 
4481 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4482 			 unsigned long start, unsigned long len)
4483 {
4484 	return __write_extent_buffer(eb, srcv, start, len, false);
4485 }
4486 
4487 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4488 				 unsigned long start, unsigned long len)
4489 {
4490 	const int unit_size = folio_size(eb->folios[0]);
4491 	unsigned long cur = start;
4492 
4493 	if (eb->addr) {
4494 		memset(eb->addr + start, c, len);
4495 		return;
4496 	}
4497 
4498 	while (cur < start + len) {
4499 		unsigned long index = get_eb_folio_index(eb, cur);
4500 		unsigned int offset = get_eb_offset_in_folio(eb, cur);
4501 		unsigned int cur_len = min(start + len - cur, unit_size - offset);
4502 
4503 		assert_eb_folio_uptodate(eb, index);
4504 		memset(folio_address(eb->folios[index]) + offset, c, cur_len);
4505 
4506 		cur += cur_len;
4507 	}
4508 }
4509 
4510 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4511 			   unsigned long len)
4512 {
4513 	if (check_eb_range(eb, start, len))
4514 		return;
4515 	return memset_extent_buffer(eb, 0, start, len);
4516 }
4517 
4518 void copy_extent_buffer_full(const struct extent_buffer *dst,
4519 			     const struct extent_buffer *src)
4520 {
4521 	const int unit_size = folio_size(src->folios[0]);
4522 	unsigned long cur = 0;
4523 
4524 	ASSERT(dst->len == src->len);
4525 
4526 	while (cur < src->len) {
4527 		unsigned long index = get_eb_folio_index(src, cur);
4528 		unsigned long offset = get_eb_offset_in_folio(src, cur);
4529 		unsigned long cur_len = min(src->len, unit_size - offset);
4530 		void *addr = folio_address(src->folios[index]) + offset;
4531 
4532 		write_extent_buffer(dst, addr, cur, cur_len);
4533 
4534 		cur += cur_len;
4535 	}
4536 }
4537 
4538 void copy_extent_buffer(const struct extent_buffer *dst,
4539 			const struct extent_buffer *src,
4540 			unsigned long dst_offset, unsigned long src_offset,
4541 			unsigned long len)
4542 {
4543 	const int unit_size = folio_size(dst->folios[0]);
4544 	u64 dst_len = dst->len;
4545 	size_t cur;
4546 	size_t offset;
4547 	char *kaddr;
4548 	unsigned long i = get_eb_folio_index(dst, dst_offset);
4549 
4550 	if (check_eb_range(dst, dst_offset, len) ||
4551 	    check_eb_range(src, src_offset, len))
4552 		return;
4553 
4554 	WARN_ON(src->len != dst_len);
4555 
4556 	offset = get_eb_offset_in_folio(dst, dst_offset);
4557 
4558 	while (len > 0) {
4559 		assert_eb_folio_uptodate(dst, i);
4560 
4561 		cur = min(len, (unsigned long)(unit_size - offset));
4562 
4563 		kaddr = folio_address(dst->folios[i]);
4564 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
4565 
4566 		src_offset += cur;
4567 		len -= cur;
4568 		offset = 0;
4569 		i++;
4570 	}
4571 }
4572 
4573 /*
4574  * Calculate the folio and offset of the byte containing the given bit number.
4575  *
4576  * @eb:           the extent buffer
4577  * @start:        offset of the bitmap item in the extent buffer
4578  * @nr:           bit number
4579  * @folio_index:  return index of the folio in the extent buffer that contains
4580  *                the given bit number
4581  * @folio_offset: return offset into the folio given by folio_index
4582  *
4583  * This helper hides the ugliness of finding the byte in an extent buffer which
4584  * contains a given bit.
4585  */
4586 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4587 				    unsigned long start, unsigned long nr,
4588 				    unsigned long *folio_index,
4589 				    size_t *folio_offset)
4590 {
4591 	size_t byte_offset = BIT_BYTE(nr);
4592 	size_t offset;
4593 
4594 	/*
4595 	 * The byte we want is the offset of the extent buffer + the offset of
4596 	 * the bitmap item in the extent buffer + the offset of the byte in the
4597 	 * bitmap item.
4598 	 */
4599 	offset = start + offset_in_folio(eb->folios[0], eb->start) + byte_offset;
4600 
4601 	*folio_index = offset >> folio_shift(eb->folios[0]);
4602 	*folio_offset = offset_in_folio(eb->folios[0], offset);
4603 }
4604 
4605 /*
4606  * Determine whether a bit in a bitmap item is set.
4607  *
4608  * @eb:     the extent buffer
4609  * @start:  offset of the bitmap item in the extent buffer
4610  * @nr:     bit number to test
4611  */
4612 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4613 			   unsigned long nr)
4614 {
4615 	unsigned long i;
4616 	size_t offset;
4617 	u8 *kaddr;
4618 
4619 	eb_bitmap_offset(eb, start, nr, &i, &offset);
4620 	assert_eb_folio_uptodate(eb, i);
4621 	kaddr = folio_address(eb->folios[i]);
4622 	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4623 }
4624 
4625 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4626 {
4627 	unsigned long index = get_eb_folio_index(eb, bytenr);
4628 
4629 	if (check_eb_range(eb, bytenr, 1))
4630 		return NULL;
4631 	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4632 }
4633 
4634 /*
4635  * Set an area of a bitmap to 1.
4636  *
4637  * @eb:     the extent buffer
4638  * @start:  offset of the bitmap item in the extent buffer
4639  * @pos:    bit number of the first bit
4640  * @len:    number of bits to set
4641  */
4642 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4643 			      unsigned long pos, unsigned long len)
4644 {
4645 	unsigned int first_byte = start + BIT_BYTE(pos);
4646 	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4647 	const bool same_byte = (first_byte == last_byte);
4648 	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4649 	u8 *kaddr;
4650 
4651 	if (same_byte)
4652 		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4653 
4654 	/* Handle the first byte. */
4655 	kaddr = extent_buffer_get_byte(eb, first_byte);
4656 	*kaddr |= mask;
4657 	if (same_byte)
4658 		return;
4659 
4660 	/* Handle the byte aligned part. */
4661 	ASSERT(first_byte + 1 <= last_byte);
4662 	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4663 
4664 	/* Handle the last byte. */
4665 	kaddr = extent_buffer_get_byte(eb, last_byte);
4666 	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4667 }
4668 
4669 
4670 /*
4671  * Clear an area of a bitmap.
4672  *
4673  * @eb:     the extent buffer
4674  * @start:  offset of the bitmap item in the extent buffer
4675  * @pos:    bit number of the first bit
4676  * @len:    number of bits to clear
4677  */
4678 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4679 				unsigned long start, unsigned long pos,
4680 				unsigned long len)
4681 {
4682 	unsigned int first_byte = start + BIT_BYTE(pos);
4683 	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4684 	const bool same_byte = (first_byte == last_byte);
4685 	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4686 	u8 *kaddr;
4687 
4688 	if (same_byte)
4689 		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4690 
4691 	/* Handle the first byte. */
4692 	kaddr = extent_buffer_get_byte(eb, first_byte);
4693 	*kaddr &= ~mask;
4694 	if (same_byte)
4695 		return;
4696 
4697 	/* Handle the byte aligned part. */
4698 	ASSERT(first_byte + 1 <= last_byte);
4699 	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4700 
4701 	/* Handle the last byte. */
4702 	kaddr = extent_buffer_get_byte(eb, last_byte);
4703 	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4704 }
4705 
4706 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4707 {
4708 	unsigned long distance = (src > dst) ? src - dst : dst - src;
4709 	return distance < len;
4710 }
4711 
4712 void memcpy_extent_buffer(const struct extent_buffer *dst,
4713 			  unsigned long dst_offset, unsigned long src_offset,
4714 			  unsigned long len)
4715 {
4716 	const int unit_size = folio_size(dst->folios[0]);
4717 	unsigned long cur_off = 0;
4718 
4719 	if (check_eb_range(dst, dst_offset, len) ||
4720 	    check_eb_range(dst, src_offset, len))
4721 		return;
4722 
4723 	if (dst->addr) {
4724 		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4725 
4726 		if (use_memmove)
4727 			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4728 		else
4729 			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4730 		return;
4731 	}
4732 
4733 	while (cur_off < len) {
4734 		unsigned long cur_src = cur_off + src_offset;
4735 		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4736 		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4737 		unsigned long cur_len = min(src_offset + len - cur_src,
4738 					    unit_size - folio_off);
4739 		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4740 		const bool use_memmove = areas_overlap(src_offset + cur_off,
4741 						       dst_offset + cur_off, cur_len);
4742 
4743 		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4744 				      use_memmove);
4745 		cur_off += cur_len;
4746 	}
4747 }
4748 
4749 void memmove_extent_buffer(const struct extent_buffer *dst,
4750 			   unsigned long dst_offset, unsigned long src_offset,
4751 			   unsigned long len)
4752 {
4753 	unsigned long dst_end = dst_offset + len - 1;
4754 	unsigned long src_end = src_offset + len - 1;
4755 
4756 	if (check_eb_range(dst, dst_offset, len) ||
4757 	    check_eb_range(dst, src_offset, len))
4758 		return;
4759 
4760 	if (dst_offset < src_offset) {
4761 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4762 		return;
4763 	}
4764 
4765 	if (dst->addr) {
4766 		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4767 		return;
4768 	}
4769 
4770 	while (len > 0) {
4771 		unsigned long src_i;
4772 		size_t cur;
4773 		size_t dst_off_in_folio;
4774 		size_t src_off_in_folio;
4775 		void *src_addr;
4776 		bool use_memmove;
4777 
4778 		src_i = get_eb_folio_index(dst, src_end);
4779 
4780 		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4781 		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4782 
4783 		cur = min_t(unsigned long, len, src_off_in_folio + 1);
4784 		cur = min(cur, dst_off_in_folio + 1);
4785 
4786 		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4787 					 cur + 1;
4788 		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4789 					    cur);
4790 
4791 		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4792 				      use_memmove);
4793 
4794 		dst_end -= cur;
4795 		src_end -= cur;
4796 		len -= cur;
4797 	}
4798 }
4799 
4800 #define GANG_LOOKUP_SIZE	16
4801 static struct extent_buffer *get_next_extent_buffer(
4802 		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4803 {
4804 	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4805 	struct extent_buffer *found = NULL;
4806 	u64 page_start = page_offset(page);
4807 	u64 cur = page_start;
4808 
4809 	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4810 	lockdep_assert_held(&fs_info->buffer_lock);
4811 
4812 	while (cur < page_start + PAGE_SIZE) {
4813 		int ret;
4814 		int i;
4815 
4816 		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4817 				(void **)gang, cur >> fs_info->sectorsize_bits,
4818 				min_t(unsigned int, GANG_LOOKUP_SIZE,
4819 				      PAGE_SIZE / fs_info->nodesize));
4820 		if (ret == 0)
4821 			goto out;
4822 		for (i = 0; i < ret; i++) {
4823 			/* Already beyond page end */
4824 			if (gang[i]->start >= page_start + PAGE_SIZE)
4825 				goto out;
4826 			/* Found one */
4827 			if (gang[i]->start >= bytenr) {
4828 				found = gang[i];
4829 				goto out;
4830 			}
4831 		}
4832 		cur = gang[ret - 1]->start + gang[ret - 1]->len;
4833 	}
4834 out:
4835 	return found;
4836 }
4837 
4838 static int try_release_subpage_extent_buffer(struct page *page)
4839 {
4840 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4841 	u64 cur = page_offset(page);
4842 	const u64 end = page_offset(page) + PAGE_SIZE;
4843 	int ret;
4844 
4845 	while (cur < end) {
4846 		struct extent_buffer *eb = NULL;
4847 
4848 		/*
4849 		 * Unlike try_release_extent_buffer() which uses folio private
4850 		 * to grab buffer, for subpage case we rely on radix tree, thus
4851 		 * we need to ensure radix tree consistency.
4852 		 *
4853 		 * We also want an atomic snapshot of the radix tree, thus go
4854 		 * with spinlock rather than RCU.
4855 		 */
4856 		spin_lock(&fs_info->buffer_lock);
4857 		eb = get_next_extent_buffer(fs_info, page, cur);
4858 		if (!eb) {
4859 			/* No more eb in the page range after or at cur */
4860 			spin_unlock(&fs_info->buffer_lock);
4861 			break;
4862 		}
4863 		cur = eb->start + eb->len;
4864 
4865 		/*
4866 		 * The same as try_release_extent_buffer(), to ensure the eb
4867 		 * won't disappear out from under us.
4868 		 */
4869 		spin_lock(&eb->refs_lock);
4870 		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4871 			spin_unlock(&eb->refs_lock);
4872 			spin_unlock(&fs_info->buffer_lock);
4873 			break;
4874 		}
4875 		spin_unlock(&fs_info->buffer_lock);
4876 
4877 		/*
4878 		 * If tree ref isn't set then we know the ref on this eb is a
4879 		 * real ref, so just return, this eb will likely be freed soon
4880 		 * anyway.
4881 		 */
4882 		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4883 			spin_unlock(&eb->refs_lock);
4884 			break;
4885 		}
4886 
4887 		/*
4888 		 * Here we don't care about the return value, we will always
4889 		 * check the folio private at the end.  And
4890 		 * release_extent_buffer() will release the refs_lock.
4891 		 */
4892 		release_extent_buffer(eb);
4893 	}
4894 	/*
4895 	 * Finally to check if we have cleared folio private, as if we have
4896 	 * released all ebs in the page, the folio private should be cleared now.
4897 	 */
4898 	spin_lock(&page->mapping->i_private_lock);
4899 	if (!folio_test_private(page_folio(page)))
4900 		ret = 1;
4901 	else
4902 		ret = 0;
4903 	spin_unlock(&page->mapping->i_private_lock);
4904 	return ret;
4905 
4906 }
4907 
4908 int try_release_extent_buffer(struct page *page)
4909 {
4910 	struct folio *folio = page_folio(page);
4911 	struct extent_buffer *eb;
4912 
4913 	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
4914 		return try_release_subpage_extent_buffer(page);
4915 
4916 	/*
4917 	 * We need to make sure nobody is changing folio private, as we rely on
4918 	 * folio private as the pointer to extent buffer.
4919 	 */
4920 	spin_lock(&page->mapping->i_private_lock);
4921 	if (!folio_test_private(folio)) {
4922 		spin_unlock(&page->mapping->i_private_lock);
4923 		return 1;
4924 	}
4925 
4926 	eb = folio_get_private(folio);
4927 	BUG_ON(!eb);
4928 
4929 	/*
4930 	 * This is a little awful but should be ok, we need to make sure that
4931 	 * the eb doesn't disappear out from under us while we're looking at
4932 	 * this page.
4933 	 */
4934 	spin_lock(&eb->refs_lock);
4935 	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4936 		spin_unlock(&eb->refs_lock);
4937 		spin_unlock(&page->mapping->i_private_lock);
4938 		return 0;
4939 	}
4940 	spin_unlock(&page->mapping->i_private_lock);
4941 
4942 	/*
4943 	 * If tree ref isn't set then we know the ref on this eb is a real ref,
4944 	 * so just return, this page will likely be freed soon anyway.
4945 	 */
4946 	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4947 		spin_unlock(&eb->refs_lock);
4948 		return 0;
4949 	}
4950 
4951 	return release_extent_buffer(eb);
4952 }
4953 
4954 /*
4955  * Attempt to readahead a child block.
4956  *
4957  * @fs_info:	the fs_info
4958  * @bytenr:	bytenr to read
4959  * @owner_root: objectid of the root that owns this eb
4960  * @gen:	generation for the uptodate check, can be 0
4961  * @level:	level for the eb
4962  *
4963  * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
4964  * normal uptodate check of the eb, without checking the generation.  If we have
4965  * to read the block we will not block on anything.
4966  */
4967 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4968 				u64 bytenr, u64 owner_root, u64 gen, int level)
4969 {
4970 	struct btrfs_tree_parent_check check = {
4971 		.has_first_key = 0,
4972 		.level = level,
4973 		.transid = gen
4974 	};
4975 	struct extent_buffer *eb;
4976 	int ret;
4977 
4978 	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4979 	if (IS_ERR(eb))
4980 		return;
4981 
4982 	if (btrfs_buffer_uptodate(eb, gen, 1)) {
4983 		free_extent_buffer(eb);
4984 		return;
4985 	}
4986 
4987 	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4988 	if (ret < 0)
4989 		free_extent_buffer_stale(eb);
4990 	else
4991 		free_extent_buffer(eb);
4992 }
4993 
4994 /*
4995  * Readahead a node's child block.
4996  *
4997  * @node:	parent node we're reading from
4998  * @slot:	slot in the parent node for the child we want to read
4999  *
5000  * A helper for btrfs_readahead_tree_block, we simply read the bytenr pointed at
5001  * the slot in the node provided.
5002  */
5003 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
5004 {
5005 	btrfs_readahead_tree_block(node->fs_info,
5006 				   btrfs_node_blockptr(node, slot),
5007 				   btrfs_header_owner(node),
5008 				   btrfs_node_ptr_generation(node, slot),
5009 				   btrfs_header_level(node) - 1);
5010 }
5011