xref: /linux/fs/btrfs/extent_io.c (revision 5755be5f15d9e651ed433e5dbf7b7b968efb3064)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/sched/mm.h>
10 #include <linux/spinlock.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/prefetch.h>
16 #include <linux/fsverity.h>
17 #include "misc.h"
18 #include "extent_io.h"
19 #include "extent-io-tree.h"
20 #include "extent_map.h"
21 #include "ctree.h"
22 #include "btrfs_inode.h"
23 #include "bio.h"
24 #include "locking.h"
25 #include "rcu-string.h"
26 #include "backref.h"
27 #include "disk-io.h"
28 #include "subpage.h"
29 #include "zoned.h"
30 #include "block-group.h"
31 #include "compression.h"
32 #include "fs.h"
33 #include "accessors.h"
34 #include "file-item.h"
35 #include "file.h"
36 #include "dev-replace.h"
37 #include "super.h"
38 #include "transaction.h"
39 
40 static struct kmem_cache *extent_buffer_cache;
41 
42 #ifdef CONFIG_BTRFS_DEBUG
43 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
44 {
45 	struct btrfs_fs_info *fs_info = eb->fs_info;
46 	unsigned long flags;
47 
48 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
49 	list_add(&eb->leak_list, &fs_info->allocated_ebs);
50 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
51 }
52 
53 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
54 {
55 	struct btrfs_fs_info *fs_info = eb->fs_info;
56 	unsigned long flags;
57 
58 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
59 	list_del(&eb->leak_list);
60 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
61 }
62 
63 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
64 {
65 	struct extent_buffer *eb;
66 	unsigned long flags;
67 
68 	/*
69 	 * If we didn't get into open_ctree our allocated_ebs will not be
70 	 * initialized, so just skip this.
71 	 */
72 	if (!fs_info->allocated_ebs.next)
73 		return;
74 
75 	WARN_ON(!list_empty(&fs_info->allocated_ebs));
76 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
77 	while (!list_empty(&fs_info->allocated_ebs)) {
78 		eb = list_first_entry(&fs_info->allocated_ebs,
79 				      struct extent_buffer, leak_list);
80 		pr_err(
81 	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
82 		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
83 		       btrfs_header_owner(eb));
84 		list_del(&eb->leak_list);
85 		kmem_cache_free(extent_buffer_cache, eb);
86 	}
87 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
88 }
89 #else
90 #define btrfs_leak_debug_add_eb(eb)			do {} while (0)
91 #define btrfs_leak_debug_del_eb(eb)			do {} while (0)
92 #endif
93 
94 /*
95  * Structure to record info about the bio being assembled, and other info like
96  * how many bytes are left before the stripe/ordered extent boundary.
97  */
98 struct btrfs_bio_ctrl {
99 	struct btrfs_bio *bbio;
100 	enum btrfs_compression_type compress_type;
101 	u32 len_to_oe_boundary;
102 	blk_opf_t opf;
103 	btrfs_bio_end_io_t end_io_func;
104 	struct writeback_control *wbc;
105 };
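/*
 * Illustrative note on typical usage (a sketch, not a definitive contract):
 * the read path below initializes the control with only the operation, e.g.
 *
 *	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
 *
 * as done in btrfs_read_folio(), with ->end_io_func filled in by
 * btrfs_do_readpage().  Write paths additionally provide ->wbc before
 * submit_extent_page() is called.
 */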
106 
107 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
108 {
109 	struct btrfs_bio *bbio = bio_ctrl->bbio;
110 
111 	if (!bbio)
112 		return;
113 
114 	/* Caller should ensure the bio has at least some range added */
115 	ASSERT(bbio->bio.bi_iter.bi_size);
116 
117 	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
118 	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
119 		btrfs_submit_compressed_read(bbio);
120 	else
121 		btrfs_submit_bio(bbio, 0);
122 
123 	/* The bbio is owned by the end_io handler now */
124 	bio_ctrl->bbio = NULL;
125 }
126 
127 /*
128  * Submit or fail the current bio in the bio_ctrl structure.
129  */
130 static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
131 {
132 	struct btrfs_bio *bbio = bio_ctrl->bbio;
133 
134 	if (!bbio)
135 		return;
136 
137 	if (ret) {
138 		ASSERT(ret < 0);
139 		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
140 		/* The bio is owned by the end_io handler now */
141 		bio_ctrl->bbio = NULL;
142 	} else {
143 		submit_one_bio(bio_ctrl);
144 	}
145 }
146 
147 int __init extent_buffer_init_cachep(void)
148 {
149 	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
150 			sizeof(struct extent_buffer), 0,
151 			SLAB_MEM_SPREAD, NULL);
152 	if (!extent_buffer_cache)
153 		return -ENOMEM;
154 
155 	return 0;
156 }
157 
158 void __cold extent_buffer_free_cachep(void)
159 {
160 	/*
161 	 * Make sure all delayed RCU frees are flushed before we
162 	 * destroy the cache.
163 	 */
164 	rcu_barrier();
165 	kmem_cache_destroy(extent_buffer_cache);
166 }
167 
168 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
169 {
170 	unsigned long index = start >> PAGE_SHIFT;
171 	unsigned long end_index = end >> PAGE_SHIFT;
172 	struct page *page;
173 
174 	while (index <= end_index) {
175 		page = find_get_page(inode->i_mapping, index);
176 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
177 		clear_page_dirty_for_io(page);
178 		put_page(page);
179 		index++;
180 	}
181 }
182 
183 static void process_one_page(struct btrfs_fs_info *fs_info,
184 			     struct page *page, struct page *locked_page,
185 			     unsigned long page_ops, u64 start, u64 end)
186 {
187 	u32 len;
188 
189 	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
190 	len = end + 1 - start;
191 
192 	if (page_ops & PAGE_SET_ORDERED)
193 		btrfs_page_clamp_set_ordered(fs_info, page, start, len);
194 	if (page_ops & PAGE_START_WRITEBACK) {
195 		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
196 		btrfs_page_clamp_set_writeback(fs_info, page, start, len);
197 	}
198 	if (page_ops & PAGE_END_WRITEBACK)
199 		btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
200 
201 	if (page != locked_page && (page_ops & PAGE_UNLOCK))
202 		btrfs_page_end_writer_lock(fs_info, page, start, len);
203 }
204 
205 static void __process_pages_contig(struct address_space *mapping,
206 				   struct page *locked_page, u64 start, u64 end,
207 				   unsigned long page_ops)
208 {
209 	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
210 	pgoff_t start_index = start >> PAGE_SHIFT;
211 	pgoff_t end_index = end >> PAGE_SHIFT;
212 	pgoff_t index = start_index;
213 	struct folio_batch fbatch;
214 	int i;
215 
216 	folio_batch_init(&fbatch);
217 	while (index <= end_index) {
218 		int found_folios;
219 
220 		found_folios = filemap_get_folios_contig(mapping, &index,
221 				end_index, &fbatch);
222 		for (i = 0; i < found_folios; i++) {
223 			struct folio *folio = fbatch.folios[i];
224 
225 			process_one_page(fs_info, &folio->page, locked_page,
226 					 page_ops, start, end);
227 		}
228 		folio_batch_release(&fbatch);
229 		cond_resched();
230 	}
231 }
232 
233 static noinline void __unlock_for_delalloc(struct inode *inode,
234 					   struct page *locked_page,
235 					   u64 start, u64 end)
236 {
237 	unsigned long index = start >> PAGE_SHIFT;
238 	unsigned long end_index = end >> PAGE_SHIFT;
239 
240 	ASSERT(locked_page);
241 	if (index == locked_page->index && end_index == index)
242 		return;
243 
244 	__process_pages_contig(inode->i_mapping, locked_page, start, end,
245 			       PAGE_UNLOCK);
246 }
247 
248 static noinline int lock_delalloc_pages(struct inode *inode,
249 					struct page *locked_page,
250 					u64 start,
251 					u64 end)
252 {
253 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
254 	struct address_space *mapping = inode->i_mapping;
255 	pgoff_t start_index = start >> PAGE_SHIFT;
256 	pgoff_t end_index = end >> PAGE_SHIFT;
257 	pgoff_t index = start_index;
258 	u64 processed_end = start;
259 	struct folio_batch fbatch;
260 
261 	if (index == locked_page->index && index == end_index)
262 		return 0;
263 
264 	folio_batch_init(&fbatch);
265 	while (index <= end_index) {
266 		unsigned int found_folios, i;
267 
268 		found_folios = filemap_get_folios_contig(mapping, &index,
269 				end_index, &fbatch);
270 		if (found_folios == 0)
271 			goto out;
272 
273 		for (i = 0; i < found_folios; i++) {
274 			struct page *page = &fbatch.folios[i]->page;
275 			u32 len = end + 1 - start;
276 
277 			if (page == locked_page)
278 				continue;
279 
280 			if (btrfs_page_start_writer_lock(fs_info, page, start,
281 							 len))
282 				goto out;
283 
284 			if (!PageDirty(page) || page->mapping != mapping) {
285 				btrfs_page_end_writer_lock(fs_info, page, start,
286 							   len);
287 				goto out;
288 			}
289 
290 			processed_end = page_offset(page) + PAGE_SIZE - 1;
291 		}
292 		folio_batch_release(&fbatch);
293 		cond_resched();
294 	}
295 
296 	return 0;
297 out:
298 	folio_batch_release(&fbatch);
299 	if (processed_end > start)
300 		__unlock_for_delalloc(inode, locked_page, start, processed_end);
301 	return -EAGAIN;
302 }
303 
304 /*
305  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
306  * more than @max_bytes.
307  *
308  * @start:	The original start bytenr to search.
309  *		Will store the extent range start bytenr.
310  * @end:	The original end bytenr of the search range
311  *		Will store the extent range end bytenr.
312  *
313  * Return true if we find a delalloc range which starts inside the original
314  * range, and @start/@end will store the delalloc range start/end.
315  *
316  * Return false if we can't find any delalloc range which starts inside the
317  * original range, and @start/@end will be the non-delalloc range start/end.
318  */
319 EXPORT_FOR_TESTS
320 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
321 				    struct page *locked_page, u64 *start,
322 				    u64 *end)
323 {
324 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
325 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
326 	const u64 orig_start = *start;
327 	const u64 orig_end = *end;
328 	/* The sanity tests may not set a valid fs_info. */
329 	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
330 	u64 delalloc_start;
331 	u64 delalloc_end;
332 	bool found;
333 	struct extent_state *cached_state = NULL;
334 	int ret;
335 	int loops = 0;
336 
337 	/* Caller should pass a valid @end to indicate the search range end */
338 	ASSERT(orig_end > orig_start);
339 
340 	/* The range should at least cover part of the page */
341 	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
342 		 orig_end <= page_offset(locked_page)));
343 again:
344 	/* step one, find a bunch of delalloc bytes starting at start */
345 	delalloc_start = *start;
346 	delalloc_end = 0;
347 	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
348 					  max_bytes, &cached_state);
349 	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
350 		*start = delalloc_start;
351 
352 		/* @delalloc_end can be -1, never go beyond @orig_end */
353 		*end = min(delalloc_end, orig_end);
354 		free_extent_state(cached_state);
355 		return false;
356 	}
357 
358 	/*
359 	 * start comes from the offset of locked_page.  We have to lock
360 	 * pages in order, so we can't process delalloc bytes before
361 	 * locked_page
362 	 */
363 	if (delalloc_start < *start)
364 		delalloc_start = *start;
365 
366 	/*
367 	 * make sure to limit the number of pages we try to lock down
368 	 */
369 	if (delalloc_end + 1 - delalloc_start > max_bytes)
370 		delalloc_end = delalloc_start + max_bytes - 1;
371 
372 	/* step two, lock all the pages after the page that has start */
373 	ret = lock_delalloc_pages(inode, locked_page,
374 				  delalloc_start, delalloc_end);
375 	ASSERT(!ret || ret == -EAGAIN);
376 	if (ret == -EAGAIN) {
377 		/* Some of the pages are gone, let's avoid looping by
378 		 * shortening the size of the delalloc range we're searching.
379 		 */
380 		free_extent_state(cached_state);
381 		cached_state = NULL;
382 		if (!loops) {
383 			max_bytes = PAGE_SIZE;
384 			loops = 1;
385 			goto again;
386 		} else {
387 			found = false;
388 			goto out_failed;
389 		}
390 	}
391 
392 	/* step three, lock the state bits for the whole range */
393 	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
394 
395 	/* then test to make sure it is all still delalloc */
396 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
397 			     EXTENT_DELALLOC, cached_state);
398 	if (!ret) {
399 		unlock_extent(tree, delalloc_start, delalloc_end,
400 			      &cached_state);
401 		__unlock_for_delalloc(inode, locked_page,
402 			      delalloc_start, delalloc_end);
403 		cond_resched();
404 		goto again;
405 	}
406 	free_extent_state(cached_state);
407 	*start = delalloc_start;
408 	*end = delalloc_end;
409 out_failed:
410 	return found;
411 }
412 
413 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
414 				  struct page *locked_page,
415 				  u32 clear_bits, unsigned long page_ops)
416 {
417 	clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
418 
419 	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
420 			       start, end, page_ops);
421 }
422 
423 static bool btrfs_verify_page(struct page *page, u64 start)
424 {
425 	if (!fsverity_active(page->mapping->host) ||
426 	    PageUptodate(page) ||
427 	    start >= i_size_read(page->mapping->host))
428 		return true;
429 	return fsverity_verify_page(page);
430 }
431 
432 static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
433 {
434 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
435 
436 	ASSERT(page_offset(page) <= start &&
437 	       start + len <= page_offset(page) + PAGE_SIZE);
438 
439 	if (uptodate && btrfs_verify_page(page, start))
440 		btrfs_page_set_uptodate(fs_info, page, start, len);
441 	else
442 		btrfs_page_clear_uptodate(fs_info, page, start, len);
443 
444 	if (!btrfs_is_subpage(fs_info, page))
445 		unlock_page(page);
446 	else
447 		btrfs_subpage_end_reader(fs_info, page, start, len);
448 }
449 
450 /*
451  * after a writepage IO is done, we need to:
452  * clear the uptodate bits on error
453  * clear the writeback bits in the extent tree for this IO
454  * end_page_writeback if the page has no more pending IO
455  *
456  * Scheduling is not allowed, so the extent state tree is expected
457  * to have one and only one object corresponding to this IO.
458  */
459 static void end_bio_extent_writepage(struct btrfs_bio *bbio)
460 {
461 	struct bio *bio = &bbio->bio;
462 	int error = blk_status_to_errno(bio->bi_status);
463 	struct bio_vec *bvec;
464 	struct bvec_iter_all iter_all;
465 
466 	ASSERT(!bio_flagged(bio, BIO_CLONED));
467 	bio_for_each_segment_all(bvec, bio, iter_all) {
468 		struct page *page = bvec->bv_page;
469 		struct inode *inode = page->mapping->host;
470 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
471 		const u32 sectorsize = fs_info->sectorsize;
472 		u64 start = page_offset(page) + bvec->bv_offset;
473 		u32 len = bvec->bv_len;
474 
475 		/* Our read/write should always be sector aligned. */
476 		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
477 			btrfs_err(fs_info,
478 		"partial page write in btrfs with offset %u and length %u",
479 				  bvec->bv_offset, bvec->bv_len);
480 		else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
481 			btrfs_info(fs_info,
482 		"incomplete page write with offset %u and length %u",
483 				   bvec->bv_offset, bvec->bv_len);
484 
485 		btrfs_finish_ordered_extent(bbio->ordered, page, start, len, !error);
486 		if (error)
487 			mapping_set_error(page->mapping, error);
488 		btrfs_page_clear_writeback(fs_info, page, start, len);
489 	}
490 
491 	bio_put(bio);
492 }
493 
494 /*
495  * Record previously processed extent range
496  *
497  * This lets endio_readpage_release_extent() handle a full extent range,
498  * reducing the number of extent io operations.
499  */
500 struct processed_extent {
501 	struct btrfs_inode *inode;
502 	/* Start of the range in @inode */
503 	u64 start;
504 	/* End of the range in @inode */
505 	u64 end;
506 	bool uptodate;
507 };
508 
509 /*
510  * Try to release processed extent range
511  *
512  * May not release the extent range right now if the current range is
513  * contiguous to the processed extent.
514  *
515  * Will release the processed extent when @inode or @uptodate changes, or when
516  * the range is no longer contiguous to the processed range.
517  *
518  * Passing @inode == NULL will force processed extent to be released.
519  */
520 static void endio_readpage_release_extent(struct processed_extent *processed,
521 			      struct btrfs_inode *inode, u64 start, u64 end,
522 			      bool uptodate)
523 {
524 	struct extent_state *cached = NULL;
525 	struct extent_io_tree *tree;
526 
527 	/* The first extent, initialize @processed */
528 	if (!processed->inode)
529 		goto update;
530 
531 	/*
532 	 * Contiguous to the processed extent, just update the end.
533 	 *
534 	 * Several things to notice:
535 	 *
536 	 * - bio can be merged as long as the on-disk bytenr is contiguous
537 	 *   This means we can have pages belonging to other inodes, thus we need
538 	 *   to check if the inode still matches.
539 	 * - bvec can contain a range beyond the current page for a multi-page bvec
540 	 *   Thus we need to do the processed->end + 1 >= start check
541 	 */
542 	if (processed->inode == inode && processed->uptodate == uptodate &&
543 	    processed->end + 1 >= start && end >= processed->end) {
544 		processed->end = end;
545 		return;
546 	}
547 
548 	tree = &processed->inode->io_tree;
549 	/*
550 	 * Now we don't have range contiguous to the processed range, release
551 	 * the processed range now.
552 	 */
553 	unlock_extent(tree, processed->start, processed->end, &cached);
554 
555 update:
556 	/* Update processed to current range */
557 	processed->inode = inode;
558 	processed->start = start;
559 	processed->end = end;
560 	processed->uptodate = uptodate;
561 }
562 
563 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
564 {
565 	ASSERT(PageLocked(page));
566 	if (!btrfs_is_subpage(fs_info, page))
567 		return;
568 
569 	ASSERT(PagePrivate(page));
570 	btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
571 }
572 
573 /*
574  * after a readpage IO is done, we need to:
575  * clear the uptodate bits on error
576  * set the uptodate bits if things worked
577  * set the page up to date if all extents in the tree are uptodate
578  * clear the lock bit in the extent tree
579  * unlock the page if there are no other extents locked for it
580  *
581  * Scheduling is not allowed, so the extent state tree is expected
582  * to have one and only one object corresponding to this IO.
583  */
584 static void end_bio_extent_readpage(struct btrfs_bio *bbio)
585 {
586 	struct bio *bio = &bbio->bio;
587 	struct bio_vec *bvec;
588 	struct processed_extent processed = { 0 };
589 	/*
590 	 * The offset from the beginning of the bio; since one bio can never be
591 	 * larger than UINT_MAX, a u32 here is enough.
592 	 */
593 	u32 bio_offset = 0;
594 	struct bvec_iter_all iter_all;
595 
596 	ASSERT(!bio_flagged(bio, BIO_CLONED));
597 	bio_for_each_segment_all(bvec, bio, iter_all) {
598 		bool uptodate = !bio->bi_status;
599 		struct page *page = bvec->bv_page;
600 		struct inode *inode = page->mapping->host;
601 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
602 		const u32 sectorsize = fs_info->sectorsize;
603 		u64 start;
604 		u64 end;
605 		u32 len;
606 
607 		btrfs_debug(fs_info,
608 			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
609 			bio->bi_iter.bi_sector, bio->bi_status,
610 			bbio->mirror_num);
611 
612 		/*
613 		 * We always issue full-sector reads, but if some block in a
614 		 * page fails to read, blk_update_request() will advance
615 		 * bv_offset and adjust bv_len to compensate.  Print a warning
616 		 * for unaligned offsets, and an error if they don't add up to
617 		 * a full sector.
618 		 */
619 		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
620 			btrfs_err(fs_info,
621 		"partial page read in btrfs with offset %u and length %u",
622 				  bvec->bv_offset, bvec->bv_len);
623 		else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
624 				     sectorsize))
625 			btrfs_info(fs_info,
626 		"incomplete page read with offset %u and length %u",
627 				   bvec->bv_offset, bvec->bv_len);
628 
629 		start = page_offset(page) + bvec->bv_offset;
630 		end = start + bvec->bv_len - 1;
631 		len = bvec->bv_len;
632 
633 		if (likely(uptodate)) {
634 			loff_t i_size = i_size_read(inode);
635 			pgoff_t end_index = i_size >> PAGE_SHIFT;
636 
637 			/*
638 			 * Zero out the remaining part if this range straddles
639 			 * i_size.
640 			 *
641 			 * Here we should only zero the range inside the bvec,
642 			 * not touch anything else.
643 			 *
644 			 * NOTE: i_size is exclusive while end is inclusive.
645 			 */
646 			if (page->index == end_index && i_size <= end) {
647 				u32 zero_start = max(offset_in_page(i_size),
648 						     offset_in_page(start));
649 
650 				zero_user_segment(page, zero_start,
651 						  offset_in_page(end) + 1);
652 			}
653 		}
654 
655 		/* Update page status and unlock. */
656 		end_page_read(page, uptodate, start, len);
657 		endio_readpage_release_extent(&processed, BTRFS_I(inode),
658 					      start, end, uptodate);
659 
660 		ASSERT(bio_offset + len > bio_offset);
661 		bio_offset += len;
662 
663 	}
664 	/* Release the last extent */
665 	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
666 	bio_put(bio);
667 }
668 
669 /*
670  * Populate every free slot in a provided array with pages.
671  *
672  * @nr_pages:   number of pages to allocate
673  * @page_array: the array to fill with pages; any existing non-null entries in
674  * 		the array will be skipped
675  *
676  * Return: 0        if all pages were able to be allocated;
677  *         -ENOMEM  otherwise, the partially allocated pages are freed and
678  *                  the array slots zeroed
679  */
680 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
681 {
682 	unsigned int allocated;
683 
684 	for (allocated = 0; allocated < nr_pages;) {
685 		unsigned int last = allocated;
686 
687 		allocated = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);
688 
689 		if (allocated == nr_pages)
690 			return 0;
691 
692 		/*
693 		 * During this iteration, no page could be allocated, even
694 		 * though alloc_pages_bulk_array() falls back to alloc_page()
695 		 * if it could not bulk-allocate. So we must be out of memory.
696 		 */
697 		if (allocated == last) {
698 			for (int i = 0; i < allocated; i++) {
699 				__free_page(page_array[i]);
700 				page_array[i] = NULL;
701 			}
702 			return -ENOMEM;
703 		}
704 
705 		memalloc_retry_wait(GFP_NOFS);
706 	}
707 	return 0;
708 }
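/*
 * A minimal usage sketch for btrfs_alloc_page_array() (illustrative only; the
 * function name and the fixed array size are made up for the example):
 */
static int __maybe_unused example_alloc_page_array(void)
{
	struct page *pages[4] = { NULL };
	unsigned int i;
	int ret;

	/* Fills every NULL slot; on -ENOMEM partial allocations are freed. */
	ret = btrfs_alloc_page_array(ARRAY_SIZE(pages), pages);
	if (ret)
		return ret;

	/* ... use the pages ... */

	for (i = 0; i < ARRAY_SIZE(pages); i++)
		__free_page(pages[i]);
	return 0;
}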
709 
710 static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
711 				struct page *page, u64 disk_bytenr,
712 				unsigned int pg_offset)
713 {
714 	struct bio *bio = &bio_ctrl->bbio->bio;
715 	struct bio_vec *bvec = bio_last_bvec_all(bio);
716 	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
717 
718 	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
719 		/*
720 		 * For compression, all IO should have its logical bytenr set
721 		 * to the starting bytenr of the compressed extent.
722 		 */
723 		return bio->bi_iter.bi_sector == sector;
724 	}
725 
726 	/*
727 	 * The contig check requires the following conditions to be met:
728 	 *
729 	 * 1) The pages belong to the same inode
730 	 *    This is implied by the call chain.
731 	 *
732 	 * 2) The range has adjacent logical bytenr
733 	 *
734 	 * 3) The range has adjacent file offset
735 	 *    This is required for the usage of btrfs_bio->file_offset.
736 	 */
737 	return bio_end_sector(bio) == sector &&
738 		page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
739 		page_offset(page) + pg_offset;
740 }
741 
742 static void alloc_new_bio(struct btrfs_inode *inode,
743 			  struct btrfs_bio_ctrl *bio_ctrl,
744 			  u64 disk_bytenr, u64 file_offset)
745 {
746 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
747 	struct btrfs_bio *bbio;
748 
749 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
750 			       bio_ctrl->end_io_func, NULL);
751 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
752 	bbio->inode = inode;
753 	bbio->file_offset = file_offset;
754 	bio_ctrl->bbio = bbio;
755 	bio_ctrl->len_to_oe_boundary = U32_MAX;
756 
757 	/* Limit data write bios to the ordered boundary. */
758 	if (bio_ctrl->wbc) {
759 		struct btrfs_ordered_extent *ordered;
760 
761 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
762 		if (ordered) {
763 			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
764 					ordered->file_offset +
765 					ordered->disk_num_bytes - file_offset);
766 			bbio->ordered = ordered;
767 		}
768 
769 		/*
770 		 * Pick the last added device to support cgroup writeback.  For
771 		 * multi-device file systems this means blk-cgroup policies have
772 		 * to always be set on the last added/replaced device.
773 		 * This is a bit odd but has been like that for a long time.
774 		 */
775 		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
776 		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
777 	}
778 }
779 
780 /*
781  * @disk_bytenr: logical bytenr where the write will be
782  * @page:	page to add to the bio
783  * @size:	portion of page that we want to write to
784  * @pg_offset:	offset inside the page for the new bio, also used to check
785  *              whether we are adding a contiguous page to the previous one
786  *
787  * This will either add the page into the existing @bio_ctrl->bbio, or allocate a
788  * new one in @bio_ctrl->bbio.
789  * The mirror number for this IO should already be initialized in
790  * @bio_ctrl->mirror_num.
791  */
792 static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
793 			       u64 disk_bytenr, struct page *page,
794 			       size_t size, unsigned long pg_offset)
795 {
796 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
797 
798 	ASSERT(pg_offset + size <= PAGE_SIZE);
799 	ASSERT(bio_ctrl->end_io_func);
800 
801 	if (bio_ctrl->bbio &&
802 	    !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
803 		submit_one_bio(bio_ctrl);
804 
805 	do {
806 		u32 len = size;
807 
808 		/* Allocate new bio if needed */
809 		if (!bio_ctrl->bbio) {
810 			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
811 				      page_offset(page) + pg_offset);
812 		}
813 
814 		/* Cap to the current ordered extent boundary if there is one. */
815 		if (len > bio_ctrl->len_to_oe_boundary) {
816 			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
817 			ASSERT(is_data_inode(&inode->vfs_inode));
818 			len = bio_ctrl->len_to_oe_boundary;
819 		}
820 
821 		if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
822 			/* bio full: move on to a new one */
823 			submit_one_bio(bio_ctrl);
824 			continue;
825 		}
826 
827 		if (bio_ctrl->wbc)
828 			wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
829 
830 		size -= len;
831 		pg_offset += len;
832 		disk_bytenr += len;
833 
834 		/*
835 		 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
836 		 * sector aligned.  alloc_new_bio() then sets it to the end of
837 		 * our ordered extent for writes into zoned devices.
838 		 *
839 		 * When len_to_oe_boundary is tracking an ordered extent, we
840 		 * trust the ordered extent code to align things properly, and
841 		 * the check above to cap our write to the ordered extent
842 		 * boundary is correct.
843 		 *
844 		 * When len_to_oe_boundary is U32_MAX, the cap above would
845 		 * result in a 4095 byte IO for the last page right before
846 		 * we hit the bio limit of UINT_MAX.  bio_add_page() has all
847 		 * the checks required to make sure we don't overflow the bio,
848 		 * and we should just ignore len_to_oe_boundary completely
849 		 * unless we're using it to track an ordered extent.
850 		 *
851 		 * It's pretty hard to make a bio sized U32_MAX, but it can
852 		 * happen when the page cache is able to feed us contiguous
853 		 * pages for large extents.
854 		 */
855 		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
856 			bio_ctrl->len_to_oe_boundary -= len;
857 
858 		/* Ordered extent boundary: move on to a new bio. */
859 		if (bio_ctrl->len_to_oe_boundary == 0)
860 			submit_one_bio(bio_ctrl);
861 	} while (size);
862 }
863 
864 static int attach_extent_buffer_page(struct extent_buffer *eb,
865 				     struct page *page,
866 				     struct btrfs_subpage *prealloc)
867 {
868 	struct btrfs_fs_info *fs_info = eb->fs_info;
869 	int ret = 0;
870 
871 	/*
872 	 * If the page is mapped to the btree inode, we should hold the private
873 	 * lock to prevent races.
874 	 * For cloned or dummy extent buffers, their pages are not mapped and
875 	 * will not race with any other ebs.
876 	 */
877 	if (page->mapping)
878 		lockdep_assert_held(&page->mapping->private_lock);
879 
880 	if (fs_info->nodesize >= PAGE_SIZE) {
881 		if (!PagePrivate(page))
882 			attach_page_private(page, eb);
883 		else
884 			WARN_ON(page->private != (unsigned long)eb);
885 		return 0;
886 	}
887 
888 	/* Already mapped, just free prealloc */
889 	if (PagePrivate(page)) {
890 		btrfs_free_subpage(prealloc);
891 		return 0;
892 	}
893 
894 	if (prealloc)
895 		/* Has preallocated memory for subpage */
896 		attach_page_private(page, prealloc);
897 	else
898 		/* Do new allocation to attach subpage */
899 		ret = btrfs_attach_subpage(fs_info, page,
900 					   BTRFS_SUBPAGE_METADATA);
901 	return ret;
902 }
903 
904 int set_page_extent_mapped(struct page *page)
905 {
906 	struct btrfs_fs_info *fs_info;
907 
908 	ASSERT(page->mapping);
909 
910 	if (PagePrivate(page))
911 		return 0;
912 
913 	fs_info = btrfs_sb(page->mapping->host->i_sb);
914 
915 	if (btrfs_is_subpage(fs_info, page))
916 		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
917 
918 	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
919 	return 0;
920 }
921 
922 void clear_page_extent_mapped(struct page *page)
923 {
924 	struct btrfs_fs_info *fs_info;
925 
926 	ASSERT(page->mapping);
927 
928 	if (!PagePrivate(page))
929 		return;
930 
931 	fs_info = btrfs_sb(page->mapping->host->i_sb);
932 	if (btrfs_is_subpage(fs_info, page))
933 		return btrfs_detach_subpage(fs_info, page);
934 
935 	detach_page_private(page);
936 }
937 
938 static struct extent_map *
939 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
940 		 u64 start, u64 len, struct extent_map **em_cached)
941 {
942 	struct extent_map *em;
943 
944 	if (em_cached && *em_cached) {
945 		em = *em_cached;
946 		if (extent_map_in_tree(em) && start >= em->start &&
947 		    start < extent_map_end(em)) {
948 			refcount_inc(&em->refs);
949 			return em;
950 		}
951 
952 		free_extent_map(em);
953 		*em_cached = NULL;
954 	}
955 
956 	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
957 	if (em_cached && !IS_ERR(em)) {
958 		BUG_ON(*em_cached);
959 		refcount_inc(&em->refs);
960 		*em_cached = em;
961 	}
962 	return em;
963 }
964 /*
965  * Basic readpage implementation.  Locked extent state structs are inserted
966  * into the tree and removed when the IO is done (by the end_io
967  * handlers).
968  * XXX JDM: This needs looking at to ensure proper page locking.
969  * Return 0 on success, otherwise return an error.
970  */
971 static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
972 		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
973 {
974 	struct inode *inode = page->mapping->host;
975 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
976 	u64 start = page_offset(page);
977 	const u64 end = start + PAGE_SIZE - 1;
978 	u64 cur = start;
979 	u64 extent_offset;
980 	u64 last_byte = i_size_read(inode);
981 	u64 block_start;
982 	struct extent_map *em;
983 	int ret = 0;
984 	size_t pg_offset = 0;
985 	size_t iosize;
986 	size_t blocksize = inode->i_sb->s_blocksize;
987 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
988 
989 	ret = set_page_extent_mapped(page);
990 	if (ret < 0) {
991 		unlock_extent(tree, start, end, NULL);
992 		unlock_page(page);
993 		return ret;
994 	}
995 
996 	if (page->index == last_byte >> PAGE_SHIFT) {
997 		size_t zero_offset = offset_in_page(last_byte);
998 
999 		if (zero_offset) {
1000 			iosize = PAGE_SIZE - zero_offset;
1001 			memzero_page(page, zero_offset, iosize);
1002 		}
1003 	}
1004 	bio_ctrl->end_io_func = end_bio_extent_readpage;
1005 	begin_page_read(fs_info, page);
1006 	while (cur <= end) {
1007 		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1008 		bool force_bio_submit = false;
1009 		u64 disk_bytenr;
1010 
1011 		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1012 		if (cur >= last_byte) {
1013 			iosize = PAGE_SIZE - pg_offset;
1014 			memzero_page(page, pg_offset, iosize);
1015 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1016 			end_page_read(page, true, cur, iosize);
1017 			break;
1018 		}
1019 		em = __get_extent_map(inode, page, pg_offset, cur,
1020 				      end - cur + 1, em_cached);
1021 		if (IS_ERR(em)) {
1022 			unlock_extent(tree, cur, end, NULL);
1023 			end_page_read(page, false, cur, end + 1 - cur);
1024 			return PTR_ERR(em);
1025 		}
1026 		extent_offset = cur - em->start;
1027 		BUG_ON(extent_map_end(em) <= cur);
1028 		BUG_ON(end < cur);
1029 
1030 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
1031 			compress_type = em->compress_type;
1032 
1033 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1034 		iosize = ALIGN(iosize, blocksize);
1035 		if (compress_type != BTRFS_COMPRESS_NONE)
1036 			disk_bytenr = em->block_start;
1037 		else
1038 			disk_bytenr = em->block_start + extent_offset;
1039 		block_start = em->block_start;
1040 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
1041 			block_start = EXTENT_MAP_HOLE;
1042 
1043 		/*
1044 		 * If we have a file range that points to a compressed extent
1045 		 * and it's followed by a consecutive file range that points
1046 		 * to the same compressed extent (possibly with a different
1047 		 * offset and/or length, so it either points to the whole extent
1048 		 * or only part of it), we must make sure we do not submit a
1049 		 * single bio to populate the pages for the 2 ranges because
1050 		 * this makes the compressed extent read zero out the pages
1051 		 * belonging to the 2nd range. Imagine the following scenario:
1052 		 *
1053 		 *  File layout
1054 		 *  [0 - 8K]                     [8K - 24K]
1055 		 *    |                               |
1056 		 *    |                               |
1057 		 * points to extent X,         points to extent X,
1058 		 * offset 4K, length of 8K     offset 0, length 16K
1059 		 *
1060 		 * [extent X, compressed length = 4K uncompressed length = 16K]
1061 		 *
1062 		 * If the bio to read the compressed extent covers both ranges,
1063 		 * it will decompress extent X into the pages belonging to the
1064 		 * first range and then it will stop, zeroing out the remaining
1065 		 * pages that belong to the other range that points to extent X.
1066 		 * So here we make sure we submit 2 bios, one for the first
1067 		 * range and another one for the second range. Both will target
1068 		 * the same physical extent from disk, but we can't currently
1069 		 * make the compressed bio endio callback populate the pages
1070 		 * for both ranges because each compressed bio is tightly
1071 		 * coupled with a single extent map, and each range can have
1072 		 * an extent map with a different offset value relative to the
1073 		 * uncompressed data of our extent and different lengths. This
1074 		 * is a corner case so we prioritize correctness over
1075 		 * non-optimal behavior (submitting 2 bios for the same extent).
1076 		 */
1077 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
1078 		    prev_em_start && *prev_em_start != (u64)-1 &&
1079 		    *prev_em_start != em->start)
1080 			force_bio_submit = true;
1081 
1082 		if (prev_em_start)
1083 			*prev_em_start = em->start;
1084 
1085 		free_extent_map(em);
1086 		em = NULL;
1087 
1088 		/* we've found a hole, just zero and go on */
1089 		if (block_start == EXTENT_MAP_HOLE) {
1090 			memzero_page(page, pg_offset, iosize);
1091 
1092 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1093 			end_page_read(page, true, cur, iosize);
1094 			cur = cur + iosize;
1095 			pg_offset += iosize;
1096 			continue;
1097 		}
1098 		/* the get_extent function already copied the data into the page */
1099 		if (block_start == EXTENT_MAP_INLINE) {
1100 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1101 			end_page_read(page, true, cur, iosize);
1102 			cur = cur + iosize;
1103 			pg_offset += iosize;
1104 			continue;
1105 		}
1106 
1107 		if (bio_ctrl->compress_type != compress_type) {
1108 			submit_one_bio(bio_ctrl);
1109 			bio_ctrl->compress_type = compress_type;
1110 		}
1111 
1112 		if (force_bio_submit)
1113 			submit_one_bio(bio_ctrl);
1114 		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1115 				   pg_offset);
1116 		cur = cur + iosize;
1117 		pg_offset += iosize;
1118 	}
1119 
1120 	return 0;
1121 }
1122 
1123 int btrfs_read_folio(struct file *file, struct folio *folio)
1124 {
1125 	struct page *page = &folio->page;
1126 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
1127 	u64 start = page_offset(page);
1128 	u64 end = start + PAGE_SIZE - 1;
1129 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1130 	int ret;
1131 
1132 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1133 
1134 	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
1135 	/*
1136 	 * If btrfs_do_readpage() failed we will want to submit the assembled
1137 	 * bio to do the cleanup.
1138 	 */
1139 	submit_one_bio(&bio_ctrl);
1140 	return ret;
1141 }
1142 
1143 static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1144 					u64 start, u64 end,
1145 					struct extent_map **em_cached,
1146 					struct btrfs_bio_ctrl *bio_ctrl,
1147 					u64 *prev_em_start)
1148 {
1149 	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
1150 	int index;
1151 
1152 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1153 
1154 	for (index = 0; index < nr_pages; index++) {
1155 		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1156 				  prev_em_start);
1157 		put_page(pages[index]);
1158 	}
1159 }
1160 
1161 /*
1162  * helper for __extent_writepage, doing all of the delayed allocation setup.
1163  *
1164  * This returns 1 if the btrfs_run_delalloc_range() function did all the work required
1165  * to write the page (copy into inline extent).  In this case the IO has
1166  * been started and the page is already unlocked.
1167  *
1168  * This returns 0 if all went well (page still locked)
1169  * This returns < 0 if there were errors (page still locked)
1170  */
1171 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1172 		struct page *page, struct writeback_control *wbc)
1173 {
1174 	const u64 page_start = page_offset(page);
1175 	const u64 page_end = page_start + PAGE_SIZE - 1;
1176 	u64 delalloc_start = page_start;
1177 	u64 delalloc_end = page_end;
1178 	u64 delalloc_to_write = 0;
1179 	int ret = 0;
1180 
1181 	while (delalloc_start < page_end) {
1182 		delalloc_end = page_end;
1183 		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1184 					      &delalloc_start, &delalloc_end)) {
1185 			delalloc_start = delalloc_end + 1;
1186 			continue;
1187 		}
1188 
1189 		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1190 					       delalloc_end, wbc);
1191 		if (ret < 0)
1192 			return ret;
1193 
1194 		delalloc_start = delalloc_end + 1;
1195 	}
1196 
1197 	/*
1198 	 * delalloc_end is already one less than the total length, so
1199 	 * we don't subtract one from PAGE_SIZE
1200 	 */
1201 	delalloc_to_write +=
1202 		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1203 
1204 	/*
1205 	 * If btrfs_run_delalloc_range() already started I/O and unlocked
1206 	 * the pages, we just need to account for them here.
1207 	 */
1208 	if (ret == 1) {
1209 		wbc->nr_to_write -= delalloc_to_write;
1210 		return 1;
1211 	}
1212 
1213 	if (wbc->nr_to_write < delalloc_to_write) {
1214 		int thresh = 8192;
1215 
1216 		if (delalloc_to_write < thresh * 2)
1217 			thresh = delalloc_to_write;
1218 		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1219 					 thresh);
1220 	}
1221 
1222 	return 0;
1223 }
1224 
1225 /*
1226  * Find the first byte we need to write.
1227  *
1228  * For subpage, one page can contain several sectors, and
1229  * __extent_writepage_io() will just grab all extent maps in the page
1230  * range and try to submit all non-inline/non-compressed extents.
1231  *
1232  * This is a big problem for subpage; we shouldn't re-submit already written
1233  * data at all.
1234  * This function will lookup subpage dirty bit to find which range we really
1235  * need to submit.
1236  *
1237  * Return the next dirty range in [@start, @end).
1238  * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1239  */
1240 static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1241 				 struct page *page, u64 *start, u64 *end)
1242 {
1243 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
1244 	struct btrfs_subpage_info *spi = fs_info->subpage_info;
1245 	u64 orig_start = *start;
1246 	/* Declare as unsigned long so we can use bitmap ops */
1247 	unsigned long flags;
1248 	int range_start_bit;
1249 	int range_end_bit;
1250 
1251 	/*
1252 	 * For regular sector size == page size case, since one page only
1253 	 * contains one sector, we return the page offset directly.
1254 	 */
1255 	if (!btrfs_is_subpage(fs_info, page)) {
1256 		*start = page_offset(page);
1257 		*end = page_offset(page) + PAGE_SIZE;
1258 		return;
1259 	}
1260 
1261 	range_start_bit = spi->dirty_offset +
1262 			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1263 
1264 	/* We should have the page locked, but just in case */
1265 	spin_lock_irqsave(&subpage->lock, flags);
1266 	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1267 			       spi->dirty_offset + spi->bitmap_nr_bits);
1268 	spin_unlock_irqrestore(&subpage->lock, flags);
1269 
1270 	range_start_bit -= spi->dirty_offset;
1271 	range_end_bit -= spi->dirty_offset;
1272 
1273 	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1274 	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1275 }
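/*
 * Worked example of the bitmap math above (illustrative, assuming a 64K page,
 * 4K sectorsize and thus 16 dirty bits per page, and ignoring the
 * spi->dirty_offset bias for simplicity): with @start pointing at
 * page_offset(page) + 20K and only the 24K-32K part of the page dirty, the
 * search begins at bit 5 (20K >> sectorsize_bits), bitmap_next_set_region()
 * reports the set region [6, 8), and the result is
 * *start = page_offset(page) + 24K and *end = page_offset(page) + 32K.
 */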
1276 
1277 /*
1278  * helper for __extent_writepage.  This calls the writepage start hooks,
1279  * and does the loop to map the page into extents and bios.
1280  *
1281  * We return 1 if the IO is started and the page is unlocked,
1282  * 0 if all went well (page still locked)
1283  * < 0 if there were errors (page still locked)
1284  */
1285 static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1286 				 struct page *page,
1287 				 struct btrfs_bio_ctrl *bio_ctrl,
1288 				 loff_t i_size,
1289 				 int *nr_ret)
1290 {
1291 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1292 	u64 cur = page_offset(page);
1293 	u64 end = cur + PAGE_SIZE - 1;
1294 	u64 extent_offset;
1295 	u64 block_start;
1296 	struct extent_map *em;
1297 	int ret = 0;
1298 	int nr = 0;
1299 
1300 	ret = btrfs_writepage_cow_fixup(page);
1301 	if (ret) {
1302 		/* Fixup worker will requeue */
1303 		redirty_page_for_writepage(bio_ctrl->wbc, page);
1304 		unlock_page(page);
1305 		return 1;
1306 	}
1307 
1308 	bio_ctrl->end_io_func = end_bio_extent_writepage;
1309 	while (cur <= end) {
1310 		u32 len = end - cur + 1;
1311 		u64 disk_bytenr;
1312 		u64 em_end;
1313 		u64 dirty_range_start = cur;
1314 		u64 dirty_range_end;
1315 		u32 iosize;
1316 
1317 		if (cur >= i_size) {
1318 			btrfs_mark_ordered_io_finished(inode, page, cur, len,
1319 						       true);
1320 			/*
1321 			 * This range is beyond i_size, thus we don't need to
1322 			 * bother writing back.
1323 			 * But we still need to clear the dirty subpage bit, or
1324 			 * the next time the page gets dirtied, we will try to
1325 			 * writeback the sectors with subpage dirty bits,
1326 			 * causing writeback without ordered extent.
1327 			 */
1328 			btrfs_page_clear_dirty(fs_info, page, cur, len);
1329 			break;
1330 		}
1331 
1332 		find_next_dirty_byte(fs_info, page, &dirty_range_start,
1333 				     &dirty_range_end);
1334 		if (cur < dirty_range_start) {
1335 			cur = dirty_range_start;
1336 			continue;
1337 		}
1338 
1339 		em = btrfs_get_extent(inode, NULL, 0, cur, len);
1340 		if (IS_ERR(em)) {
1341 			ret = PTR_ERR_OR_ZERO(em);
1342 			goto out_error;
1343 		}
1344 
1345 		extent_offset = cur - em->start;
1346 		em_end = extent_map_end(em);
1347 		ASSERT(cur <= em_end);
1348 		ASSERT(cur < end);
1349 		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1350 		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1351 
1352 		block_start = em->block_start;
1353 		disk_bytenr = em->block_start + extent_offset;
1354 
1355 		ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
1356 		ASSERT(block_start != EXTENT_MAP_HOLE);
1357 		ASSERT(block_start != EXTENT_MAP_INLINE);
1358 
1359 		/*
1360 		 * Note that em_end from extent_map_end() and dirty_range_end from
1361 		 * find_next_dirty_byte() are both exclusive
1362 		 */
1363 		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1364 		free_extent_map(em);
1365 		em = NULL;
1366 
1367 		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1368 		if (!PageWriteback(page)) {
1369 			btrfs_err(inode->root->fs_info,
1370 				   "page %lu not writeback, cur %llu end %llu",
1371 			       page->index, cur, end);
1372 		}
1373 
1374 		/*
1375 		 * Although the PageDirty bit is cleared before entering this
1376 		 * function, the subpage dirty bit is not cleared.
1377 		 * So clear the subpage dirty bit here so that next time we won't
1378 		 * submit the page for a range already written to disk.
1379 		 */
1380 		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
1381 
1382 		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1383 				   cur - page_offset(page));
1384 		cur += iosize;
1385 		nr++;
1386 	}
1387 
1388 	btrfs_page_assert_not_dirty(fs_info, page);
1389 	*nr_ret = nr;
1390 	return 0;
1391 
1392 out_error:
1393 	/*
1394 	 * If we finish without problem, we should not only clear page dirty,
1395 	 * but also empty subpage dirty bits
1396 	 */
1397 	*nr_ret = nr;
1398 	return ret;
1399 }
1400 
1401 /*
1402  * the writepage semantics are similar to regular writepage.  extent
1403  * records are inserted to lock ranges in the tree, and as dirty areas
1404  * are found, they are marked writeback.  Then the lock bits are removed
1405  * and the end_io handler clears the writeback ranges
1406  *
1407  * Return 0 if everything goes well.
1408  * Return <0 for error.
1409  */
1410 static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
1411 {
1412 	struct folio *folio = page_folio(page);
1413 	struct inode *inode = page->mapping->host;
1414 	const u64 page_start = page_offset(page);
1415 	int ret;
1416 	int nr = 0;
1417 	size_t pg_offset;
1418 	loff_t i_size = i_size_read(inode);
1419 	unsigned long end_index = i_size >> PAGE_SHIFT;
1420 
1421 	trace___extent_writepage(page, inode, bio_ctrl->wbc);
1422 
1423 	WARN_ON(!PageLocked(page));
1424 
1425 	pg_offset = offset_in_page(i_size);
1426 	if (page->index > end_index ||
1427 	   (page->index == end_index && !pg_offset)) {
1428 		folio_invalidate(folio, 0, folio_size(folio));
1429 		folio_unlock(folio);
1430 		return 0;
1431 	}
1432 
1433 	if (page->index == end_index)
1434 		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1435 
1436 	ret = set_page_extent_mapped(page);
1437 	if (ret < 0)
1438 		goto done;
1439 
1440 	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1441 	if (ret == 1)
1442 		return 0;
1443 	if (ret)
1444 		goto done;
1445 
1446 	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
1447 	if (ret == 1)
1448 		return 0;
1449 
1450 	bio_ctrl->wbc->nr_to_write--;
1451 
1452 done:
1453 	if (nr == 0) {
1454 		/* make sure the mapping tag for page dirty gets cleared */
1455 		set_page_writeback(page);
1456 		end_page_writeback(page);
1457 	}
1458 	if (ret) {
1459 		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1460 					       PAGE_SIZE, !ret);
1461 		mapping_set_error(page->mapping, ret);
1462 	}
1463 	unlock_page(page);
1464 	ASSERT(ret <= 0);
1465 	return ret;
1466 }
1467 
1468 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1469 {
1470 	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1471 		       TASK_UNINTERRUPTIBLE);
1472 }
1473 
1474 /*
1475  * Lock extent buffer status and pages for writeback.
1476  *
1477  * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1478  * extent buffer is not dirty).
1479  * Return %true if the extent buffer is submitted to the bio.
1480  */
1481 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1482 			  struct writeback_control *wbc)
1483 {
1484 	struct btrfs_fs_info *fs_info = eb->fs_info;
1485 	bool ret = false;
1486 
1487 	btrfs_tree_lock(eb);
1488 	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1489 		btrfs_tree_unlock(eb);
1490 		if (wbc->sync_mode != WB_SYNC_ALL)
1491 			return false;
1492 		wait_on_extent_buffer_writeback(eb);
1493 		btrfs_tree_lock(eb);
1494 	}
1495 
1496 	/*
1497 	 * We need to do this to prevent races with anyone who checks if the eb is
1498 	 * under IO since we can end up having no IO bits set for a short period
1499 	 * of time.
1500 	 */
1501 	spin_lock(&eb->refs_lock);
1502 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1503 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1504 		spin_unlock(&eb->refs_lock);
1505 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1506 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1507 					 -eb->len,
1508 					 fs_info->dirty_metadata_batch);
1509 		ret = true;
1510 	} else {
1511 		spin_unlock(&eb->refs_lock);
1512 	}
1513 	btrfs_tree_unlock(eb);
1514 	return ret;
1515 }
1516 
1517 static void set_btree_ioerr(struct extent_buffer *eb)
1518 {
1519 	struct btrfs_fs_info *fs_info = eb->fs_info;
1520 
1521 	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1522 
1523 	/*
1524 	 * A read may stumble upon this buffer later, make sure that it gets an
1525 	 * error and knows there was an error.
1526 	 */
1527 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1528 
1529 	/*
1530 	 * We need to set the mapping with the IO error as well, because a write
1531 	 * error will flip the file system read-only, and then syncfs() would
1532 	 * return 0 (because we are read-only) if we didn't modify the err seq for
1533 	 * the superblock.
1534 	 */
1535 	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1536 
1537 	/*
1538 	 * If writeback for a btree extent that doesn't belong to a log tree
1539 	 * failed, increment the counter transaction->eb_write_errors.
1540 	 * We do this because while the transaction is running and before it's
1541 	 * committing (when we call filemap_fdata[write|wait]_range against
1542 	 * the btree inode), we might have
1543 	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1544 	 * returns an error or an error happens during writeback, when we're
1545 	 * committing the transaction we wouldn't know about it, since the pages
1546 	 * can be no longer dirty nor marked anymore for writeback (if a
1547 	 * subsequent modification to the extent buffer didn't happen before the
1548 	 * transaction commit), which makes filemap_fdata[write|wait]_range not
1549 	 * able to find the pages tagged with SetPageError at transaction
1550 	 * commit time. So if this happens we must abort the transaction,
1551 	 * otherwise we commit a super block with btree roots that point to
1552 	 * btree nodes/leafs whose content on disk is invalid - either garbage
1553 	 * or the content of some node/leaf from a past generation that got
1554 	 * cowed or deleted and is no longer valid.
1555 	 *
1556 	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1557 	 * not be enough - we need to distinguish between log tree extents vs
1558 	 * non-log tree extents, and the next filemap_fdatawait_range() call
1559 	 * will catch and clear such errors in the mapping - and that call might
1560 	 * be from a log sync and not from a transaction commit. Also, checking
1561 	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1562 	 * not done and would not be reliable - the eb might have been released
1563 	 * from memory and reading it back again means that flag would not be
1564 	 * set (since it's a runtime flag, not persisted on disk).
1565 	 *
1566 	 * Using the flags below in the btree inode also makes us achieve the
1567 	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1568 	 * writeback for all dirty pages and before filemap_fdatawait_range()
1569 	 * is called, the writeback for all dirty pages had already finished
1570 	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1571 	 * filemap_fdatawait_range() would return success, as it could not know
1572 	 * that writeback errors happened (the pages were no longer tagged for
1573 	 * writeback).
1574 	 */
1575 	switch (eb->log_index) {
1576 	case -1:
1577 		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1578 		break;
1579 	case 0:
1580 		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1581 		break;
1582 	case 1:
1583 		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1584 		break;
1585 	default:
1586 		BUG(); /* unexpected, logic error */
1587 	}
1588 }
1589 
1590 /*
1591  * The endio specific version which won't take any spinlock that is unsafe
1592  * in endio context.
1593  */
1594 static struct extent_buffer *find_extent_buffer_nolock(
1595 		struct btrfs_fs_info *fs_info, u64 start)
1596 {
1597 	struct extent_buffer *eb;
1598 
1599 	rcu_read_lock();
1600 	eb = radix_tree_lookup(&fs_info->buffer_radix,
1601 			       start >> fs_info->sectorsize_bits);
1602 	if (eb && atomic_inc_not_zero(&eb->refs)) {
1603 		rcu_read_unlock();
1604 		return eb;
1605 	}
1606 	rcu_read_unlock();
1607 	return NULL;
1608 }
1609 
1610 static void extent_buffer_write_end_io(struct btrfs_bio *bbio)
1611 {
1612 	struct extent_buffer *eb = bbio->private;
1613 	struct btrfs_fs_info *fs_info = eb->fs_info;
1614 	bool uptodate = !bbio->bio.bi_status;
1615 	struct bvec_iter_all iter_all;
1616 	struct bio_vec *bvec;
1617 	u32 bio_offset = 0;
1618 
1619 	if (!uptodate)
1620 		set_btree_ioerr(eb);
1621 
1622 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
1623 		u64 start = eb->start + bio_offset;
1624 		struct page *page = bvec->bv_page;
1625 		u32 len = bvec->bv_len;
1626 
1627 		btrfs_page_clear_writeback(fs_info, page, start, len);
1628 		bio_offset += len;
1629 	}
1630 
1631 	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1632 	smp_mb__after_atomic();
1633 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1634 
1635 	bio_put(&bbio->bio);
1636 }
1637 
1638 static void prepare_eb_write(struct extent_buffer *eb)
1639 {
1640 	u32 nritems;
1641 	unsigned long start;
1642 	unsigned long end;
1643 
1644 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1645 
1646 	/* Set btree blocks beyond nritems with 0 to avoid stale content */
1647 	nritems = btrfs_header_nritems(eb);
1648 	if (btrfs_header_level(eb) > 0) {
1649 		end = btrfs_node_key_ptr_offset(eb, nritems);
1650 		memzero_extent_buffer(eb, end, eb->len - end);
1651 	} else {
1652 		/*
1653 		 * Leaf:
1654 		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1655 		 */
1656 		start = btrfs_item_nr_offset(eb, nritems);
1657 		end = btrfs_item_nr_offset(eb, 0);
1658 		if (nritems == 0)
1659 			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1660 		else
1661 			end += btrfs_item_offset(eb, nritems - 1);
1662 		memzero_extent_buffer(eb, start, end - start);
1663 	}
1664 }
1665 
1666 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1667 					    struct writeback_control *wbc)
1668 {
1669 	struct btrfs_fs_info *fs_info = eb->fs_info;
1670 	struct btrfs_bio *bbio;
1671 
1672 	prepare_eb_write(eb);
1673 
1674 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1675 			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1676 			       eb->fs_info, extent_buffer_write_end_io, eb);
1677 	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1678 	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1679 	wbc_init_bio(wbc, &bbio->bio);
1680 	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1681 	bbio->file_offset = eb->start;
1682 	if (fs_info->nodesize < PAGE_SIZE) {
1683 		struct page *p = eb->pages[0];
1684 
1685 		lock_page(p);
1686 		btrfs_subpage_set_writeback(fs_info, p, eb->start, eb->len);
1687 		if (btrfs_subpage_clear_and_test_dirty(fs_info, p, eb->start,
1688 						       eb->len)) {
1689 			clear_page_dirty_for_io(p);
1690 			wbc->nr_to_write--;
1691 		}
1692 		__bio_add_page(&bbio->bio, p, eb->len, eb->start - page_offset(p));
1693 		wbc_account_cgroup_owner(wbc, p, eb->len);
1694 		unlock_page(p);
1695 	} else {
1696 		for (int i = 0; i < num_extent_pages(eb); i++) {
1697 			struct page *p = eb->pages[i];
1698 
1699 			lock_page(p);
1700 			clear_page_dirty_for_io(p);
1701 			set_page_writeback(p);
1702 			__bio_add_page(&bbio->bio, p, PAGE_SIZE, 0);
1703 			wbc_account_cgroup_owner(wbc, p, PAGE_SIZE);
1704 			wbc->nr_to_write--;
1705 			unlock_page(p);
1706 		}
1707 	}
1708 	btrfs_submit_bio(bbio, 0);
1709 }
1710 
1711 /*
1712  * Submit one subpage btree page.
1713  *
1714  * The main differences from submit_eb_page() are:
1715  * - Page locking
1716  *   For subpage, we don't rely on page locking at all.
1717  *
1718  * - Flush write bio
1719  *   We only flush the bio if we may be unable to fit the current extent
1720  *   buffer into the current bio.
1721  *
1722  * Return >=0 for the number of submitted extent buffers.
1723  * Return <0 for fatal error.
1724  */
1725 static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1726 {
1727 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
1728 	int submitted = 0;
1729 	u64 page_start = page_offset(page);
1730 	int bit_start = 0;
1731 	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1732 
1733 	/* Lock and write each dirty extent buffer in the range */
1734 	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1735 		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
1736 		struct extent_buffer *eb;
1737 		unsigned long flags;
1738 		u64 start;
1739 
1740 		/*
1741 		 * Take private lock to ensure the subpage won't be detached
1742 		 * in the meantime.
1743 		 */
1744 		spin_lock(&page->mapping->private_lock);
1745 		if (!PagePrivate(page)) {
1746 			spin_unlock(&page->mapping->private_lock);
1747 			break;
1748 		}
1749 		spin_lock_irqsave(&subpage->lock, flags);
1750 		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1751 			      subpage->bitmaps)) {
1752 			spin_unlock_irqrestore(&subpage->lock, flags);
1753 			spin_unlock(&page->mapping->private_lock);
1754 			bit_start++;
1755 			continue;
1756 		}
1757 
1758 		start = page_start + bit_start * fs_info->sectorsize;
1759 		bit_start += sectors_per_node;
1760 
1761 		/*
1762 		 * Here we just want to grab the eb without touching extra
1763 		 * spin locks, so call find_extent_buffer_nolock().
1764 		 */
1765 		eb = find_extent_buffer_nolock(fs_info, start);
1766 		spin_unlock_irqrestore(&subpage->lock, flags);
1767 		spin_unlock(&page->mapping->private_lock);
1768 
1769 		/*
1770 		 * The eb has already reached 0 refs, thus find_extent_buffer()
1771 		 * won't return it. We don't need to write back such an eb
1772 		 * anyway.
1773 		 */
1774 		if (!eb)
1775 			continue;
1776 
1777 		if (lock_extent_buffer_for_io(eb, wbc)) {
1778 			write_one_eb(eb, wbc);
1779 			submitted++;
1780 		}
1781 		free_extent_buffer(eb);
1782 	}
1783 	return submitted;
1784 }
1785 
1786 /*
1787  * Submit all page(s) of one extent buffer.
1788  *
1789  * @page:	the page of one extent buffer
1790  * @ctx:	the btrfs_eb_write_context; if the current page belongs to
1791  *		ctx->eb, we don't need to submit it
1792  *
1793  * The caller should pass each page in bytenr order, and here we use
1794  * @ctx->eb to determine if we have submitted the pages of one extent buffer.
1795  *
1796  * If we have, we just skip until we hit a new page that doesn't belong to
1797  * the current @ctx->eb.
1798  *
1799  * If not, we submit all the page(s) of the extent buffer.
1800  *
1801  * Return >0 if we have submitted the extent buffer successfully.
1802  * Return 0 if we don't need to submit the page, as it's already submitted by
1803  * previous call.
1804  * Return <0 for fatal error.
1805  */
1806 static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1807 {
1808 	struct writeback_control *wbc = ctx->wbc;
1809 	struct address_space *mapping = page->mapping;
1810 	struct extent_buffer *eb;
1811 	int ret;
1812 
1813 	if (!PagePrivate(page))
1814 		return 0;
1815 
1816 	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
1817 		return submit_eb_subpage(page, wbc);
1818 
1819 	spin_lock(&mapping->private_lock);
1820 	if (!PagePrivate(page)) {
1821 		spin_unlock(&mapping->private_lock);
1822 		return 0;
1823 	}
1824 
1825 	eb = (struct extent_buffer *)page->private;
1826 
1827 	/*
1828 	 * Shouldn't happen and normally this would be a BUG_ON but no point
1829 	 * crashing the machine for something we can survive anyway.
1830 	 */
1831 	if (WARN_ON(!eb)) {
1832 		spin_unlock(&mapping->private_lock);
1833 		return 0;
1834 	}
1835 
1836 	if (eb == ctx->eb) {
1837 		spin_unlock(&mapping->private_lock);
1838 		return 0;
1839 	}
1840 	ret = atomic_inc_not_zero(&eb->refs);
1841 	spin_unlock(&mapping->private_lock);
1842 	if (!ret)
1843 		return 0;
1844 
1845 	ctx->eb = eb;
1846 
1847 	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1848 	if (ret) {
1849 		if (ret == -EBUSY)
1850 			ret = 0;
1851 		free_extent_buffer(eb);
1852 		return ret;
1853 	}
1854 
1855 	if (!lock_extent_buffer_for_io(eb, wbc)) {
1856 		free_extent_buffer(eb);
1857 		return 0;
1858 	}
1859 	/* Implies write in zoned mode. */
1860 	if (ctx->zoned_bg) {
1861 		/* Mark the last eb in the block group. */
1862 		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1863 		ctx->zoned_bg->meta_write_pointer += eb->len;
1864 	}
1865 	write_one_eb(eb, wbc);
1866 	free_extent_buffer(eb);
1867 	return 1;
1868 }
1869 
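/*
 * Writeback of the btree inode's dirty pages.
 *
 * Walk the folios tagged dirty (or TOWRITE for WB_SYNC_ALL) and submit the
 * extent buffers backing them through submit_eb_page(), wrapping around once
 * for cyclic writeback.  The walk runs under the zoned metadata IO lock, and
 * if the filesystem has already hit an error we return -EROFS instead of
 * success, for the reasons described in the comment near the end of the
 * function.
 */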
1870 int btree_write_cache_pages(struct address_space *mapping,
1871 				   struct writeback_control *wbc)
1872 {
1873 	struct btrfs_eb_write_context ctx = { .wbc = wbc };
1874 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
1875 	int ret = 0;
1876 	int done = 0;
1877 	int nr_to_write_done = 0;
1878 	struct folio_batch fbatch;
1879 	unsigned int nr_folios;
1880 	pgoff_t index;
1881 	pgoff_t end;		/* Inclusive */
1882 	int scanned = 0;
1883 	xa_mark_t tag;
1884 
1885 	folio_batch_init(&fbatch);
1886 	if (wbc->range_cyclic) {
1887 		index = mapping->writeback_index; /* Start from prev offset */
1888 		end = -1;
1889 		/*
1890 		 * Starting from the beginning does not need to cycle over the
1891 		 * range, so mark it as scanned.
1892 		 */
1893 		scanned = (index == 0);
1894 	} else {
1895 		index = wbc->range_start >> PAGE_SHIFT;
1896 		end = wbc->range_end >> PAGE_SHIFT;
1897 		scanned = 1;
1898 	}
1899 	if (wbc->sync_mode == WB_SYNC_ALL)
1900 		tag = PAGECACHE_TAG_TOWRITE;
1901 	else
1902 		tag = PAGECACHE_TAG_DIRTY;
1903 	btrfs_zoned_meta_io_lock(fs_info);
1904 retry:
1905 	if (wbc->sync_mode == WB_SYNC_ALL)
1906 		tag_pages_for_writeback(mapping, index, end);
1907 	while (!done && !nr_to_write_done && (index <= end) &&
1908 	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1909 					    tag, &fbatch))) {
1910 		unsigned i;
1911 
1912 		for (i = 0; i < nr_folios; i++) {
1913 			struct folio *folio = fbatch.folios[i];
1914 
1915 			ret = submit_eb_page(&folio->page, &ctx);
1916 			if (ret == 0)
1917 				continue;
1918 			if (ret < 0) {
1919 				done = 1;
1920 				break;
1921 			}
1922 
1923 			/*
1924 			 * The filesystem may choose to bump up nr_to_write.
1925 			 * We have to make sure to honor the new nr_to_write
1926 			 * at any time.
1927 			 */
1928 			nr_to_write_done = wbc->nr_to_write <= 0;
1929 		}
1930 		folio_batch_release(&fbatch);
1931 		cond_resched();
1932 	}
1933 	if (!scanned && !done) {
1934 		/*
1935 		 * We hit the last page and there is more work to be done: wrap
1936 		 * back to the start of the file
1937 		 */
1938 		scanned = 1;
1939 		index = 0;
1940 		goto retry;
1941 	}
1942 	/*
1943 	 * If something went wrong, don't allow any metadata write bio to be
1944 	 * submitted.
1945 	 *
1946 	 * This would prevent use-after-free if we had dirty pages not
1947 	 * cleaned up, which can still happen with fuzzed images.
1948 	 *
1949 	 * - Bad extent tree
1950 	 *   Allowing an existing tree block to be allocated for other trees.
1951 	 *
1952 	 * - Log tree operations
1953 	 *   Existing tree blocks get allocated to the log tree, which bumps
1954 	 *   their generation, then they get cleaned in tree re-balance.
1955 	 *   Such a tree block will not be written back, since it's clean,
1956 	 *   thus no WRITTEN flag is set.
1957 	 *   And after the log writes back, this tree block is not tracked by
1958 	 *   any dirty extent_io_tree.
1959 	 *
1960 	 * - Offending tree block gets re-dirtied from its original owner
1961 	 *   Since it has a bumped generation and no WRITTEN flag, it can be
1962 	 *   reused without COWing. This tree block will not be tracked by
1963 	 *   btrfs_transaction::dirty_pages.
1964 	 *
1965 	 *   Now such a dirty tree block will not be cleaned by any dirty
1966 	 *   extent io tree. Thus we don't want to submit such a wild eb
1967 	 *   if the fs already has errors.
1968 	 *
1969 	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
1970 	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
1971 	 */
1972 	if (ret > 0)
1973 		ret = 0;
1974 	if (!ret && BTRFS_FS_ERROR(fs_info))
1975 		ret = -EROFS;
1976 
1977 	if (ctx.zoned_bg)
1978 		btrfs_put_block_group(ctx.zoned_bg);
1979 	btrfs_zoned_meta_io_unlock(fs_info);
1980 	return ret;
1981 }
1982 
1983 /*
1984  * Walk the list of dirty pages of the given address space and write all of them.
1985  *
1986  * @mapping:   address space structure to write
1987  * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
1988  * @bio_ctrl:  holds context for the write, namely the bio
1989  *
1990  * If a page is already under I/O, write_cache_pages() skips it, even
1991  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1992  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1993  * and msync() need to guarantee that all the data which was dirty at the time
1994  * the call was made get new I/O started against them.  If wbc->sync_mode is
1995  * WB_SYNC_ALL then we were called for data integrity and we must wait for
1996  * existing IO to complete.
1997  */
1998 static int extent_write_cache_pages(struct address_space *mapping,
1999 			     struct btrfs_bio_ctrl *bio_ctrl)
2000 {
2001 	struct writeback_control *wbc = bio_ctrl->wbc;
2002 	struct inode *inode = mapping->host;
2003 	int ret = 0;
2004 	int done = 0;
2005 	int nr_to_write_done = 0;
2006 	struct folio_batch fbatch;
2007 	unsigned int nr_folios;
2008 	pgoff_t index;
2009 	pgoff_t end;		/* Inclusive */
2010 	pgoff_t done_index;
2011 	int range_whole = 0;
2012 	int scanned = 0;
2013 	xa_mark_t tag;
2014 
2015 	/*
2016 	 * We have to hold onto the inode so that ordered extents can do their
2017 	 * work when the IO finishes.  The alternative to this is failing to add
2018 	 * an ordered extent if the igrab() fails there and that is a huge pain
2019 	 * to deal with, so instead just hold onto the inode throughout the
2020 	 * writepages operation.  If it fails here we are freeing up the inode
2021 	 * anyway and we'd rather not waste our time writing out stuff that is
2022 	 * going to be truncated anyway.
2023 	 */
2024 	if (!igrab(inode))
2025 		return 0;
2026 
2027 	folio_batch_init(&fbatch);
2028 	if (wbc->range_cyclic) {
2029 		index = mapping->writeback_index; /* Start from prev offset */
2030 		end = -1;
2031 		/*
2032 		 * Starting from the beginning does not need to cycle over the
2033 		 * range, so mark it as scanned.
2034 		 */
2035 		scanned = (index == 0);
2036 	} else {
2037 		index = wbc->range_start >> PAGE_SHIFT;
2038 		end = wbc->range_end >> PAGE_SHIFT;
2039 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2040 			range_whole = 1;
2041 		scanned = 1;
2042 	}
2043 
2044 	/*
2045 	 * We do the tagged writepage as long as the snapshot flush bit is set
2046 	 * and we are the first one to do the filemap_flush() on this inode.
2047 	 *
2048 	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2049 	 * not race in and drop the bit.
2050 	 */
2051 	if (range_whole && wbc->nr_to_write == LONG_MAX &&
2052 	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2053 			       &BTRFS_I(inode)->runtime_flags))
2054 		wbc->tagged_writepages = 1;
2055 
2056 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2057 		tag = PAGECACHE_TAG_TOWRITE;
2058 	else
2059 		tag = PAGECACHE_TAG_DIRTY;
2060 retry:
2061 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2062 		tag_pages_for_writeback(mapping, index, end);
2063 	done_index = index;
2064 	while (!done && !nr_to_write_done && (index <= end) &&
2065 			(nr_folios = filemap_get_folios_tag(mapping, &index,
2066 							end, tag, &fbatch))) {
2067 		unsigned i;
2068 
2069 		for (i = 0; i < nr_folios; i++) {
2070 			struct folio *folio = fbatch.folios[i];
2071 
2072 			done_index = folio_next_index(folio);
2073 			/*
2074 			 * At this point we hold neither the i_pages lock nor
2075 			 * the page lock: the page may be truncated or
2076 			 * invalidated (changing page->mapping to NULL),
2077 			 * or even swizzled back from swapper_space to
2078 			 * tmpfs file mapping
2079 			 */
2080 			if (!folio_trylock(folio)) {
2081 				submit_write_bio(bio_ctrl, 0);
2082 				folio_lock(folio);
2083 			}
2084 
2085 			if (unlikely(folio->mapping != mapping)) {
2086 				folio_unlock(folio);
2087 				continue;
2088 			}
2089 
2090 			if (!folio_test_dirty(folio)) {
2091 				/* Someone wrote it for us. */
2092 				folio_unlock(folio);
2093 				continue;
2094 			}
2095 
2096 			if (wbc->sync_mode != WB_SYNC_NONE) {
2097 				if (folio_test_writeback(folio))
2098 					submit_write_bio(bio_ctrl, 0);
2099 				folio_wait_writeback(folio);
2100 			}
2101 
2102 			if (folio_test_writeback(folio) ||
2103 			    !folio_clear_dirty_for_io(folio)) {
2104 				folio_unlock(folio);
2105 				continue;
2106 			}
2107 
2108 			ret = __extent_writepage(&folio->page, bio_ctrl);
2109 			if (ret < 0) {
2110 				done = 1;
2111 				break;
2112 			}
2113 
2114 			/*
2115 			 * The filesystem may choose to bump up nr_to_write.
2116 			 * We have to make sure to honor the new nr_to_write
2117 			 * at any time.
2118 			 */
2119 			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2120 					    wbc->nr_to_write <= 0);
2121 		}
2122 		folio_batch_release(&fbatch);
2123 		cond_resched();
2124 	}
2125 	if (!scanned && !done) {
2126 		/*
2127 		 * We hit the last page and there is more work to be done: wrap
2128 		 * back to the start of the file
2129 		 */
2130 		scanned = 1;
2131 		index = 0;
2132 
2133 		/*
2134 		 * If we're looping we could run into a page that is locked by a
2135 		 * writer and that writer could be waiting on writeback for a
2136 		 * page in our current bio, and thus deadlock, so flush the
2137 		 * write bio here.
2138 		 */
2139 		submit_write_bio(bio_ctrl, 0);
2140 		goto retry;
2141 	}
2142 
2143 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2144 		mapping->writeback_index = done_index;
2145 
2146 	btrfs_add_delayed_iput(BTRFS_I(inode));
2147 	return ret;
2148 }
2149 
2150 /*
2151  * Submit the pages in the range to bio for call sites whose delalloc range
2152  * has already been run (aka, ordered extent inserted) and all pages are still
2153  * locked.
2154  */
2155 void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2156 			       u64 start, u64 end, struct writeback_control *wbc,
2157 			       bool pages_dirty)
2158 {
2159 	bool found_error = false;
2160 	int ret = 0;
2161 	struct address_space *mapping = inode->i_mapping;
2162 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2163 	const u32 sectorsize = fs_info->sectorsize;
2164 	loff_t i_size = i_size_read(inode);
2165 	u64 cur = start;
2166 	struct btrfs_bio_ctrl bio_ctrl = {
2167 		.wbc = wbc,
2168 		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2169 	};
2170 
2171 	if (wbc->no_cgroup_owner)
2172 		bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2173 
2174 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2175 
2176 	while (cur <= end) {
2177 		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2178 		u32 cur_len = cur_end + 1 - cur;
2179 		struct page *page;
2180 		int nr = 0;
2181 
2182 		page = find_get_page(mapping, cur >> PAGE_SHIFT);
2183 		ASSERT(PageLocked(page));
2184 		if (pages_dirty && page != locked_page) {
2185 			ASSERT(PageDirty(page));
2186 			clear_page_dirty_for_io(page);
2187 		}
2188 
2189 		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2190 					    i_size, &nr);
2191 		if (ret == 1)
2192 			goto next_page;
2193 
2194 		/* Make sure the mapping tag for page dirty gets cleared. */
2195 		if (nr == 0) {
2196 			set_page_writeback(page);
2197 			end_page_writeback(page);
2198 		}
2199 		if (ret) {
2200 			btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2201 						       cur, cur_len, !ret);
2202 			mapping_set_error(page->mapping, ret);
2203 		}
2204 		btrfs_page_unlock_writer(fs_info, page, cur, cur_len);
2205 		if (ret < 0)
2206 			found_error = true;
2207 next_page:
2208 		put_page(page);
2209 		cur = cur_end + 1;
2210 	}
2211 
2212 	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2213 }
2214 
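/*
 * Writeback entry point for data pages: run extent_write_cache_pages() and
 * then submit whatever bio it accumulated.  On zoned filesystems the data
 * relocation lock is held so that only one task updates the relocation write
 * pointer at a time.
 */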
2215 int extent_writepages(struct address_space *mapping,
2216 		      struct writeback_control *wbc)
2217 {
2218 	struct inode *inode = mapping->host;
2219 	int ret = 0;
2220 	struct btrfs_bio_ctrl bio_ctrl = {
2221 		.wbc = wbc,
2222 		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2223 	};
2224 
2225 	/*
2226 	 * Allow only a single thread to do the reloc work in zoned mode to
2227 	 * protect the write pointer updates.
2228 	 */
2229 	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2230 	ret = extent_write_cache_pages(mapping, &bio_ctrl);
2231 	submit_write_bio(&bio_ctrl, ret);
2232 	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2233 	return ret;
2234 }
2235 
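/*
 * Readahead entry point: take pages from the readahead control in batches of
 * up to 16, issue reads for each contiguous range (caching the last looked up
 * extent map across batches) and submit any remaining bio at the end.
 */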
2236 void extent_readahead(struct readahead_control *rac)
2237 {
2238 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2239 	struct page *pagepool[16];
2240 	struct extent_map *em_cached = NULL;
2241 	u64 prev_em_start = (u64)-1;
2242 	int nr;
2243 
2244 	while ((nr = readahead_page_batch(rac, pagepool))) {
2245 		u64 contig_start = readahead_pos(rac);
2246 		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2247 
2248 		contiguous_readpages(pagepool, nr, contig_start, contig_end,
2249 				&em_cached, &bio_ctrl, &prev_em_start);
2250 	}
2251 
2252 	if (em_cached)
2253 		free_extent_map(em_cached);
2254 	submit_one_bio(&bio_ctrl);
2255 }
2256 
2257 /*
2258  * Basic invalidate_folio code. This waits on any locked or writeback
2259  * ranges corresponding to the folio, and then deletes any extent state
2260  * records from the tree.
2261  */
2262 int extent_invalidate_folio(struct extent_io_tree *tree,
2263 			  struct folio *folio, size_t offset)
2264 {
2265 	struct extent_state *cached_state = NULL;
2266 	u64 start = folio_pos(folio);
2267 	u64 end = start + folio_size(folio) - 1;
2268 	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
2269 
2270 	/* This function is only called for the btree inode */
2271 	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2272 
2273 	start += ALIGN(offset, blocksize);
2274 	if (start > end)
2275 		return 0;
2276 
2277 	lock_extent(tree, start, end, &cached_state);
2278 	folio_wait_writeback(folio);
2279 
2280 	/*
2281 	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2282 	 * so here we only need to unlock the extent range to free any
2283 	 * existing extent state.
2284 	 */
2285 	unlock_extent(tree, start, end, &cached_state);
2286 	return 0;
2287 }
2288 
2289 /*
2290  * A helper for release_folio. This tests for areas of the page that
2291  * are locked or under IO and drops the related state bits if it is safe
2292  * to drop the page.
2293  */
2294 static int try_release_extent_state(struct extent_io_tree *tree,
2295 				    struct page *page, gfp_t mask)
2296 {
2297 	u64 start = page_offset(page);
2298 	u64 end = start + PAGE_SIZE - 1;
2299 	int ret = 1;
2300 
2301 	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2302 		ret = 0;
2303 	} else {
2304 		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2305 				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS);
2306 
2307 		/*
2308 		 * At this point we can safely clear everything except the
2309 		 * locked bit, the nodatasum bit and the delalloc new bit.
2310 		 * The delalloc new bit will be cleared by ordered extent
2311 		 * completion.
2312 		 */
2313 		ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2314 
2315 		/* If __clear_extent_bit() failed for ENOMEM reasons,
2316 		 * we can't allow the release to continue.
2317 		 */
2318 		if (ret < 0)
2319 			ret = 0;
2320 		else
2321 			ret = 1;
2322 	}
2323 	return ret;
2324 }
2325 
2326 /*
2327  * A helper for release_folio.  As long as there are no locked extents
2328  * in the range corresponding to the page, both state records and extent
2329  * map records are removed.
2330  */
2331 int try_release_extent_mapping(struct page *page, gfp_t mask)
2332 {
2333 	struct extent_map *em;
2334 	u64 start = page_offset(page);
2335 	u64 end = start + PAGE_SIZE - 1;
2336 	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
2337 	struct extent_io_tree *tree = &btrfs_inode->io_tree;
2338 	struct extent_map_tree *map = &btrfs_inode->extent_tree;
2339 
2340 	if (gfpflags_allow_blocking(mask) &&
2341 	    page->mapping->host->i_size > SZ_16M) {
2342 		u64 len;
2343 		while (start <= end) {
2344 			struct btrfs_fs_info *fs_info;
2345 			u64 cur_gen;
2346 
2347 			len = end - start + 1;
2348 			write_lock(&map->lock);
2349 			em = lookup_extent_mapping(map, start, len);
2350 			if (!em) {
2351 				write_unlock(&map->lock);
2352 				break;
2353 			}
2354 			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2355 			    em->start != start) {
2356 				write_unlock(&map->lock);
2357 				free_extent_map(em);
2358 				break;
2359 			}
2360 			if (test_range_bit_exists(tree, em->start,
2361 						  extent_map_end(em) - 1,
2362 						  EXTENT_LOCKED))
2363 				goto next;
2364 			/*
2365 			 * If it's not in the list of modified extents, used
2366 			 * by a fast fsync, we can remove it. If it's being
2367 			 * logged we can safely remove it since fsync took an
2368 			 * extra reference on the em.
2369 			 */
2370 			if (list_empty(&em->list) ||
2371 			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
2372 				goto remove_em;
2373 			/*
2374 			 * If it's in the list of modified extents, remove it
2375 			 * only if its generation is older than the current one,
2376 			 * in which case we don't need it for a fast fsync.
2377 			 * Otherwise don't remove it, we could be racing with an
2378 			 * ongoing fast fsync that could miss the new extent.
2379 			 */
2380 			fs_info = btrfs_inode->root->fs_info;
2381 			spin_lock(&fs_info->trans_lock);
2382 			cur_gen = fs_info->generation;
2383 			spin_unlock(&fs_info->trans_lock);
2384 			if (em->generation >= cur_gen)
2385 				goto next;
2386 remove_em:
2387 			/*
2388 			 * We only remove extent maps that are not in the list of
2389 			 * modified extents or that are in the list but with a
2390 			 * generation lower than the current generation, so there
2391 			 * is no need to set the full fsync flag on the inode (it
2392 			 * hurts the fsync performance for workloads with a data
2393 			 * size that exceeds or is close to the system's memory).
2394 			 */
2395 			remove_extent_mapping(map, em);
2396 			/* once for the rb tree */
2397 			free_extent_map(em);
2398 next:
2399 			start = extent_map_end(em);
2400 			write_unlock(&map->lock);
2401 
2402 			/* once for us */
2403 			free_extent_map(em);
2404 
2405 			cond_resched(); /* Allow large-extent preemption. */
2406 		}
2407 	}
2408 	return try_release_extent_state(tree, page, mask);
2409 }
2410 
2411 /*
2412  * To cache the previous fiemap extent.
2413  *
2414  * Will be used for merging fiemap extents.
2415  */
2416 struct fiemap_cache {
2417 	u64 offset;
2418 	u64 phys;
2419 	u64 len;
2420 	u32 flags;
2421 	bool cached;
2422 };
2423 
2424 /*
2425  * Helper to submit a fiemap extent.
2426  *
2427  * Will try to merge the current fiemap extent specified by @offset, @phys,
2428  * @len and @flags with the cached one.
2429  * Only when we fail to merge will the cached one be submitted as a
2430  * fiemap extent.
2431  *
2432  * Return value is the same as fiemap_fill_next_extent().
2433  */
2434 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2435 				struct fiemap_cache *cache,
2436 				u64 offset, u64 phys, u64 len, u32 flags)
2437 {
2438 	int ret = 0;
2439 
2440 	/* Set at the end of extent_fiemap(). */
2441 	ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2442 
2443 	if (!cache->cached)
2444 		goto assign;
2445 
2446 	/*
2447 	 * Sanity check: extent_fiemap() should have ensured that the new
2448 	 * fiemap extent won't overlap with the cached one.
2449 	 * Not recoverable.
2450 	 *
2451 	 * NOTE: Physical address can overlap, due to compression
2452 	 */
2453 	if (cache->offset + cache->len > offset) {
2454 		WARN_ON(1);
2455 		return -EINVAL;
2456 	}
2457 
2458 	/*
2459 	 * Only merge fiemap extents if
2460 	 * 1) Their logical addresses are contiguous
2461 	 *
2462 	 * 2) Their physical addresses are contiguous
2463 	 *    So truly compressed (physical size smaller than logical size)
2464 	 *    extents won't get merged with each other
2465 	 *
2466 	 * 3) They share the same flags
2467 	 */
2468 	if (cache->offset + cache->len  == offset &&
2469 	    cache->phys + cache->len == phys  &&
2470 	    cache->flags == flags) {
2471 		cache->len += len;
2472 		return 0;
2473 	}
2474 
2475 	/* Not mergeable, need to submit cached one */
2476 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2477 				      cache->len, cache->flags);
2478 	cache->cached = false;
2479 	if (ret)
2480 		return ret;
2481 assign:
2482 	cache->cached = true;
2483 	cache->offset = offset;
2484 	cache->phys = phys;
2485 	cache->len = len;
2486 	cache->flags = flags;
2487 
2488 	return 0;
2489 }
2490 
2491 /*
2492  * Emit last fiemap cache
2493  *
2494  * The last fiemap cache may still be cached in the following case:
2495  * 0		      4k		    8k
2496  * |<- Fiemap range ->|
2497  * |<------------  First extent ----------->|
2498  *
2499  * In this case, the first extent range will be cached but not emitted.
2500  * So we must emit it before ending extent_fiemap().
2501  */
2502 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
2503 				  struct fiemap_cache *cache)
2504 {
2505 	int ret;
2506 
2507 	if (!cache->cached)
2508 		return 0;
2509 
2510 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2511 				      cache->len, cache->flags);
2512 	cache->cached = false;
2513 	if (ret > 0)
2514 		ret = 0;
2515 	return ret;
2516 }
2517 
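/*
 * Advance the path to the next file extent item of the inode.  When that
 * means moving to the next leaf, the leaf is replaced in the path with a
 * private clone, for the reasons explained at fiemap_search_slot().
 *
 * Return 0 if there is a next item, 1 if there are no more file extent items
 * for the inode, and < 0 on error.
 */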
2518 static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
2519 {
2520 	struct extent_buffer *clone;
2521 	struct btrfs_key key;
2522 	int slot;
2523 	int ret;
2524 
2525 	path->slots[0]++;
2526 	if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2527 		return 0;
2528 
2529 	ret = btrfs_next_leaf(inode->root, path);
2530 	if (ret != 0)
2531 		return ret;
2532 
2533 	/*
2534 	 * Don't bother with cloning if there are no more file extent items for
2535 	 * our inode.
2536 	 */
2537 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2538 	if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY)
2539 		return 1;
2540 
2541 	/* See the comment at fiemap_search_slot() about why we clone. */
2542 	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2543 	if (!clone)
2544 		return -ENOMEM;
2545 
2546 	slot = path->slots[0];
2547 	btrfs_release_path(path);
2548 	path->nodes[0] = clone;
2549 	path->slots[0] = slot;
2550 
2551 	return 0;
2552 }
2553 
2554 /*
2555  * Search for the first file extent item that starts at a given file offset or
2556  * the one that starts immediately before that offset.
2557  * Returns: 0 on success, < 0 on error, 1 if not found.
2558  */
2559 static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2560 			      u64 file_offset)
2561 {
2562 	const u64 ino = btrfs_ino(inode);
2563 	struct btrfs_root *root = inode->root;
2564 	struct extent_buffer *clone;
2565 	struct btrfs_key key;
2566 	int slot;
2567 	int ret;
2568 
2569 	key.objectid = ino;
2570 	key.type = BTRFS_EXTENT_DATA_KEY;
2571 	key.offset = file_offset;
2572 
2573 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2574 	if (ret < 0)
2575 		return ret;
2576 
2577 	if (ret > 0 && path->slots[0] > 0) {
2578 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2579 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2580 			path->slots[0]--;
2581 	}
2582 
2583 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2584 		ret = btrfs_next_leaf(root, path);
2585 		if (ret != 0)
2586 			return ret;
2587 
2588 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2589 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2590 			return 1;
2591 	}
2592 
2593 	/*
2594 	 * We clone the leaf and use it during fiemap. This is because while
2595 	 * using the leaf we do expensive things like checking if an extent is
2596 	 * shared, which can take a long time. In order to prevent blocking
2597 	 * other tasks for too long, we use a clone of the leaf. We have locked
2598 	 * the file range in the inode's io tree, so we know none of our file
2599 	 * extent items can change. This way we avoid blocking other tasks that
2600 	 * want to insert items for other inodes in the same leaf or b+tree
2601 	 * rebalance operations (triggered for example when someone is trying
2602 	 * to push items into this leaf when trying to insert an item in a
2603 	 * neighbour leaf).
2604 	 * We also need the private clone because holding a read lock on an
2605 	 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2606 	 * when we call fiemap_fill_next_extent(), because that may cause a page
2607 	 * fault when filling the user space buffer with fiemap data.
2608 	 */
2609 	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2610 	if (!clone)
2611 		return -ENOMEM;
2612 
2613 	slot = path->slots[0];
2614 	btrfs_release_path(path);
2615 	path->nodes[0] = clone;
2616 	path->slots[0] = slot;
2617 
2618 	return 0;
2619 }
2620 
2621 /*
2622  * Process a range which is a hole or a prealloc extent in the inode's subvolume
2623  * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2624  * extent. The end offset (@end) is inclusive.
2625  */
2626 static int fiemap_process_hole(struct btrfs_inode *inode,
2627 			       struct fiemap_extent_info *fieinfo,
2628 			       struct fiemap_cache *cache,
2629 			       struct extent_state **delalloc_cached_state,
2630 			       struct btrfs_backref_share_check_ctx *backref_ctx,
2631 			       u64 disk_bytenr, u64 extent_offset,
2632 			       u64 extent_gen,
2633 			       u64 start, u64 end)
2634 {
2635 	const u64 i_size = i_size_read(&inode->vfs_inode);
2636 	u64 cur_offset = start;
2637 	u64 last_delalloc_end = 0;
2638 	u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2639 	bool checked_extent_shared = false;
2640 	int ret;
2641 
2642 	/*
2643 	 * There can be no delalloc past i_size, so don't waste time looking for
2644 	 * it beyond i_size.
2645 	 */
2646 	while (cur_offset < end && cur_offset < i_size) {
2647 		u64 delalloc_start;
2648 		u64 delalloc_end;
2649 		u64 prealloc_start;
2650 		u64 prealloc_len = 0;
2651 		bool delalloc;
2652 
2653 		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2654 							delalloc_cached_state,
2655 							&delalloc_start,
2656 							&delalloc_end);
2657 		if (!delalloc)
2658 			break;
2659 
2660 		/*
2661 		 * If this is a prealloc extent we have to report every section
2662 		 * of it that has no delalloc.
2663 		 */
2664 		if (disk_bytenr != 0) {
2665 			if (last_delalloc_end == 0) {
2666 				prealloc_start = start;
2667 				prealloc_len = delalloc_start - start;
2668 			} else {
2669 				prealloc_start = last_delalloc_end + 1;
2670 				prealloc_len = delalloc_start - prealloc_start;
2671 			}
2672 		}
2673 
2674 		if (prealloc_len > 0) {
2675 			if (!checked_extent_shared && fieinfo->fi_extents_max) {
2676 				ret = btrfs_is_data_extent_shared(inode,
2677 								  disk_bytenr,
2678 								  extent_gen,
2679 								  backref_ctx);
2680 				if (ret < 0)
2681 					return ret;
2682 				else if (ret > 0)
2683 					prealloc_flags |= FIEMAP_EXTENT_SHARED;
2684 
2685 				checked_extent_shared = true;
2686 			}
2687 			ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2688 						 disk_bytenr + extent_offset,
2689 						 prealloc_len, prealloc_flags);
2690 			if (ret)
2691 				return ret;
2692 			extent_offset += prealloc_len;
2693 		}
2694 
2695 		ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2696 					 delalloc_end + 1 - delalloc_start,
2697 					 FIEMAP_EXTENT_DELALLOC |
2698 					 FIEMAP_EXTENT_UNKNOWN);
2699 		if (ret)
2700 			return ret;
2701 
2702 		last_delalloc_end = delalloc_end;
2703 		cur_offset = delalloc_end + 1;
2704 		extent_offset += cur_offset - delalloc_start;
2705 		cond_resched();
2706 	}
2707 
2708 	/*
2709 	 * Either we found no delalloc for the whole prealloc extent or we have
2710 	 * a prealloc extent that spans i_size or starts at or after i_size.
2711 	 */
2712 	if (disk_bytenr != 0 && last_delalloc_end < end) {
2713 		u64 prealloc_start;
2714 		u64 prealloc_len;
2715 
2716 		if (last_delalloc_end == 0) {
2717 			prealloc_start = start;
2718 			prealloc_len = end + 1 - start;
2719 		} else {
2720 			prealloc_start = last_delalloc_end + 1;
2721 			prealloc_len = end + 1 - prealloc_start;
2722 		}
2723 
2724 		if (!checked_extent_shared && fieinfo->fi_extents_max) {
2725 			ret = btrfs_is_data_extent_shared(inode,
2726 							  disk_bytenr,
2727 							  extent_gen,
2728 							  backref_ctx);
2729 			if (ret < 0)
2730 				return ret;
2731 			else if (ret > 0)
2732 				prealloc_flags |= FIEMAP_EXTENT_SHARED;
2733 		}
2734 		ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2735 					 disk_bytenr + extent_offset,
2736 					 prealloc_len, prealloc_flags);
2737 		if (ret)
2738 			return ret;
2739 	}
2740 
2741 	return 0;
2742 }
2743 
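/*
 * Find the end offset of the last file extent item that is not a hole and
 * return it in @last_extent_end_ret (0 if the inode has no such items).
 * extent_fiemap() uses this to know when the cached extent covers the end of
 * the last extent and must be flagged with FIEMAP_EXTENT_LAST.
 */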
2744 static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
2745 					  struct btrfs_path *path,
2746 					  u64 *last_extent_end_ret)
2747 {
2748 	const u64 ino = btrfs_ino(inode);
2749 	struct btrfs_root *root = inode->root;
2750 	struct extent_buffer *leaf;
2751 	struct btrfs_file_extent_item *ei;
2752 	struct btrfs_key key;
2753 	u64 disk_bytenr;
2754 	int ret;
2755 
2756 	/*
2757 	 * Lookup the last file extent. We're not using i_size here because
2758 	 * there might be preallocation past i_size.
2759 	 */
2760 	ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
2761 	/* There can't be a file extent item at offset (u64)-1 */
2762 	ASSERT(ret != 0);
2763 	if (ret < 0)
2764 		return ret;
2765 
2766 	/*
2767 	 * For a non-existing key, btrfs_search_slot() always leaves us at a
2768 	 * slot > 0, except if the btree is empty, which is impossible because
2769 	 * at least it has the inode item for this inode and all the items for
2770 	 * the root inode 256.
2771 	 */
2772 	ASSERT(path->slots[0] > 0);
2773 	path->slots[0]--;
2774 	leaf = path->nodes[0];
2775 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2776 	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
2777 		/* No file extent items in the subvolume tree. */
2778 		*last_extent_end_ret = 0;
2779 		return 0;
2780 	}
2781 
2782 	/*
2783 	 * For an inline extent, the disk_bytenr is where the inline data starts,
2784 	 * so first check if we have an inline extent item before checking if we
2785 	 * have an implicit hole (disk_bytenr == 0).
2786 	 */
2787 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
2788 	if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
2789 		*last_extent_end_ret = btrfs_file_extent_end(path);
2790 		return 0;
2791 	}
2792 
2793 	/*
2794 	 * Find the last file extent item that is not a hole (when NO_HOLES is
2795 	 * not enabled). This should take at most 2 iterations in the worst
2796 	 * case: we have one hole file extent item at slot 0 of a leaf and
2797 	 * another hole file extent item as the last item in the previous leaf.
2798 	 * This is because we merge file extent items that represent holes.
2799 	 */
2800 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2801 	while (disk_bytenr == 0) {
2802 		ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
2803 		if (ret < 0) {
2804 			return ret;
2805 		} else if (ret > 0) {
2806 			/* No file extent items that are not holes. */
2807 			*last_extent_end_ret = 0;
2808 			return 0;
2809 		}
2810 		leaf = path->nodes[0];
2811 		ei = btrfs_item_ptr(leaf, path->slots[0],
2812 				    struct btrfs_file_extent_item);
2813 		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2814 	}
2815 
2816 	*last_extent_end_ret = btrfs_file_extent_end(path);
2817 	return 0;
2818 }
2819 
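/*
 * The btrfs fiemap implementation.
 *
 * With the inode and the target file range locked, walk the inode's file
 * extent items from a cloned leaf.  Holes and prealloc extents go through
 * fiemap_process_hole(), which also reports delalloc ranges, while inline
 * and regular extents are emitted directly.  Adjacent extents are merged in
 * a fiemap_cache, and the final cached extent gets FIEMAP_EXTENT_LAST when
 * it reaches the end of the last extent with no trailing delalloc.
 */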
2820 int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
2821 		  u64 start, u64 len)
2822 {
2823 	const u64 ino = btrfs_ino(inode);
2824 	struct extent_state *cached_state = NULL;
2825 	struct extent_state *delalloc_cached_state = NULL;
2826 	struct btrfs_path *path;
2827 	struct fiemap_cache cache = { 0 };
2828 	struct btrfs_backref_share_check_ctx *backref_ctx;
2829 	u64 last_extent_end;
2830 	u64 prev_extent_end;
2831 	u64 lockstart;
2832 	u64 lockend;
2833 	bool stopped = false;
2834 	int ret;
2835 
2836 	backref_ctx = btrfs_alloc_backref_share_check_ctx();
2837 	path = btrfs_alloc_path();
2838 	if (!backref_ctx || !path) {
2839 		ret = -ENOMEM;
2840 		goto out;
2841 	}
2842 
2843 	lockstart = round_down(start, inode->root->fs_info->sectorsize);
2844 	lockend = round_up(start + len, inode->root->fs_info->sectorsize);
2845 	prev_extent_end = lockstart;
2846 
2847 	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
2848 	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
2849 
2850 	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
2851 	if (ret < 0)
2852 		goto out_unlock;
2853 	btrfs_release_path(path);
2854 
2855 	path->reada = READA_FORWARD;
2856 	ret = fiemap_search_slot(inode, path, lockstart);
2857 	if (ret < 0) {
2858 		goto out_unlock;
2859 	} else if (ret > 0) {
2860 		/*
2861 		 * No file extent item found, but we may have delalloc between
2862 		 * the current offset and i_size. So check for that.
2863 		 */
2864 		ret = 0;
2865 		goto check_eof_delalloc;
2866 	}
2867 
2868 	while (prev_extent_end < lockend) {
2869 		struct extent_buffer *leaf = path->nodes[0];
2870 		struct btrfs_file_extent_item *ei;
2871 		struct btrfs_key key;
2872 		u64 extent_end;
2873 		u64 extent_len;
2874 		u64 extent_offset = 0;
2875 		u64 extent_gen;
2876 		u64 disk_bytenr = 0;
2877 		u64 flags = 0;
2878 		int extent_type;
2879 		u8 compression;
2880 
2881 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2882 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2883 			break;
2884 
2885 		extent_end = btrfs_file_extent_end(path);
2886 
2887 		/*
2888 		 * The first iteration can leave us at an extent item that ends
2889 		 * before our range's start. Move to the next item.
2890 		 */
2891 		if (extent_end <= lockstart)
2892 			goto next_item;
2893 
2894 		backref_ctx->curr_leaf_bytenr = leaf->start;
2895 
2896 		/* We have an implicit hole (NO_HOLES feature enabled). */
2897 		if (prev_extent_end < key.offset) {
2898 			const u64 range_end = min(key.offset, lockend) - 1;
2899 
2900 			ret = fiemap_process_hole(inode, fieinfo, &cache,
2901 						  &delalloc_cached_state,
2902 						  backref_ctx, 0, 0, 0,
2903 						  prev_extent_end, range_end);
2904 			if (ret < 0) {
2905 				goto out_unlock;
2906 			} else if (ret > 0) {
2907 				/* fiemap_fill_next_extent() told us to stop. */
2908 				stopped = true;
2909 				break;
2910 			}
2911 
2912 			/* We've reached the end of the fiemap range, stop. */
2913 			if (key.offset >= lockend) {
2914 				stopped = true;
2915 				break;
2916 			}
2917 		}
2918 
2919 		extent_len = extent_end - key.offset;
2920 		ei = btrfs_item_ptr(leaf, path->slots[0],
2921 				    struct btrfs_file_extent_item);
2922 		compression = btrfs_file_extent_compression(leaf, ei);
2923 		extent_type = btrfs_file_extent_type(leaf, ei);
2924 		extent_gen = btrfs_file_extent_generation(leaf, ei);
2925 
2926 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2927 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2928 			if (compression == BTRFS_COMPRESS_NONE)
2929 				extent_offset = btrfs_file_extent_offset(leaf, ei);
2930 		}
2931 
2932 		if (compression != BTRFS_COMPRESS_NONE)
2933 			flags |= FIEMAP_EXTENT_ENCODED;
2934 
2935 		if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2936 			flags |= FIEMAP_EXTENT_DATA_INLINE;
2937 			flags |= FIEMAP_EXTENT_NOT_ALIGNED;
2938 			ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
2939 						 extent_len, flags);
2940 		} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
2941 			ret = fiemap_process_hole(inode, fieinfo, &cache,
2942 						  &delalloc_cached_state,
2943 						  backref_ctx,
2944 						  disk_bytenr, extent_offset,
2945 						  extent_gen, key.offset,
2946 						  extent_end - 1);
2947 		} else if (disk_bytenr == 0) {
2948 			/* We have an explicit hole. */
2949 			ret = fiemap_process_hole(inode, fieinfo, &cache,
2950 						  &delalloc_cached_state,
2951 						  backref_ctx, 0, 0, 0,
2952 						  key.offset, extent_end - 1);
2953 		} else {
2954 			/* We have a regular extent. */
2955 			if (fieinfo->fi_extents_max) {
2956 				ret = btrfs_is_data_extent_shared(inode,
2957 								  disk_bytenr,
2958 								  extent_gen,
2959 								  backref_ctx);
2960 				if (ret < 0)
2961 					goto out_unlock;
2962 				else if (ret > 0)
2963 					flags |= FIEMAP_EXTENT_SHARED;
2964 			}
2965 
2966 			ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
2967 						 disk_bytenr + extent_offset,
2968 						 extent_len, flags);
2969 		}
2970 
2971 		if (ret < 0) {
2972 			goto out_unlock;
2973 		} else if (ret > 0) {
2974 			/* fiemap_fill_next_extent() told us to stop. */
2975 			stopped = true;
2976 			break;
2977 		}
2978 
2979 		prev_extent_end = extent_end;
2980 next_item:
2981 		if (fatal_signal_pending(current)) {
2982 			ret = -EINTR;
2983 			goto out_unlock;
2984 		}
2985 
2986 		ret = fiemap_next_leaf_item(inode, path);
2987 		if (ret < 0) {
2988 			goto out_unlock;
2989 		} else if (ret > 0) {
2990 			/* No more file extent items for this inode. */
2991 			break;
2992 		}
2993 		cond_resched();
2994 	}
2995 
2996 check_eof_delalloc:
2997 	/*
2998 	 * Release (and free) the path before emitting any final entries to
2999 	 * fiemap_fill_next_extent() to keep lockdep happy. This is because
3000 	 * once we find no more file extent items exist, we may have a
3001 	 * non-cloned leaf, and fiemap_fill_next_extent() can trigger page
3002 	 * faults when copying data to the user space buffer.
3003 	 */
3004 	btrfs_free_path(path);
3005 	path = NULL;
3006 
3007 	if (!stopped && prev_extent_end < lockend) {
3008 		ret = fiemap_process_hole(inode, fieinfo, &cache,
3009 					  &delalloc_cached_state, backref_ctx,
3010 					  0, 0, 0, prev_extent_end, lockend - 1);
3011 		if (ret < 0)
3012 			goto out_unlock;
3013 		prev_extent_end = lockend;
3014 	}
3015 
3016 	if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3017 		const u64 i_size = i_size_read(&inode->vfs_inode);
3018 
3019 		if (prev_extent_end < i_size) {
3020 			u64 delalloc_start;
3021 			u64 delalloc_end;
3022 			bool delalloc;
3023 
3024 			delalloc = btrfs_find_delalloc_in_range(inode,
3025 								prev_extent_end,
3026 								i_size - 1,
3027 								&delalloc_cached_state,
3028 								&delalloc_start,
3029 								&delalloc_end);
3030 			if (!delalloc)
3031 				cache.flags |= FIEMAP_EXTENT_LAST;
3032 		} else {
3033 			cache.flags |= FIEMAP_EXTENT_LAST;
3034 		}
3035 	}
3036 
3037 	ret = emit_last_fiemap_cache(fieinfo, &cache);
3038 
3039 out_unlock:
3040 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3041 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3042 out:
3043 	free_extent_state(delalloc_cached_state);
3044 	btrfs_free_backref_share_ctx(backref_ctx);
3045 	btrfs_free_path(path);
3046 	return ret;
3047 }
3048 
3049 static void __free_extent_buffer(struct extent_buffer *eb)
3050 {
3051 	kmem_cache_free(extent_buffer_cache, eb);
3052 }
3053 
3054 static int extent_buffer_under_io(const struct extent_buffer *eb)
3055 {
3056 	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3057 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3058 }
3059 
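/*
 * Return true if the (subpage) page still has extent buffers attached or has
 * pending readers relying on page::private, meaning the private data must
 * not be detached yet.
 */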
3060 static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
3061 {
3062 	struct btrfs_subpage *subpage;
3063 
3064 	lockdep_assert_held(&page->mapping->private_lock);
3065 
3066 	if (PagePrivate(page)) {
3067 		subpage = (struct btrfs_subpage *)page->private;
3068 		if (atomic_read(&subpage->eb_refs))
3069 			return true;
3070 		/*
3071 		 * Even if there are no eb refs here, we may still have an
3072 		 * end_page_read() call relying on page::private.
3073 		 */
3074 		if (atomic_read(&subpage->readers))
3075 			return true;
3076 	}
3077 	return false;
3078 }
3079 
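/*
 * Detach an extent buffer from one of its pages.  For regular ebs (nodesize
 * >= PAGE_SIZE) this clears page->private if it still points to this eb.
 * For subpage ebs it drops one eb reference on the page and only detaches
 * the subpage private once no other eb or reader uses the page.
 */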
3080 static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
3081 {
3082 	struct btrfs_fs_info *fs_info = eb->fs_info;
3083 	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3084 
3085 	/*
3086 	 * For mapped eb, we're going to change the page private, which should
3087 	 * be done under the private_lock.
3088 	 */
3089 	if (mapped)
3090 		spin_lock(&page->mapping->private_lock);
3091 
3092 	if (!PagePrivate(page)) {
3093 		if (mapped)
3094 			spin_unlock(&page->mapping->private_lock);
3095 		return;
3096 	}
3097 
3098 	if (fs_info->nodesize >= PAGE_SIZE) {
3099 		/*
3100 		 * We do this since we'll remove the pages after we've
3101 		 * removed the eb from the radix tree, so we could race
3102 		 * and have this page now attached to the new eb.  So
3103 		 * only clear page_private if it's still connected to
3104 		 * this eb.
3105 		 */
3106 		if (PagePrivate(page) &&
3107 		    page->private == (unsigned long)eb) {
3108 			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3109 			BUG_ON(PageDirty(page));
3110 			BUG_ON(PageWriteback(page));
3111 			/*
3112 			 * We need to make sure we haven't been attached
3113 			 * to a new eb.
3114 			 */
3115 			detach_page_private(page);
3116 		}
3117 		if (mapped)
3118 			spin_unlock(&page->mapping->private_lock);
3119 		return;
3120 	}
3121 
3122 	/*
3123 	 * For subpage, we can have a dummy eb with page private.  In this case,
3124 	 * we can directly detach the private as such a page is only attached to
3125 	 * one dummy eb, no sharing.
3126 	 */
3127 	if (!mapped) {
3128 		btrfs_detach_subpage(fs_info, page);
3129 		return;
3130 	}
3131 
3132 	btrfs_page_dec_eb_refs(fs_info, page);
3133 
3134 	/*
3135 	 * We can only detach the page private if there are no other ebs in the
3136 	 * page range and no unfinished IO.
3137 	 */
3138 	if (!page_range_has_eb(fs_info, page))
3139 		btrfs_detach_subpage(fs_info, page);
3140 
3141 	spin_unlock(&page->mapping->private_lock);
3142 }
3143 
3144 /* Release all pages attached to the extent buffer */
3145 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3146 {
3147 	int i;
3148 	int num_pages;
3149 
3150 	ASSERT(!extent_buffer_under_io(eb));
3151 
3152 	num_pages = num_extent_pages(eb);
3153 	for (i = 0; i < num_pages; i++) {
3154 		struct page *page = eb->pages[i];
3155 
3156 		if (!page)
3157 			continue;
3158 
3159 		detach_extent_buffer_page(eb, page);
3160 
3161 		/* One for when we allocated the page */
3162 		put_page(page);
3163 	}
3164 }
3165 
3166 /*
3167  * Helper for releasing the extent buffer.
3168  */
3169 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3170 {
3171 	btrfs_release_extent_buffer_pages(eb);
3172 	btrfs_leak_debug_del_eb(eb);
3173 	__free_extent_buffer(eb);
3174 }
3175 
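/*
 * Allocate and initialize a bare extent buffer (no pages attached yet) for
 * the given bytenr and length, holding one reference.  The allocation cannot
 * fail since it uses __GFP_NOFAIL.
 */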
3176 static struct extent_buffer *
3177 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
3178 		      unsigned long len)
3179 {
3180 	struct extent_buffer *eb = NULL;
3181 
3182 	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
3183 	eb->start = start;
3184 	eb->len = len;
3185 	eb->fs_info = fs_info;
3186 	init_rwsem(&eb->lock);
3187 
3188 	btrfs_leak_debug_add_eb(eb);
3189 
3190 	spin_lock_init(&eb->refs_lock);
3191 	atomic_set(&eb->refs, 1);
3192 
3193 	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
3194 
3195 	return eb;
3196 }
3197 
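/*
 * Create a private, unmapped copy of an extent buffer: allocate fresh pages,
 * attach them, copy the source's contents and mark the clone uptodate.
 * Returns NULL on allocation failure.
 */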
3198 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
3199 {
3200 	int i;
3201 	struct extent_buffer *new;
3202 	int num_pages = num_extent_pages(src);
3203 	int ret;
3204 
3205 	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
3206 	if (new == NULL)
3207 		return NULL;
3208 
3209 	/*
3210 	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3211 	 * btrfs_release_extent_buffer() has different behavior for an
3212 	 * UNMAPPED subpage extent buffer.
3213 	 */
3214 	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3215 
3216 	ret = btrfs_alloc_page_array(num_pages, new->pages);
3217 	if (ret) {
3218 		btrfs_release_extent_buffer(new);
3219 		return NULL;
3220 	}
3221 
3222 	for (i = 0; i < num_pages; i++) {
3223 		int ret;
3224 		struct page *p = new->pages[i];
3225 
3226 		ret = attach_extent_buffer_page(new, p, NULL);
3227 		if (ret < 0) {
3228 			btrfs_release_extent_buffer(new);
3229 			return NULL;
3230 		}
3231 		WARN_ON(PageDirty(p));
3232 	}
3233 	copy_extent_buffer_full(new, src);
3234 	set_extent_buffer_uptodate(new);
3235 
3236 	return new;
3237 }
3238 
3239 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3240 						  u64 start, unsigned long len)
3241 {
3242 	struct extent_buffer *eb;
3243 	int num_pages;
3244 	int i;
3245 	int ret;
3246 
3247 	eb = __alloc_extent_buffer(fs_info, start, len);
3248 	if (!eb)
3249 		return NULL;
3250 
3251 	num_pages = num_extent_pages(eb);
3252 	ret = btrfs_alloc_page_array(num_pages, eb->pages);
3253 	if (ret)
3254 		goto err;
3255 
3256 	for (i = 0; i < num_pages; i++) {
3257 		struct page *p = eb->pages[i];
3258 
3259 		ret = attach_extent_buffer_page(eb, p, NULL);
3260 		if (ret < 0)
3261 			goto err;
3262 	}
3263 
3264 	set_extent_buffer_uptodate(eb);
3265 	btrfs_set_header_nritems(eb, 0);
3266 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3267 
3268 	return eb;
3269 err:
3270 	for (i = 0; i < num_pages; i++) {
3271 		if (eb->pages[i]) {
3272 			detach_extent_buffer_page(eb, eb->pages[i]);
3273 			__free_page(eb->pages[i]);
3274 		}
3275 	}
3276 	__free_extent_buffer(eb);
3277 	return NULL;
3278 }
3279 
3280 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3281 						u64 start)
3282 {
3283 	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
3284 }
3285 
3286 static void check_buffer_tree_ref(struct extent_buffer *eb)
3287 {
3288 	int refs;
3289 	/*
3290 	 * The TREE_REF bit is first set when the extent_buffer is added
3291 	 * to the radix tree. It is also set again, if it was cleared, when a new
3292 	 * reference is created by find_extent_buffer().
3293 	 *
3294 	 * It is only cleared in two cases: freeing the last non-tree
3295 	 * reference to the extent_buffer when its STALE bit is set or
3296 	 * calling release_folio when the tree reference is the only reference.
3297 	 *
3298 	 * In both cases, care is taken to ensure that the extent_buffer's
3299 	 * pages are not under io. However, release_folio can be concurrently
3300 	 * called with creating new references, which is prone to race
3301 	 * conditions between the calls to check_buffer_tree_ref in those
3302 	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3303 	 *
3304 	 * The actual lifetime of the extent_buffer in the radix tree is
3305 	 * adequately protected by the refcount, but the TREE_REF bit and
3306 	 * its corresponding reference are not. To protect against this
3307 	 * class of races, we call check_buffer_tree_ref from the codepaths
3308 	 * which trigger io. Note that once io is initiated, TREE_REF can no
3309 	 * longer be cleared, so that is the moment at which any such race is
3310 	 * best fixed.
3311 	 */
3312 	refs = atomic_read(&eb->refs);
3313 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3314 		return;
3315 
3316 	spin_lock(&eb->refs_lock);
3317 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3318 		atomic_inc(&eb->refs);
3319 	spin_unlock(&eb->refs_lock);
3320 }
3321 
3322 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
3323 		struct page *accessed)
3324 {
3325 	int num_pages, i;
3326 
3327 	check_buffer_tree_ref(eb);
3328 
3329 	num_pages = num_extent_pages(eb);
3330 	for (i = 0; i < num_pages; i++) {
3331 		struct page *p = eb->pages[i];
3332 
3333 		if (p != accessed)
3334 			mark_page_accessed(p);
3335 	}
3336 }
3337 
3338 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3339 					 u64 start)
3340 {
3341 	struct extent_buffer *eb;
3342 
3343 	eb = find_extent_buffer_nolock(fs_info, start);
3344 	if (!eb)
3345 		return NULL;
3346 	/*
3347 	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3348 	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3349 	 * another task running free_extent_buffer() might have seen that flag
3350 	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3351 	 * writeback flags not set) and it's still in the tree (flag
3352 	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3353 	 * decrementing the extent buffer's reference count twice.  So here we
3354 	 * could race and increment the eb's reference count, clear its stale
3355 	 * flag, mark it as dirty and drop our reference before the other task
3356 	 * finishes executing free_extent_buffer, which would later result in
3357 	 * an attempt to free an extent buffer that is dirty.
3358 	 */
3359 	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3360 		spin_lock(&eb->refs_lock);
3361 		spin_unlock(&eb->refs_lock);
3362 	}
3363 	mark_extent_buffer_accessed(eb, NULL);
3364 	return eb;
3365 }
3366 
3367 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3368 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3369 					u64 start)
3370 {
3371 	struct extent_buffer *eb, *exists = NULL;
3372 	int ret;
3373 
3374 	eb = find_extent_buffer(fs_info, start);
3375 	if (eb)
3376 		return eb;
3377 	eb = alloc_dummy_extent_buffer(fs_info, start);
3378 	if (!eb)
3379 		return ERR_PTR(-ENOMEM);
3380 	eb->fs_info = fs_info;
3381 again:
3382 	ret = radix_tree_preload(GFP_NOFS);
3383 	if (ret) {
3384 		exists = ERR_PTR(ret);
3385 		goto free_eb;
3386 	}
3387 	spin_lock(&fs_info->buffer_lock);
3388 	ret = radix_tree_insert(&fs_info->buffer_radix,
3389 				start >> fs_info->sectorsize_bits, eb);
3390 	spin_unlock(&fs_info->buffer_lock);
3391 	radix_tree_preload_end();
3392 	if (ret == -EEXIST) {
3393 		exists = find_extent_buffer(fs_info, start);
3394 		if (exists)
3395 			goto free_eb;
3396 		else
3397 			goto again;
3398 	}
3399 	check_buffer_tree_ref(eb);
3400 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3401 
3402 	return eb;
3403 free_eb:
3404 	btrfs_release_extent_buffer(eb);
3405 	return exists;
3406 }
3407 #endif
3408 
3409 static struct extent_buffer *grab_extent_buffer(
3410 		struct btrfs_fs_info *fs_info, struct page *page)
3411 {
3412 	struct extent_buffer *exists;
3413 
3414 	/*
3415 	 * For subpage case, we completely rely on radix tree to ensure we
3416 	 * don't try to insert two ebs for the same bytenr.  So here we always
3417 	 * return NULL and just continue.
3418 	 */
3419 	if (fs_info->nodesize < PAGE_SIZE)
3420 		return NULL;
3421 
3422 	/* Page not yet attached to an extent buffer */
3423 	if (!PagePrivate(page))
3424 		return NULL;
3425 
3426 	/*
3427 	 * We could have already allocated an eb for this page and attached one,
3428 	 * so let's see if we can get a ref on the existing eb. If we can, we
3429 	 * know it's good and we can just return that one; otherwise we know we
3430 	 * can safely overwrite page->private.
3431 	 */
3432 	exists = (struct extent_buffer *)page->private;
3433 	if (atomic_inc_not_zero(&exists->refs))
3434 		return exists;
3435 
3436 	WARN_ON(PageDirty(page));
3437 	detach_page_private(page);
3438 	return NULL;
3439 }
3440 
3441 static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3442 {
3443 	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3444 		btrfs_err(fs_info, "bad tree block start %llu", start);
3445 		return -EINVAL;
3446 	}
3447 
3448 	if (fs_info->nodesize < PAGE_SIZE &&
3449 	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3450 		btrfs_err(fs_info,
3451 		"tree block crosses page boundary, start %llu nodesize %u",
3452 			  start, fs_info->nodesize);
3453 		return -EINVAL;
3454 	}
3455 	if (fs_info->nodesize >= PAGE_SIZE &&
3456 	    !PAGE_ALIGNED(start)) {
3457 		btrfs_err(fs_info,
3458 		"tree block is not page aligned, start %llu nodesize %u",
3459 			  start, fs_info->nodesize);
3460 		return -EINVAL;
3461 	}
3462 	if (!IS_ALIGNED(start, fs_info->nodesize) &&
3463 	    !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
3464 		btrfs_warn(fs_info,
3465 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
3466 			      start, fs_info->nodesize);
3467 	}
3468 	return 0;
3469 }
3470 
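/*
 * Return the extent buffer for the tree block at @start, allocating it and
 * inserting it into the buffer radix tree if it is not cached yet.  The
 * returned eb carries a reference for the caller in addition to the tree's
 * own reference, or an ERR_PTR() on failure.
 */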
3471 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3472 					  u64 start, u64 owner_root, int level)
3473 {
3474 	unsigned long len = fs_info->nodesize;
3475 	int num_pages;
3476 	int i;
3477 	unsigned long index = start >> PAGE_SHIFT;
3478 	struct extent_buffer *eb;
3479 	struct extent_buffer *exists = NULL;
3480 	struct page *p;
3481 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3482 	struct btrfs_subpage *prealloc = NULL;
3483 	u64 lockdep_owner = owner_root;
3484 	int uptodate = 1;
3485 	int ret;
3486 
3487 	if (check_eb_alignment(fs_info, start))
3488 		return ERR_PTR(-EINVAL);
3489 
3490 #if BITS_PER_LONG == 32
3491 	if (start >= MAX_LFS_FILESIZE) {
3492 		btrfs_err_rl(fs_info,
3493 		"extent buffer %llu is beyond 32bit page cache limit", start);
3494 		btrfs_err_32bit_limit(fs_info);
3495 		return ERR_PTR(-EOVERFLOW);
3496 	}
3497 	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3498 		btrfs_warn_32bit_limit(fs_info);
3499 #endif
3500 
3501 	eb = find_extent_buffer(fs_info, start);
3502 	if (eb)
3503 		return eb;
3504 
3505 	eb = __alloc_extent_buffer(fs_info, start, len);
3506 	if (!eb)
3507 		return ERR_PTR(-ENOMEM);
3508 
3509 	/*
3510 	 * The reloc trees are just snapshots, so we need them to appear to be
3511 	 * just like any other fs tree WRT lockdep.
3512 	 */
3513 	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3514 		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3515 
3516 	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3517 
3518 	num_pages = num_extent_pages(eb);
3519 
3520 	/*
3521 	 * Preallocate page->private for the subpage case, so that we won't
3522 	 * allocate memory with private_lock or the page lock held.
3523 	 *
3524 	 * The memory will be freed by attach_extent_buffer_page(), or manually
3525 	 * if we exit earlier.
3526 	 */
3527 	if (fs_info->nodesize < PAGE_SIZE) {
3528 		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3529 		if (IS_ERR(prealloc)) {
3530 			exists = ERR_CAST(prealloc);
3531 			goto free_eb;
3532 		}
3533 	}
3534 
3535 	for (i = 0; i < num_pages; i++, index++) {
3536 		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
3537 		if (!p) {
3538 			exists = ERR_PTR(-ENOMEM);
3539 			btrfs_free_subpage(prealloc);
3540 			goto free_eb;
3541 		}
3542 
3543 		spin_lock(&mapping->private_lock);
3544 		exists = grab_extent_buffer(fs_info, p);
3545 		if (exists) {
3546 			spin_unlock(&mapping->private_lock);
3547 			unlock_page(p);
3548 			put_page(p);
3549 			mark_extent_buffer_accessed(exists, p);
3550 			btrfs_free_subpage(prealloc);
3551 			goto free_eb;
3552 		}
3553 		/* Should not fail, as we have preallocated the memory */
3554 		ret = attach_extent_buffer_page(eb, p, prealloc);
3555 		ASSERT(!ret);
3556 		/*
3557 		 * Record that we have an extra eb under allocation, so that
3558 		 * detach_extent_buffer_page() won't release the page private
3559 		 * while the eb hasn't yet been inserted into the radix tree.
3560 		 *
3561 		 * The ref will be decreased when the eb releases the page, in
3562 		 * detach_extent_buffer_page(), so the error path needs no
3563 		 * special handling.
3564 		 */
3565 		btrfs_page_inc_eb_refs(fs_info, p);
3566 		spin_unlock(&mapping->private_lock);
3567 
3568 		WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
3569 		eb->pages[i] = p;
3570 		if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len))
3571 			uptodate = 0;
3572 
3573 		/*
3574 		 * We can't unlock the pages just yet since the extent buffer
3575 		 * hasn't been properly inserted into the radix tree; that would
3576 		 * open a race with btree_release_folio(), which can free a page
3577 		 * while we are still filling in all pages for the buffer, and
3578 		 * we could crash.
3579 		 */
3580 	}
3581 	if (uptodate)
3582 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3583 again:
3584 	ret = radix_tree_preload(GFP_NOFS);
3585 	if (ret) {
3586 		exists = ERR_PTR(ret);
3587 		goto free_eb;
3588 	}
3589 
3590 	spin_lock(&fs_info->buffer_lock);
3591 	ret = radix_tree_insert(&fs_info->buffer_radix,
3592 				start >> fs_info->sectorsize_bits, eb);
3593 	spin_unlock(&fs_info->buffer_lock);
3594 	radix_tree_preload_end();
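	/*
	 * -EEXIST means another task beat us to inserting an eb for this
	 * bytenr.  Grab that one instead; if it is already on its way to
	 * being freed (lookup returns NULL), retry the insertion.
	 */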
3595 	if (ret == -EEXIST) {
3596 		exists = find_extent_buffer(fs_info, start);
3597 		if (exists)
3598 			goto free_eb;
3599 		else
3600 			goto again;
3601 	}
3602 	/* add one reference for the tree */
3603 	check_buffer_tree_ref(eb);
3604 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3605 
3606 	/*
3607 	 * Now it's safe to unlock the pages because any calls to
3608 	 * btree_release_folio will correctly detect that a page belongs to a
3609 	 * live buffer and won't free them prematurely.
3610 	 */
3611 	for (i = 0; i < num_pages; i++)
3612 		unlock_page(eb->pages[i]);
3613 	return eb;
3614 
3615 free_eb:
3616 	WARN_ON(!atomic_dec_and_test(&eb->refs));
3617 	for (i = 0; i < num_pages; i++) {
3618 		if (eb->pages[i])
3619 			unlock_page(eb->pages[i]);
3620 	}
3621 
3622 	btrfs_release_extent_buffer(eb);
3623 	return exists;
3624 }
3625 
3626 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3627 {
3628 	struct extent_buffer *eb =
3629 			container_of(head, struct extent_buffer, rcu_head);
3630 
3631 	__free_extent_buffer(eb);
3632 }
3633 
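/*
 * Drop one reference on @eb.  Must be called with eb->refs_lock held; the
 * lock is always released before returning.  If this was the last reference,
 * remove the eb from the buffer radix tree (if it is still there), release
 * its pages and free the eb itself.  Returns 1 if the eb was freed, 0
 * otherwise.
 */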
3634 static int release_extent_buffer(struct extent_buffer *eb)
3635 	__releases(&eb->refs_lock)
3636 {
3637 	lockdep_assert_held(&eb->refs_lock);
3638 
3639 	WARN_ON(atomic_read(&eb->refs) == 0);
3640 	if (atomic_dec_and_test(&eb->refs)) {
3641 		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3642 			struct btrfs_fs_info *fs_info = eb->fs_info;
3643 
3644 			spin_unlock(&eb->refs_lock);
3645 
3646 			spin_lock(&fs_info->buffer_lock);
3647 			radix_tree_delete(&fs_info->buffer_radix,
3648 					  eb->start >> fs_info->sectorsize_bits);
3649 			spin_unlock(&fs_info->buffer_lock);
3650 		} else {
3651 			spin_unlock(&eb->refs_lock);
3652 		}
3653 
3654 		btrfs_leak_debug_del_eb(eb);
3655 		/* Should be safe to release our pages at this point */
3656 		btrfs_release_extent_buffer_pages(eb);
3657 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3658 		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3659 			__free_extent_buffer(eb);
3660 			return 1;
3661 		}
3662 #endif
3663 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3664 		return 1;
3665 	}
3666 	spin_unlock(&eb->refs_lock);
3667 
3668 	return 0;
3669 }
3670 
3671 void free_extent_buffer(struct extent_buffer *eb)
3672 {
3673 	int refs;
3674 	if (!eb)
3675 		return;
3676 
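	/*
	 * Lockless fast path: as long as enough references remain that this
	 * drop can neither be the final one nor the one that lets a stale eb
	 * shed its tree reference (both handled under refs_lock below), just
	 * decrement the count without taking the lock.
	 */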
3677 	refs = atomic_read(&eb->refs);
3678 	while (1) {
3679 		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3680 		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3681 			refs == 1))
3682 			break;
3683 		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3684 			return;
3685 	}
3686 
3687 	spin_lock(&eb->refs_lock);
3688 	if (atomic_read(&eb->refs) == 2 &&
3689 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3690 	    !extent_buffer_under_io(eb) &&
3691 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3692 		atomic_dec(&eb->refs);
3693 
3694 	/*
3695 	 * I know this is terrible, but it's temporary until we stop tracking
3696 	 * the uptodate bits and such for the extent buffers.
3697 	 */
3698 	release_extent_buffer(eb);
3699 }
3700 
3701 void free_extent_buffer_stale(struct extent_buffer *eb)
3702 {
3703 	if (!eb)
3704 		return;
3705 
3706 	spin_lock(&eb->refs_lock);
3707 	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3708 
3709 	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3710 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3711 		atomic_dec(&eb->refs);
3712 	release_extent_buffer(eb);
3713 }
3714 
3715 static void btree_clear_page_dirty(struct page *page)
3716 {
3717 	ASSERT(PageDirty(page));
3718 	ASSERT(PageLocked(page));
3719 	clear_page_dirty_for_io(page);
3720 	xa_lock_irq(&page->mapping->i_pages);
3721 	if (!PageDirty(page))
3722 		__xa_clear_mark(&page->mapping->i_pages,
3723 				page_index(page), PAGECACHE_TAG_DIRTY);
3724 	xa_unlock_irq(&page->mapping->i_pages);
3725 }
3726 
3727 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3728 {
3729 	struct btrfs_fs_info *fs_info = eb->fs_info;
3730 	struct page *page = eb->pages[0];
3731 	bool last;
3732 
3733 	/* btree_clear_page_dirty() needs page locked */
3734 	lock_page(page);
3735 	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
3736 						  eb->len);
3737 	if (last)
3738 		btree_clear_page_dirty(page);
3739 	unlock_page(page);
3740 	WARN_ON(atomic_read(&eb->refs) == 0);
3741 }
3742 
3743 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3744 			      struct extent_buffer *eb)
3745 {
3746 	struct btrfs_fs_info *fs_info = eb->fs_info;
3747 	int i;
3748 	int num_pages;
3749 	struct page *page;
3750 
3751 	btrfs_assert_tree_write_locked(eb);
3752 
3753 	if (trans && btrfs_header_generation(eb) != trans->transid)
3754 		return;
3755 
3756 	if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3757 		return;
3758 
3759 	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3760 				 fs_info->dirty_metadata_batch);
3761 
3762 	if (eb->fs_info->nodesize < PAGE_SIZE)
3763 		return clear_subpage_extent_buffer_dirty(eb);
3764 
3765 	num_pages = num_extent_pages(eb);
3766 
3767 	for (i = 0; i < num_pages; i++) {
3768 		page = eb->pages[i];
3769 		if (!PageDirty(page))
3770 			continue;
3771 		lock_page(page);
3772 		btree_clear_page_dirty(page);
3773 		unlock_page(page);
3774 	}
3775 	WARN_ON(atomic_read(&eb->refs) == 0);
3776 }
3777 
3778 void set_extent_buffer_dirty(struct extent_buffer *eb)
3779 {
3780 	int i;
3781 	int num_pages;
3782 	bool was_dirty;
3783 
3784 	check_buffer_tree_ref(eb);
3785 
3786 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3787 
3788 	num_pages = num_extent_pages(eb);
3789 	WARN_ON(atomic_read(&eb->refs) == 0);
3790 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3791 
3792 	if (!was_dirty) {
3793 		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3794 
3795 		/*
3796 		 * For the subpage case, we can have other extent buffers in the
3797 		 * same page, and in clear_subpage_extent_buffer_dirty() we
3798 		 * have to clear the page dirty flag without the subpage lock
3799 		 * held.  This can cause a race where our page gets its dirty
3800 		 * flag cleared right after we set it.
3801 		 *
3802 		 * Thankfully, clear_subpage_extent_buffer_dirty() already locks
3803 		 * its page for other reasons, so we can use the page lock to
3804 		 * prevent the above race.
3805 		 */
3806 		if (subpage)
3807 			lock_page(eb->pages[0]);
3808 		for (i = 0; i < num_pages; i++)
3809 			btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
3810 					     eb->start, eb->len);
3811 		if (subpage)
3812 			unlock_page(eb->pages[0]);
3813 		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3814 					 eb->len,
3815 					 eb->fs_info->dirty_metadata_batch);
3816 	}
3817 #ifdef CONFIG_BTRFS_DEBUG
3818 	for (i = 0; i < num_pages; i++)
3819 		ASSERT(PageDirty(eb->pages[i]));
3820 #endif
3821 }
3822 
3823 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3824 {
3825 	struct btrfs_fs_info *fs_info = eb->fs_info;
3826 	struct page *page;
3827 	int num_pages;
3828 	int i;
3829 
3830 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3831 	num_pages = num_extent_pages(eb);
3832 	for (i = 0; i < num_pages; i++) {
3833 		page = eb->pages[i];
3834 		if (!page)
3835 			continue;
3836 
3837 		/*
3838 		 * This is special handling for metadata subpage, as regular
3839 		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
3840 		 */
3841 		if (fs_info->nodesize >= PAGE_SIZE)
3842 			ClearPageUptodate(page);
3843 		else
3844 			btrfs_subpage_clear_uptodate(fs_info, page, eb->start,
3845 						     eb->len);
3846 	}
3847 }
3848 
3849 void set_extent_buffer_uptodate(struct extent_buffer *eb)
3850 {
3851 	struct btrfs_fs_info *fs_info = eb->fs_info;
3852 	struct page *page;
3853 	int num_pages;
3854 	int i;
3855 
3856 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3857 	num_pages = num_extent_pages(eb);
3858 	for (i = 0; i < num_pages; i++) {
3859 		page = eb->pages[i];
3860 
3861 		/*
3862 		 * This is special handling for metadata subpage, as regular
3863 		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
3864 		 */
3865 		if (fs_info->nodesize >= PAGE_SIZE)
3866 			SetPageUptodate(page);
3867 		else
3868 			btrfs_subpage_set_uptodate(fs_info, page, eb->start,
3869 						   eb->len);
3870 	}
3871 }
3872 
3873 static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
3874 {
3875 	struct extent_buffer *eb = bbio->private;
3876 	struct btrfs_fs_info *fs_info = eb->fs_info;
3877 	bool uptodate = !bbio->bio.bi_status;
3878 	struct bvec_iter_all iter_all;
3879 	struct bio_vec *bvec;
3880 	u32 bio_offset = 0;
3881 
3882 	eb->read_mirror = bbio->mirror_num;
3883 
3884 	if (uptodate &&
3885 	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3886 		uptodate = false;
3887 
3888 	if (uptodate) {
3889 		set_extent_buffer_uptodate(eb);
3890 	} else {
3891 		clear_extent_buffer_uptodate(eb);
3892 		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3893 	}
3894 
3895 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
3896 		u64 start = eb->start + bio_offset;
3897 		struct page *page = bvec->bv_page;
3898 		u32 len = bvec->bv_len;
3899 
3900 		if (uptodate)
3901 			btrfs_page_set_uptodate(fs_info, page, start, len);
3902 		else
3903 			btrfs_page_clear_uptodate(fs_info, page, start, len);
3904 
3905 		bio_offset += len;
3906 	}
3907 
3908 	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
3909 	smp_mb__after_atomic();
3910 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
3911 	free_extent_buffer(eb);
3912 
3913 	bio_put(&bbio->bio);
3914 }
3915 
3916 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
3917 			     struct btrfs_tree_parent_check *check)
3918 {
3919 	int num_pages = num_extent_pages(eb), i;
3920 	struct btrfs_bio *bbio;
3921 
3922 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3923 		return 0;
3924 
3925 	/*
3926 	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3927 	 * operation, which could potentially still be in flight.  In this case
3928 	 * we simply want to return an error.
3929 	 */
3930 	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3931 		return -EIO;
3932 
3933 	/* Someone else is already reading the buffer, just wait for it. */
3934 	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3935 		goto done;
3936 
3937 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3938 	eb->read_mirror = 0;
3939 	check_buffer_tree_ref(eb);
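	/* The read bio below holds a reference, dropped by the end io handler. */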
3940 	atomic_inc(&eb->refs);
3941 
3942 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3943 			       REQ_OP_READ | REQ_META, eb->fs_info,
3944 			       extent_buffer_read_end_io, eb);
3945 	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3946 	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3947 	bbio->file_offset = eb->start;
3948 	memcpy(&bbio->parent_check, check, sizeof(*check));
3949 	if (eb->fs_info->nodesize < PAGE_SIZE) {
3950 		__bio_add_page(&bbio->bio, eb->pages[0], eb->len,
3951 			       eb->start - page_offset(eb->pages[0]));
3952 	} else {
3953 		for (i = 0; i < num_pages; i++)
3954 			__bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0);
3955 	}
3956 	btrfs_submit_bio(bbio, mirror_num);
3957 
3958 done:
3959 	if (wait == WAIT_COMPLETE) {
3960 		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3961 		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3962 			return -EIO;
3963 	}
3964 
3965 	return 0;
3966 }
3967 
3968 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3969 			    unsigned long len)
3970 {
3971 	btrfs_warn(eb->fs_info,
3972 		"access to eb bytenr %llu len %lu out of range start %lu len %lu",
3973 		eb->start, eb->len, start, len);
3974 	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3975 
3976 	return true;
3977 }
3978 
3979 /*
3980  * Check if the [start, start + len) range is valid before reading/writing
3981  * the eb.
3982  * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3983  *
3984  * Callers should not touch the dst/src memory if this function returns an error.
3985  */
3986 static inline int check_eb_range(const struct extent_buffer *eb,
3987 				 unsigned long start, unsigned long len)
3988 {
3989 	unsigned long offset;
3990 
3991 	/* start, start + len should not go beyond eb->len nor overflow */
3992 	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3993 		return report_eb_range(eb, start, len);
3994 
3995 	return false;
3996 }
3997 
3998 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3999 			unsigned long start, unsigned long len)
4000 {
4001 	size_t cur;
4002 	size_t offset;
4003 	struct page *page;
4004 	char *kaddr;
4005 	char *dst = (char *)dstv;
4006 	unsigned long i = get_eb_page_index(start);
4007 
4008 	if (check_eb_range(eb, start, len)) {
4009 		/*
4010 		 * Invalid range hit, reset the memory, so callers won't get
4011 		 * random garbage for their uninitialized memory.
4012 		 */
4013 		memset(dstv, 0, len);
4014 		return;
4015 	}
4016 
4017 	offset = get_eb_offset_in_page(eb, start);
4018 
4019 	while (len > 0) {
4020 		page = eb->pages[i];
4021 
4022 		cur = min(len, (PAGE_SIZE - offset));
4023 		kaddr = page_address(page);
4024 		memcpy(dst, kaddr + offset, cur);
4025 
4026 		dst += cur;
4027 		len -= cur;
4028 		offset = 0;
4029 		i++;
4030 	}
4031 }
4032 
4033 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4034 				       void __user *dstv,
4035 				       unsigned long start, unsigned long len)
4036 {
4037 	size_t cur;
4038 	size_t offset;
4039 	struct page *page;
4040 	char *kaddr;
4041 	char __user *dst = (char __user *)dstv;
4042 	unsigned long i = get_eb_page_index(start);
4043 	int ret = 0;
4044 
4045 	WARN_ON(start > eb->len);
4046 	WARN_ON(start + len > eb->start + eb->len);
4047 
4048 	offset = get_eb_offset_in_page(eb, start);
4049 
4050 	while (len > 0) {
4051 		page = eb->pages[i];
4052 
4053 		cur = min(len, (PAGE_SIZE - offset));
4054 		kaddr = page_address(page);
4055 		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4056 			ret = -EFAULT;
4057 			break;
4058 		}
4059 
4060 		dst += cur;
4061 		len -= cur;
4062 		offset = 0;
4063 		i++;
4064 	}
4065 
4066 	return ret;
4067 }
4068 
4069 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4070 			 unsigned long start, unsigned long len)
4071 {
4072 	size_t cur;
4073 	size_t offset;
4074 	struct page *page;
4075 	char *kaddr;
4076 	char *ptr = (char *)ptrv;
4077 	unsigned long i = get_eb_page_index(start);
4078 	int ret = 0;
4079 
4080 	if (check_eb_range(eb, start, len))
4081 		return -EINVAL;
4082 
4083 	offset = get_eb_offset_in_page(eb, start);
4084 
4085 	while (len > 0) {
4086 		page = eb->pages[i];
4087 
4088 		cur = min(len, (PAGE_SIZE - offset));
4089 
4090 		kaddr = page_address(page);
4091 		ret = memcmp(ptr, kaddr + offset, cur);
4092 		if (ret)
4093 			break;
4094 
4095 		ptr += cur;
4096 		len -= cur;
4097 		offset = 0;
4098 		i++;
4099 	}
4100 	return ret;
4101 }
4102 
4103 /*
4104  * Check that the extent buffer is uptodate.
4105  *
4106  * For regular sector size == PAGE_SIZE case, check if @page is uptodate.
4107  * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4108  */
4109 static void assert_eb_page_uptodate(const struct extent_buffer *eb,
4110 				    struct page *page)
4111 {
4112 	struct btrfs_fs_info *fs_info = eb->fs_info;
4113 
4114 	/*
4115 	 * If we are using the commit root we could potentially clear a page
4116 	 * Uptodate while we're using the extent buffer that we've previously
4117 	 * looked up.  We don't want to complain in this case, as the page was
4118 	 * valid before, we just didn't write it out.  Instead we want to catch
4119 	 * the case where we didn't actually read the block properly, which
4120 	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
4121 	 */
4122 	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4123 		return;
4124 
4125 	if (fs_info->nodesize < PAGE_SIZE) {
4126 		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, page,
4127 							 eb->start, eb->len)))
4128 			btrfs_subpage_dump_bitmap(fs_info, page, eb->start, eb->len);
4129 	} else {
4130 		WARN_ON(!PageUptodate(page));
4131 	}
4132 }
4133 
4134 static void __write_extent_buffer(const struct extent_buffer *eb,
4135 				  const void *srcv, unsigned long start,
4136 				  unsigned long len, bool use_memmove)
4137 {
4138 	size_t cur;
4139 	size_t offset;
4140 	struct page *page;
4141 	char *kaddr;
4142 	char *src = (char *)srcv;
4143 	unsigned long i = get_eb_page_index(start);
4144 	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
4145 	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4146 
4147 	WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
4148 
4149 	if (check_eb_range(eb, start, len))
4150 		return;
4151 
4152 	offset = get_eb_offset_in_page(eb, start);
4153 
4154 	while (len > 0) {
4155 		page = eb->pages[i];
4156 		if (check_uptodate)
4157 			assert_eb_page_uptodate(eb, page);
4158 
4159 		cur = min(len, PAGE_SIZE - offset);
4160 		kaddr = page_address(page);
4161 		if (use_memmove)
4162 			memmove(kaddr + offset, src, cur);
4163 		else
4164 			memcpy(kaddr + offset, src, cur);
4165 
4166 		src += cur;
4167 		len -= cur;
4168 		offset = 0;
4169 		i++;
4170 	}
4171 }
4172 
4173 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4174 			 unsigned long start, unsigned long len)
4175 {
4176 	return __write_extent_buffer(eb, srcv, start, len, false);
4177 }
4178 
4179 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4180 				 unsigned long start, unsigned long len)
4181 {
4182 	unsigned long cur = start;
4183 
4184 	while (cur < start + len) {
4185 		unsigned long index = get_eb_page_index(cur);
4186 		unsigned int offset = get_eb_offset_in_page(eb, cur);
4187 		unsigned int cur_len = min(start + len - cur, PAGE_SIZE - offset);
4188 		struct page *page = eb->pages[index];
4189 
4190 		assert_eb_page_uptodate(eb, page);
4191 		memset(page_address(page) + offset, c, cur_len);
4192 
4193 		cur += cur_len;
4194 	}
4195 }
4196 
4197 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4198 			   unsigned long len)
4199 {
4200 	if (check_eb_range(eb, start, len))
4201 		return;
4202 	return memset_extent_buffer(eb, 0, start, len);
4203 }
4204 
4205 void copy_extent_buffer_full(const struct extent_buffer *dst,
4206 			     const struct extent_buffer *src)
4207 {
4208 	unsigned long cur = 0;
4209 
4210 	ASSERT(dst->len == src->len);
4211 
4212 	while (cur < src->len) {
4213 		unsigned long index = get_eb_page_index(cur);
4214 		unsigned long offset = get_eb_offset_in_page(src, cur);
4215 		unsigned long cur_len = min(src->len, PAGE_SIZE - offset);
4216 		void *addr = page_address(src->pages[index]) + offset;
4217 
4218 		write_extent_buffer(dst, addr, cur, cur_len);
4219 
4220 		cur += cur_len;
4221 	}
4222 }
4223 
4224 void copy_extent_buffer(const struct extent_buffer *dst,
4225 			const struct extent_buffer *src,
4226 			unsigned long dst_offset, unsigned long src_offset,
4227 			unsigned long len)
4228 {
4229 	u64 dst_len = dst->len;
4230 	size_t cur;
4231 	size_t offset;
4232 	struct page *page;
4233 	char *kaddr;
4234 	unsigned long i = get_eb_page_index(dst_offset);
4235 
4236 	if (check_eb_range(dst, dst_offset, len) ||
4237 	    check_eb_range(src, src_offset, len))
4238 		return;
4239 
4240 	WARN_ON(src->len != dst_len);
4241 
4242 	offset = get_eb_offset_in_page(dst, dst_offset);
4243 
4244 	while (len > 0) {
4245 		page = dst->pages[i];
4246 		assert_eb_page_uptodate(dst, page);
4247 
4248 		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
4249 
4250 		kaddr = page_address(page);
4251 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
4252 
4253 		src_offset += cur;
4254 		len -= cur;
4255 		offset = 0;
4256 		i++;
4257 	}
4258 }
4259 
4260 /*
4261  * Calculate the page and offset of the byte containing the given bit number.
4262  *
4263  * @eb:           the extent buffer
4264  * @start:        offset of the bitmap item in the extent buffer
4265  * @nr:           bit number
4266  * @page_index:   return index of the page in the extent buffer that contains
4267  *                the given bit number
4268  * @page_offset:  return offset into the page given by page_index
4269  *
4270  * This helper hides the ugliness of finding the byte in an extent buffer which
4271  * contains a given bit.
4272  */
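 *
 * For example, with 16K pages and a subpage eb that starts 0x1000 bytes into
 * its first page, @start = 0 and @nr = 13 give a byte offset of 1, so the
 * byte is found in page 0 of the eb at offset 0x1001.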
4273 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4274 				    unsigned long start, unsigned long nr,
4275 				    unsigned long *page_index,
4276 				    size_t *page_offset)
4277 {
4278 	size_t byte_offset = BIT_BYTE(nr);
4279 	size_t offset;
4280 
4281 	/*
4282 	 * The byte we want is the offset of the extent buffer + the offset of
4283 	 * the bitmap item in the extent buffer + the offset of the byte in the
4284 	 * bitmap item.
4285 	 */
4286 	offset = start + offset_in_page(eb->start) + byte_offset;
4287 
4288 	*page_index = offset >> PAGE_SHIFT;
4289 	*page_offset = offset_in_page(offset);
4290 }
4291 
4292 /*
4293  * Determine whether a bit in a bitmap item is set.
4294  *
4295  * @eb:     the extent buffer
4296  * @start:  offset of the bitmap item in the extent buffer
4297  * @nr:     bit number to test
4298  */
4299 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4300 			   unsigned long nr)
4301 {
4302 	u8 *kaddr;
4303 	struct page *page;
4304 	unsigned long i;
4305 	size_t offset;
4306 
4307 	eb_bitmap_offset(eb, start, nr, &i, &offset);
4308 	page = eb->pages[i];
4309 	assert_eb_page_uptodate(eb, page);
4310 	kaddr = page_address(page);
4311 	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4312 }
4313 
4314 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4315 {
4316 	unsigned long index = get_eb_page_index(bytenr);
4317 
4318 	if (check_eb_range(eb, bytenr, 1))
4319 		return NULL;
4320 	return page_address(eb->pages[index]) + get_eb_offset_in_page(eb, bytenr);
4321 }
4322 
4323 /*
4324  * Set an area of a bitmap to 1.
4325  *
4326  * @eb:     the extent buffer
4327  * @start:  offset of the bitmap item in the extent buffer
4328  * @pos:    bit number of the first bit
4329  * @len:    number of bits to set
4330  */
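 *
 * For example, @pos = 6 and @len = 12 set bits 6-17: the first byte gets the
 * mask 0xc0 (bits 6-7), the fully covered middle byte is memset to 0xff
 * (bits 8-15) and the last byte gets the mask 0x03 (bits 16-17).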
4331 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4332 			      unsigned long pos, unsigned long len)
4333 {
4334 	unsigned int first_byte = start + BIT_BYTE(pos);
4335 	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4336 	const bool same_byte = (first_byte == last_byte);
4337 	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4338 	u8 *kaddr;
4339 
4340 	if (same_byte)
4341 		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4342 
4343 	/* Handle the first byte. */
4344 	kaddr = extent_buffer_get_byte(eb, first_byte);
4345 	*kaddr |= mask;
4346 	if (same_byte)
4347 		return;
4348 
4349 	/* Handle the byte aligned part. */
4350 	ASSERT(first_byte + 1 <= last_byte);
4351 	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4352 
4353 	/* Handle the last byte. */
4354 	kaddr = extent_buffer_get_byte(eb, last_byte);
4355 	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4356 }
4357 
4358 
4359 /*
4360  * Clear an area of a bitmap.
4361  *
4362  * @eb:     the extent buffer
4363  * @start:  offset of the bitmap item in the extent buffer
4364  * @pos:    bit number of the first bit
4365  * @len:    number of bits to clear
4366  */
4367 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4368 				unsigned long start, unsigned long pos,
4369 				unsigned long len)
4370 {
4371 	unsigned int first_byte = start + BIT_BYTE(pos);
4372 	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4373 	const bool same_byte = (first_byte == last_byte);
4374 	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4375 	u8 *kaddr;
4376 
4377 	if (same_byte)
4378 		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4379 
4380 	/* Handle the first byte. */
4381 	kaddr = extent_buffer_get_byte(eb, first_byte);
4382 	*kaddr &= ~mask;
4383 	if (same_byte)
4384 		return;
4385 
4386 	/* Handle the byte aligned part. */
4387 	ASSERT(first_byte + 1 <= last_byte);
4388 	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4389 
4390 	/* Handle the last byte. */
4391 	kaddr = extent_buffer_get_byte(eb, last_byte);
4392 	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4393 }
4394 
4395 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4396 {
4397 	unsigned long distance = (src > dst) ? src - dst : dst - src;
4398 	return distance < len;
4399 }
4400 
4401 void memcpy_extent_buffer(const struct extent_buffer *dst,
4402 			  unsigned long dst_offset, unsigned long src_offset,
4403 			  unsigned long len)
4404 {
4405 	unsigned long cur_off = 0;
4406 
4407 	if (check_eb_range(dst, dst_offset, len) ||
4408 	    check_eb_range(dst, src_offset, len))
4409 		return;
4410 
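	/*
	 * Copy forwards, one chunk per iteration.  Each chunk is capped at the
	 * source page boundary, and memmove is used for chunks whose source
	 * and destination ranges overlap within the eb.
	 */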
4411 	while (cur_off < len) {
4412 		unsigned long cur_src = cur_off + src_offset;
4413 		unsigned long pg_index = get_eb_page_index(cur_src);
4414 		unsigned long pg_off = get_eb_offset_in_page(dst, cur_src);
4415 		unsigned long cur_len = min(src_offset + len - cur_src,
4416 					    PAGE_SIZE - pg_off);
4417 		void *src_addr = page_address(dst->pages[pg_index]) + pg_off;
4418 		const bool use_memmove = areas_overlap(src_offset + cur_off,
4419 						       dst_offset + cur_off, cur_len);
4420 
4421 		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4422 				      use_memmove);
4423 		cur_off += cur_len;
4424 	}
4425 }
4426 
4427 void memmove_extent_buffer(const struct extent_buffer *dst,
4428 			   unsigned long dst_offset, unsigned long src_offset,
4429 			   unsigned long len)
4430 {
4431 	unsigned long dst_end = dst_offset + len - 1;
4432 	unsigned long src_end = src_offset + len - 1;
4433 
4434 	if (check_eb_range(dst, dst_offset, len) ||
4435 	    check_eb_range(dst, src_offset, len))
4436 		return;
4437 
4438 	if (dst_offset < src_offset) {
4439 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4440 		return;
4441 	}
4442 
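	/*
	 * dst is at or after src, so copy backwards from the end to avoid
	 * overwriting source bytes that still need to be copied.  Each chunk
	 * is capped so it doesn't cross a page boundary in either the source
	 * or the destination range.
	 */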
4443 	while (len > 0) {
4444 		unsigned long src_i;
4445 		size_t cur;
4446 		size_t dst_off_in_page;
4447 		size_t src_off_in_page;
4448 		void *src_addr;
4449 		bool use_memmove;
4450 
4451 		src_i = get_eb_page_index(src_end);
4452 
4453 		dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
4454 		src_off_in_page = get_eb_offset_in_page(dst, src_end);
4455 
4456 		cur = min_t(unsigned long, len, src_off_in_page + 1);
4457 		cur = min(cur, dst_off_in_page + 1);
4458 
4459 		src_addr = page_address(dst->pages[src_i]) + src_off_in_page -
4460 					cur + 1;
4461 		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4462 					    cur);
4463 
4464 		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4465 				      use_memmove);
4466 
4467 		dst_end -= cur;
4468 		src_end -= cur;
4469 		len -= cur;
4470 	}
4471 }
4472 
4473 #define GANG_LOOKUP_SIZE	16
4474 static struct extent_buffer *get_next_extent_buffer(
4475 		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4476 {
4477 	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4478 	struct extent_buffer *found = NULL;
4479 	u64 page_start = page_offset(page);
4480 	u64 cur = page_start;
4481 
4482 	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4483 	lockdep_assert_held(&fs_info->buffer_lock);
4484 
4485 	while (cur < page_start + PAGE_SIZE) {
4486 		int ret;
4487 		int i;
4488 
4489 		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4490 				(void **)gang, cur >> fs_info->sectorsize_bits,
4491 				min_t(unsigned int, GANG_LOOKUP_SIZE,
4492 				      PAGE_SIZE / fs_info->nodesize));
4493 		if (ret == 0)
4494 			goto out;
4495 		for (i = 0; i < ret; i++) {
4496 			/* Already beyond page end */
4497 			if (gang[i]->start >= page_start + PAGE_SIZE)
4498 				goto out;
4499 			/* Found one */
4500 			if (gang[i]->start >= bytenr) {
4501 				found = gang[i];
4502 				goto out;
4503 			}
4504 		}
4505 		cur = gang[ret - 1]->start + gang[ret - 1]->len;
4506 	}
4507 out:
4508 	return found;
4509 }
4510 
4511 static int try_release_subpage_extent_buffer(struct page *page)
4512 {
4513 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4514 	u64 cur = page_offset(page);
4515 	const u64 end = page_offset(page) + PAGE_SIZE;
4516 	int ret;
4517 
4518 	while (cur < end) {
4519 		struct extent_buffer *eb = NULL;
4520 
4521 		/*
4522 		 * Unlike try_release_extent_buffer(), which uses page->private
4523 		 * to grab the buffer, for the subpage case we rely on the radix
4524 		 * tree, thus we need to ensure radix tree consistency.
4525 		 *
4526 		 * We also want an atomic snapshot of the radix tree, so go with
4527 		 * the spinlock rather than RCU.
4528 		 */
4529 		spin_lock(&fs_info->buffer_lock);
4530 		eb = get_next_extent_buffer(fs_info, page, cur);
4531 		if (!eb) {
4532 			/* No more eb in the page range after or at cur */
4533 			spin_unlock(&fs_info->buffer_lock);
4534 			break;
4535 		}
4536 		cur = eb->start + eb->len;
4537 
4538 		/*
4539 		 * The same as try_release_extent_buffer(), to ensure the eb
4540 		 * won't disappear out from under us.
4541 		 */
4542 		spin_lock(&eb->refs_lock);
4543 		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4544 			spin_unlock(&eb->refs_lock);
4545 			spin_unlock(&fs_info->buffer_lock);
4546 			break;
4547 		}
4548 		spin_unlock(&fs_info->buffer_lock);
4549 
4550 		/*
4551 		 * If tree ref isn't set then we know the ref on this eb is a
4552 		 * real ref, so just return, this eb will likely be freed soon
4553 		 * anyway.
4554 		 */
4555 		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4556 			spin_unlock(&eb->refs_lock);
4557 			break;
4558 		}
4559 
4560 		/*
4561 		 * Here we don't care about the return value; we will always
4562 		 * check the page private at the end, and release_extent_buffer()
4563 		 * will release the refs_lock.
4564 		 */
4565 		release_extent_buffer(eb);
4566 	}
4567 	/*
4568 	 * Finally, check if we have cleared the page private: if we have
4569 	 * released all ebs in the page, the page private should be cleared by now.
4570 	 */
4571 	spin_lock(&page->mapping->private_lock);
4572 	if (!PagePrivate(page))
4573 		ret = 1;
4574 	else
4575 		ret = 0;
4576 	spin_unlock(&page->mapping->private_lock);
4577 	return ret;
4578 
4579 }
4580 
4581 int try_release_extent_buffer(struct page *page)
4582 {
4583 	struct extent_buffer *eb;
4584 
4585 	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
4586 		return try_release_subpage_extent_buffer(page);
4587 
4588 	/*
4589 	 * We need to make sure nobody is changing page->private, as we rely on
4590 	 * page->private as the pointer to extent buffer.
4591 	 */
4592 	spin_lock(&page->mapping->private_lock);
4593 	if (!PagePrivate(page)) {
4594 		spin_unlock(&page->mapping->private_lock);
4595 		return 1;
4596 	}
4597 
4598 	eb = (struct extent_buffer *)page->private;
4599 	BUG_ON(!eb);
4600 
4601 	/*
4602 	 * This is a little awful but should be ok, we need to make sure that
4603 	 * the eb doesn't disappear out from under us while we're looking at
4604 	 * this page.
4605 	 */
4606 	spin_lock(&eb->refs_lock);
4607 	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4608 		spin_unlock(&eb->refs_lock);
4609 		spin_unlock(&page->mapping->private_lock);
4610 		return 0;
4611 	}
4612 	spin_unlock(&page->mapping->private_lock);
4613 
4614 	/*
4615 	 * If tree ref isn't set then we know the ref on this eb is a real ref,
4616 	 * so just return, this page will likely be freed soon anyway.
4617 	 */
4618 	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4619 		spin_unlock(&eb->refs_lock);
4620 		return 0;
4621 	}
4622 
4623 	return release_extent_buffer(eb);
4624 }
4625 
4626 /*
4627  * Attempt to readahead a child block.
4628  *
4629  * @fs_info:	the fs_info
4630  * @bytenr:	bytenr to read
4631  * @owner_root: objectid of the root that owns this eb
4632  * @gen:	generation for the uptodate check, can be 0
4633  * @level:	level for the eb
4634  *
4635  * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
4636  * normal uptodate check of the eb, without checking the generation.  If we have
4637  * to read the block we will not block on anything.
4638  */
4639 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4640 				u64 bytenr, u64 owner_root, u64 gen, int level)
4641 {
4642 	struct btrfs_tree_parent_check check = {
4643 		.has_first_key = 0,
4644 		.level = level,
4645 		.transid = gen
4646 	};
4647 	struct extent_buffer *eb;
4648 	int ret;
4649 
4650 	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4651 	if (IS_ERR(eb))
4652 		return;
4653 
4654 	if (btrfs_buffer_uptodate(eb, gen, 1)) {
4655 		free_extent_buffer(eb);
4656 		return;
4657 	}
4658 
4659 	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4660 	if (ret < 0)
4661 		free_extent_buffer_stale(eb);
4662 	else
4663 		free_extent_buffer(eb);
4664 }
4665 
4666 /*
4667  * Readahead a node's child block.
4668  *
4669  * @node:	parent node we're reading from
4670  * @slot:	slot in the parent node for the child we want to read
4671  *
4672  * A helper for btrfs_readahead_tree_block(): we simply read the bytenr
4673  * pointed to by the given slot in the provided node.
4674  */
4675 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4676 {
4677 	btrfs_readahead_tree_block(node->fs_info,
4678 				   btrfs_node_blockptr(node, slot),
4679 				   btrfs_header_owner(node),
4680 				   btrfs_node_ptr_generation(node, slot),
4681 				   btrfs_header_level(node) - 1);
4682 }
4683