xref: /linux/fs/btrfs/extent_io.c (revision 561add0da6d3d07c9bccb0832fb6ed5619167d26)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/sched/mm.h>
10 #include <linux/spinlock.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/prefetch.h>
16 #include <linux/fsverity.h>
17 #include "misc.h"
18 #include "extent_io.h"
19 #include "extent-io-tree.h"
20 #include "extent_map.h"
21 #include "ctree.h"
22 #include "btrfs_inode.h"
23 #include "bio.h"
24 #include "check-integrity.h"
25 #include "locking.h"
26 #include "rcu-string.h"
27 #include "backref.h"
28 #include "disk-io.h"
29 #include "subpage.h"
30 #include "zoned.h"
31 #include "block-group.h"
32 #include "compression.h"
33 #include "fs.h"
34 #include "accessors.h"
35 #include "file-item.h"
36 #include "file.h"
37 #include "dev-replace.h"
38 #include "super.h"
39 #include "transaction.h"
40 
41 static struct kmem_cache *extent_buffer_cache;
42 
43 #ifdef CONFIG_BTRFS_DEBUG
44 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
45 {
46 	struct btrfs_fs_info *fs_info = eb->fs_info;
47 	unsigned long flags;
48 
49 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
50 	list_add(&eb->leak_list, &fs_info->allocated_ebs);
51 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
52 }
53 
54 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
55 {
56 	struct btrfs_fs_info *fs_info = eb->fs_info;
57 	unsigned long flags;
58 
59 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
60 	list_del(&eb->leak_list);
61 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
62 }
63 
64 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
65 {
66 	struct extent_buffer *eb;
67 	unsigned long flags;
68 
69 	/*
70 	 * If we didn't get into open_ctree our allocated_ebs will not be
71 	 * initialized, so just skip this.
72 	 */
73 	if (!fs_info->allocated_ebs.next)
74 		return;
75 
76 	WARN_ON(!list_empty(&fs_info->allocated_ebs));
77 	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
78 	while (!list_empty(&fs_info->allocated_ebs)) {
79 		eb = list_first_entry(&fs_info->allocated_ebs,
80 				      struct extent_buffer, leak_list);
81 		pr_err(
82 	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
83 		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
84 		       btrfs_header_owner(eb));
85 		list_del(&eb->leak_list);
86 		kmem_cache_free(extent_buffer_cache, eb);
87 	}
88 	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
89 }
90 #else
91 #define btrfs_leak_debug_add_eb(eb)			do {} while (0)
92 #define btrfs_leak_debug_del_eb(eb)			do {} while (0)
93 #endif
94 
95 /*
96  * Structure to record info about the bio being assembled, and other info like
97  * how many bytes remain until the stripe/ordered extent boundary.
98  */
99 struct btrfs_bio_ctrl {
100 	struct btrfs_bio *bbio;
101 	enum btrfs_compression_type compress_type;
102 	u32 len_to_oe_boundary;
103 	blk_opf_t opf;
104 	btrfs_bio_end_io_t end_io_func;
105 	struct writeback_control *wbc;
106 };
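/*
 * Illustrative example of len_to_oe_boundary (hypothetical values, assuming an
 * ordered extent at file offset 16M covering 128K of disk space): a write
 * starting at file offset 16M + 32K gets len_to_oe_boundary = 96K in
 * alloc_new_bio(), and submit_extent_page() submits the bio once that many
 * bytes have been added, so a bio never crosses the ordered extent boundary.
 */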
107 
108 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
109 {
110 	struct btrfs_bio *bbio = bio_ctrl->bbio;
111 
112 	if (!bbio)
113 		return;
114 
115 	/* Caller should ensure the bio has at least some range added */
116 	ASSERT(bbio->bio.bi_iter.bi_size);
117 
118 	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
119 	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
120 		btrfs_submit_compressed_read(bbio);
121 	else
122 		btrfs_submit_bio(bbio, 0);
123 
124 	/* The bbio is owned by the end_io handler now */
125 	bio_ctrl->bbio = NULL;
126 }
127 
128 /*
129  * Submit or fail the current bio in the bio_ctrl structure.
130  */
131 static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
132 {
133 	struct btrfs_bio *bbio = bio_ctrl->bbio;
134 
135 	if (!bbio)
136 		return;
137 
138 	if (ret) {
139 		ASSERT(ret < 0);
140 		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
141 		/* The bio is owned by the end_io handler now */
142 		bio_ctrl->bbio = NULL;
143 	} else {
144 		submit_one_bio(bio_ctrl);
145 	}
146 }
147 
148 int __init extent_buffer_init_cachep(void)
149 {
150 	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
151 			sizeof(struct extent_buffer), 0,
152 			SLAB_MEM_SPREAD, NULL);
153 	if (!extent_buffer_cache)
154 		return -ENOMEM;
155 
156 	return 0;
157 }
158 
159 void __cold extent_buffer_free_cachep(void)
160 {
161 	/*
162 	 * Make sure all delayed rcu free are flushed before we
163 	 * destroy caches.
164 	 */
165 	rcu_barrier();
166 	kmem_cache_destroy(extent_buffer_cache);
167 }
168 
169 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
170 {
171 	unsigned long index = start >> PAGE_SHIFT;
172 	unsigned long end_index = end >> PAGE_SHIFT;
173 	struct page *page;
174 
175 	while (index <= end_index) {
176 		page = find_get_page(inode->i_mapping, index);
177 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
178 		clear_page_dirty_for_io(page);
179 		put_page(page);
180 		index++;
181 	}
182 }
183 
184 static void process_one_page(struct btrfs_fs_info *fs_info,
185 			     struct page *page, struct page *locked_page,
186 			     unsigned long page_ops, u64 start, u64 end)
187 {
188 	u32 len;
189 
190 	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
191 	len = end + 1 - start;
192 
193 	if (page_ops & PAGE_SET_ORDERED)
194 		btrfs_page_clamp_set_ordered(fs_info, page, start, len);
195 	if (page_ops & PAGE_START_WRITEBACK) {
196 		btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
197 		btrfs_page_clamp_set_writeback(fs_info, page, start, len);
198 	}
199 	if (page_ops & PAGE_END_WRITEBACK)
200 		btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
201 
202 	if (page != locked_page && (page_ops & PAGE_UNLOCK))
203 		btrfs_page_end_writer_lock(fs_info, page, start, len);
204 }
205 
206 static void __process_pages_contig(struct address_space *mapping,
207 				   struct page *locked_page, u64 start, u64 end,
208 				   unsigned long page_ops)
209 {
210 	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
211 	pgoff_t start_index = start >> PAGE_SHIFT;
212 	pgoff_t end_index = end >> PAGE_SHIFT;
213 	pgoff_t index = start_index;
214 	struct folio_batch fbatch;
215 	int i;
216 
217 	folio_batch_init(&fbatch);
218 	while (index <= end_index) {
219 		int found_folios;
220 
221 		found_folios = filemap_get_folios_contig(mapping, &index,
222 				end_index, &fbatch);
223 		for (i = 0; i < found_folios; i++) {
224 			struct folio *folio = fbatch.folios[i];
225 
226 			process_one_page(fs_info, &folio->page, locked_page,
227 					 page_ops, start, end);
228 		}
229 		folio_batch_release(&fbatch);
230 		cond_resched();
231 	}
232 }
233 
234 static noinline void __unlock_for_delalloc(struct inode *inode,
235 					   struct page *locked_page,
236 					   u64 start, u64 end)
237 {
238 	unsigned long index = start >> PAGE_SHIFT;
239 	unsigned long end_index = end >> PAGE_SHIFT;
240 
241 	ASSERT(locked_page);
242 	if (index == locked_page->index && end_index == index)
243 		return;
244 
245 	__process_pages_contig(inode->i_mapping, locked_page, start, end,
246 			       PAGE_UNLOCK);
247 }
248 
249 static noinline int lock_delalloc_pages(struct inode *inode,
250 					struct page *locked_page,
251 					u64 start,
252 					u64 end)
253 {
254 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
255 	struct address_space *mapping = inode->i_mapping;
256 	pgoff_t start_index = start >> PAGE_SHIFT;
257 	pgoff_t end_index = end >> PAGE_SHIFT;
258 	pgoff_t index = start_index;
259 	u64 processed_end = start;
260 	struct folio_batch fbatch;
261 
262 	if (index == locked_page->index && index == end_index)
263 		return 0;
264 
265 	folio_batch_init(&fbatch);
266 	while (index <= end_index) {
267 		unsigned int found_folios, i;
268 
269 		found_folios = filemap_get_folios_contig(mapping, &index,
270 				end_index, &fbatch);
271 		if (found_folios == 0)
272 			goto out;
273 
274 		for (i = 0; i < found_folios; i++) {
275 			struct page *page = &fbatch.folios[i]->page;
276 			u32 len = end + 1 - start;
277 
278 			if (page == locked_page)
279 				continue;
280 
281 			if (btrfs_page_start_writer_lock(fs_info, page, start,
282 							 len))
283 				goto out;
284 
285 			if (!PageDirty(page) || page->mapping != mapping) {
286 				btrfs_page_end_writer_lock(fs_info, page, start,
287 							   len);
288 				goto out;
289 			}
290 
291 			processed_end = page_offset(page) + PAGE_SIZE - 1;
292 		}
293 		folio_batch_release(&fbatch);
294 		cond_resched();
295 	}
296 
297 	return 0;
298 out:
299 	folio_batch_release(&fbatch);
300 	if (processed_end > start)
301 		__unlock_for_delalloc(inode, locked_page, start, processed_end);
302 	return -EAGAIN;
303 }
304 
305 /*
306  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
307  * more than @max_bytes.
308  *
309  * @start:	The original start bytenr to search.
310  *		Will store the extent range start bytenr.
311  * @end:	The original end bytenr of the search range
312  *		Will store the extent range end bytenr.
313  *
314  * Return true if we find a delalloc range which starts inside the original
315  * range, and @start/@end will store the delalloc range start/end.
316  *
317  * Return false if we can't find any delalloc range which starts inside the
318  * original range, and @start/@end will be the non-delalloc range start/end.
319  */
320 EXPORT_FOR_TESTS
321 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
322 				    struct page *locked_page, u64 *start,
323 				    u64 *end)
324 {
325 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
326 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
327 	const u64 orig_start = *start;
328 	const u64 orig_end = *end;
329 	/* The sanity tests may not set a valid fs_info. */
330 	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
331 	u64 delalloc_start;
332 	u64 delalloc_end;
333 	bool found;
334 	struct extent_state *cached_state = NULL;
335 	int ret;
336 	int loops = 0;
337 
338 	/* Caller should pass a valid @end to indicate the search range end */
339 	ASSERT(orig_end > orig_start);
340 
341 	/* The range should at least cover part of the page */
342 	ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
343 		 orig_end <= page_offset(locked_page)));
344 again:
345 	/* step one, find a bunch of delalloc bytes starting at start */
346 	delalloc_start = *start;
347 	delalloc_end = 0;
348 	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
349 					  max_bytes, &cached_state);
350 	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
351 		*start = delalloc_start;
352 
353 		/* @delalloc_end can be -1, never go beyond @orig_end */
354 		*end = min(delalloc_end, orig_end);
355 		free_extent_state(cached_state);
356 		return false;
357 	}
358 
359 	/*
360 	 * start comes from the offset of locked_page.  We have to lock
361 	 * pages in order, so we can't process delalloc bytes before
362 	 * locked_page
363 	 */
364 	if (delalloc_start < *start)
365 		delalloc_start = *start;
366 
367 	/*
368 	 * make sure to limit the number of pages we try to lock down
369 	 */
370 	if (delalloc_end + 1 - delalloc_start > max_bytes)
371 		delalloc_end = delalloc_start + max_bytes - 1;
372 
373 	/* step two, lock all the pages after the page that has start */
374 	ret = lock_delalloc_pages(inode, locked_page,
375 				  delalloc_start, delalloc_end);
376 	ASSERT(!ret || ret == -EAGAIN);
377 	if (ret == -EAGAIN) {
378 		/* some of the pages are gone, let's avoid looping by
379 		 * shortening the size of the delalloc range we're searching
380 		 */
381 		free_extent_state(cached_state);
382 		cached_state = NULL;
383 		if (!loops) {
384 			max_bytes = PAGE_SIZE;
385 			loops = 1;
386 			goto again;
387 		} else {
388 			found = false;
389 			goto out_failed;
390 		}
391 	}
392 
393 	/* step three, lock the state bits for the whole range */
394 	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
395 
396 	/* then test to make sure it is all still delalloc */
397 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
398 			     EXTENT_DELALLOC, 1, cached_state);
399 	if (!ret) {
400 		unlock_extent(tree, delalloc_start, delalloc_end,
401 			      &cached_state);
402 		__unlock_for_delalloc(inode, locked_page,
403 			      delalloc_start, delalloc_end);
404 		cond_resched();
405 		goto again;
406 	}
407 	free_extent_state(cached_state);
408 	*start = delalloc_start;
409 	*end = delalloc_end;
410 out_failed:
411 	return found;
412 }
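/*
 * Illustrative example (hypothetical values): searching with *start = 0 and
 * *end = 128K - 1 while delalloc covers [64K, 256K - 1] returns true with
 * *start = 64K and *end = 256K - 1, i.e. the returned range may extend past
 * the original @end as long as it begins inside it.  If no delalloc range
 * starts inside the original range, false is returned and *end is clamped to
 * the original @end.
 */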
413 
414 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
415 				  struct page *locked_page,
416 				  u32 clear_bits, unsigned long page_ops)
417 {
418 	clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
419 
420 	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
421 			       start, end, page_ops);
422 }
423 
424 static bool btrfs_verify_page(struct page *page, u64 start)
425 {
426 	if (!fsverity_active(page->mapping->host) ||
427 	    PageUptodate(page) ||
428 	    start >= i_size_read(page->mapping->host))
429 		return true;
430 	return fsverity_verify_page(page);
431 }
432 
433 static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
434 {
435 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
436 
437 	ASSERT(page_offset(page) <= start &&
438 	       start + len <= page_offset(page) + PAGE_SIZE);
439 
440 	if (uptodate && btrfs_verify_page(page, start))
441 		btrfs_page_set_uptodate(fs_info, page, start, len);
442 	else
443 		btrfs_page_clear_uptodate(fs_info, page, start, len);
444 
445 	if (!btrfs_is_subpage(fs_info, page))
446 		unlock_page(page);
447 	else
448 		btrfs_subpage_end_reader(fs_info, page, start, len);
449 }
450 
451 /*
452  * after a writepage IO is done, we need to:
453  * clear the uptodate bits on error
454  * clear the writeback bits in the extent tree for this IO
455  * end_page_writeback if the page has no more pending IO
456  *
457  * Scheduling is not allowed, so the extent state tree is expected
458  * to have one and only one object corresponding to this IO.
459  */
460 static void end_bio_extent_writepage(struct btrfs_bio *bbio)
461 {
462 	struct bio *bio = &bbio->bio;
463 	int error = blk_status_to_errno(bio->bi_status);
464 	struct bio_vec *bvec;
465 	struct bvec_iter_all iter_all;
466 
467 	ASSERT(!bio_flagged(bio, BIO_CLONED));
468 	bio_for_each_segment_all(bvec, bio, iter_all) {
469 		struct page *page = bvec->bv_page;
470 		struct inode *inode = page->mapping->host;
471 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
472 		const u32 sectorsize = fs_info->sectorsize;
473 		u64 start = page_offset(page) + bvec->bv_offset;
474 		u32 len = bvec->bv_len;
475 
476 		/* Our read/write should always be sector aligned. */
477 		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
478 			btrfs_err(fs_info,
479 		"partial page write in btrfs with offset %u and length %u",
480 				  bvec->bv_offset, bvec->bv_len);
481 		else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
482 			btrfs_info(fs_info,
483 		"incomplete page write with offset %u and length %u",
484 				   bvec->bv_offset, bvec->bv_len);
485 
486 		btrfs_finish_ordered_extent(bbio->ordered, page, start, len, !error);
487 		if (error) {
488 			btrfs_page_clear_uptodate(fs_info, page, start, len);
489 			mapping_set_error(page->mapping, error);
490 		}
491 		btrfs_page_clear_writeback(fs_info, page, start, len);
492 	}
493 
494 	bio_put(bio);
495 }
496 
497 /*
498  * Record previously processed extent range
499  *
500  * This lets endio_readpage_release_extent() handle a full extent range,
501  * reducing the number of extent io tree operations.
502  */
503 struct processed_extent {
504 	struct btrfs_inode *inode;
505 	/* Start of the range in @inode */
506 	u64 start;
507 	/* End of the range in @inode */
508 	u64 end;
509 	bool uptodate;
510 };
511 
512 /*
513  * Try to release processed extent range
514  *
515  * May not release the extent range right now if the current range is
516  * contiguous to processed extent.
517  *
518  * Will release the processed extent when the @inode or @uptodate changes, or
519  * when the range is no longer contiguous to the processed range.
520  *
521  * Passing @inode == NULL will force processed extent to be released.
522  */
523 static void endio_readpage_release_extent(struct processed_extent *processed,
524 			      struct btrfs_inode *inode, u64 start, u64 end,
525 			      bool uptodate)
526 {
527 	struct extent_state *cached = NULL;
528 	struct extent_io_tree *tree;
529 
530 	/* The first extent, initialize @processed */
531 	if (!processed->inode)
532 		goto update;
533 
534 	/*
535 	 * Contiguous to the processed extent, just update the end.
536 	 *
537 	 * Several things to notice:
538 	 *
539 	 * - bio can be merged as long as on-disk bytenr is contiguous
540 	 *   This means we can have pages belonging to other inodes, thus we need to
541 	 *   check if the inode still matches.
542 	 * - bvec can contain range beyond current page for multi-page bvec
543 	 *   Thus we need to do processed->end + 1 >= start check
544 	 */
545 	if (processed->inode == inode && processed->uptodate == uptodate &&
546 	    processed->end + 1 >= start && end >= processed->end) {
547 		processed->end = end;
548 		return;
549 	}
550 
551 	tree = &processed->inode->io_tree;
552 	/*
553 	 * Now we don't have range contiguous to the processed range, release
554 	 * the processed range now.
555 	 */
556 	unlock_extent(tree, processed->start, processed->end, &cached);
557 
558 update:
559 	/* Update processed to current range */
560 	processed->inode = inode;
561 	processed->start = start;
562 	processed->end = end;
563 	processed->uptodate = uptodate;
564 }
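/*
 * Illustrative walk-through (assuming 4K sectors): two bvecs covering file
 * ranges [0, 4K) and [4K, 8K) of the same inode with the same uptodate status
 * only grow the processed range to [0, 8K); the extent io tree is unlocked in
 * a single call once a non-contiguous range (or the final NULL inode) comes
 * in.
 */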
565 
566 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
567 {
568 	ASSERT(PageLocked(page));
569 	if (!btrfs_is_subpage(fs_info, page))
570 		return;
571 
572 	ASSERT(PagePrivate(page));
573 	btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
574 }
575 
576 /*
577  * after a readpage IO is done, we need to:
578  * clear the uptodate bits on error
579  * set the uptodate bits if things worked
580  * set the page up to date if all extents in the tree are uptodate
581  * clear the lock bit in the extent tree
582  * unlock the page if there are no other extents locked for it
583  *
584  * Scheduling is not allowed, so the extent state tree is expected
585  * to have one and only one object corresponding to this IO.
586  */
587 static void end_bio_extent_readpage(struct btrfs_bio *bbio)
588 {
589 	struct bio *bio = &bbio->bio;
590 	struct bio_vec *bvec;
591 	struct processed_extent processed = { 0 };
592 	/*
593 	 * The offset from the beginning of the bio; since one bio can never be
594 	 * larger than UINT_MAX, u32 is enough here.
595 	 */
596 	u32 bio_offset = 0;
597 	struct bvec_iter_all iter_all;
598 
599 	ASSERT(!bio_flagged(bio, BIO_CLONED));
600 	bio_for_each_segment_all(bvec, bio, iter_all) {
601 		bool uptodate = !bio->bi_status;
602 		struct page *page = bvec->bv_page;
603 		struct inode *inode = page->mapping->host;
604 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
605 		const u32 sectorsize = fs_info->sectorsize;
606 		u64 start;
607 		u64 end;
608 		u32 len;
609 
610 		btrfs_debug(fs_info,
611 			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
612 			bio->bi_iter.bi_sector, bio->bi_status,
613 			bbio->mirror_num);
614 
615 		/*
616 		 * We always issue full-sector reads, but if some block in a
617 		 * page fails to read, blk_update_request() will advance
618 		 * bv_offset and adjust bv_len to compensate.  Print a warning
619 		 * for unaligned offsets, and an error if they don't add up to
620 		 * a full sector.
621 		 */
622 		if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
623 			btrfs_err(fs_info,
624 		"partial page read in btrfs with offset %u and length %u",
625 				  bvec->bv_offset, bvec->bv_len);
626 		else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
627 				     sectorsize))
628 			btrfs_info(fs_info,
629 		"incomplete page read with offset %u and length %u",
630 				   bvec->bv_offset, bvec->bv_len);
631 
632 		start = page_offset(page) + bvec->bv_offset;
633 		end = start + bvec->bv_len - 1;
634 		len = bvec->bv_len;
635 
636 		if (likely(uptodate)) {
637 			loff_t i_size = i_size_read(inode);
638 			pgoff_t end_index = i_size >> PAGE_SHIFT;
639 
640 			/*
641 			 * Zero out the remaining part if this range straddles
642 			 * i_size.
643 			 *
644 			 * Here we should only zero the range inside the bvec,
645 			 * not touch anything else.
646 			 *
647 			 * NOTE: i_size is exclusive while end is inclusive.
648 			 */
649 			if (page->index == end_index && i_size <= end) {
650 				u32 zero_start = max(offset_in_page(i_size),
651 						     offset_in_page(start));
652 
653 				zero_user_segment(page, zero_start,
654 						  offset_in_page(end) + 1);
655 			}
656 		}
657 
658 		/* Update page status and unlock. */
659 		end_page_read(page, uptodate, start, len);
660 		endio_readpage_release_extent(&processed, BTRFS_I(inode),
661 					      start, end, uptodate);
662 
663 		ASSERT(bio_offset + len > bio_offset);
664 		bio_offset += len;
665 
666 	}
667 	/* Release the last extent */
668 	endio_readpage_release_extent(&processed, NULL, 0, 0, false);
669 	bio_put(bio);
670 }
671 
672 /*
673  * Populate every free slot in a provided array with pages.
674  *
675  * @nr_pages:   number of pages to allocate
676  * @page_array: the array to fill with pages; any existing non-null entries in
677  * 		the array will be skipped
678  *
679  * Return: 0        if all pages were able to be allocated;
680  *         -ENOMEM  otherwise, and the caller is responsible for freeing all
681  *                  non-null page pointers in the array.
682  */
683 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
684 {
685 	unsigned int allocated;
686 
687 	for (allocated = 0; allocated < nr_pages;) {
688 		unsigned int last = allocated;
689 
690 		allocated = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);
691 
692 		if (allocated == nr_pages)
693 			return 0;
694 
695 		/*
696 		 * During this iteration, no page could be allocated, even
697 		 * though alloc_pages_bulk_array() falls back to alloc_page()
698 		 * if it could not bulk-allocate. So we must be out of memory.
699 		 */
700 		if (allocated == last)
701 			return -ENOMEM;
702 
703 		memalloc_retry_wait(GFP_NOFS);
704 	}
705 	return 0;
706 }
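/*
 * Minimal usage sketch (illustrative only, not an actual caller in this
 * file): on failure the caller must free whatever pages were allocated before
 * the error, e.g.:
 *
 *	struct page *pages[16] = { NULL };
 *
 *	if (btrfs_alloc_page_array(16, pages)) {
 *		for (int i = 0; i < 16; i++)
 *			if (pages[i])
 *				__free_page(pages[i]);
 *	}
 */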
707 
708 static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
709 				struct page *page, u64 disk_bytenr,
710 				unsigned int pg_offset)
711 {
712 	struct bio *bio = &bio_ctrl->bbio->bio;
713 	struct bio_vec *bvec = bio_last_bvec_all(bio);
714 	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
715 
716 	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
717 		/*
718 		 * For compression, all IO should have its logical bytenr set
719 		 * to the starting bytenr of the compressed extent.
720 		 */
721 		return bio->bi_iter.bi_sector == sector;
722 	}
723 
724 	/*
725 	 * The contig check requires the following conditions to be met:
726 	 *
727 	 * 1) The pages belong to the same inode
728 	 *    This is implied by the call chain.
729 	 *
730 	 * 2) The range has adjacent logical bytenr
731 	 *
732 	 * 3) The range has adjacent file offset
733 	 *    This is required for the usage of btrfs_bio->file_offset.
734 	 */
735 	return bio_end_sector(bio) == sector &&
736 		page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
737 		page_offset(page) + pg_offset;
738 }
739 
740 static void alloc_new_bio(struct btrfs_inode *inode,
741 			  struct btrfs_bio_ctrl *bio_ctrl,
742 			  u64 disk_bytenr, u64 file_offset)
743 {
744 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
745 	struct btrfs_bio *bbio;
746 
747 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
748 			       bio_ctrl->end_io_func, NULL);
749 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
750 	bbio->inode = inode;
751 	bbio->file_offset = file_offset;
752 	bio_ctrl->bbio = bbio;
753 	bio_ctrl->len_to_oe_boundary = U32_MAX;
754 
755 	/* Limit data write bios to the ordered boundary. */
756 	if (bio_ctrl->wbc) {
757 		struct btrfs_ordered_extent *ordered;
758 
759 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
760 		if (ordered) {
761 			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
762 					ordered->file_offset +
763 					ordered->disk_num_bytes - file_offset);
764 			bbio->ordered = ordered;
765 		}
766 
767 		/*
768 		 * Pick the last added device to support cgroup writeback.  For
769 		 * multi-device file systems this means blk-cgroup policies have
770 		 * to always be set on the last added/replaced device.
771 		 * This is a bit odd but has been like that for a long time.
772 		 */
773 		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
774 		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
775 	}
776 }
777 
778 /*
779  * @disk_bytenr: logical bytenr where the write will be
780  * @page:	page to add to the bio
781  * @size:	portion of page that we want to write to
782  * @pg_offset:	offset inside @page where the data starts; also used to check
783  *              whether we are adding a contiguous page to the previous one
784  *
785  * This will either add the page into the existing @bio_ctrl->bbio, or allocate a
786  * new one in @bio_ctrl->bbio.
787  * The mirror number for this IO should already be initialized in
788  * @bio_ctrl->mirror_num.
789  */
790 static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
791 			       u64 disk_bytenr, struct page *page,
792 			       size_t size, unsigned long pg_offset)
793 {
794 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
795 
796 	ASSERT(pg_offset + size <= PAGE_SIZE);
797 	ASSERT(bio_ctrl->end_io_func);
798 
799 	if (bio_ctrl->bbio &&
800 	    !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
801 		submit_one_bio(bio_ctrl);
802 
803 	do {
804 		u32 len = size;
805 
806 		/* Allocate new bio if needed */
807 		if (!bio_ctrl->bbio) {
808 			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
809 				      page_offset(page) + pg_offset);
810 		}
811 
812 		/* Cap to the current ordered extent boundary if there is one. */
813 		if (len > bio_ctrl->len_to_oe_boundary) {
814 			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
815 			ASSERT(is_data_inode(&inode->vfs_inode));
816 			len = bio_ctrl->len_to_oe_boundary;
817 		}
818 
819 		if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
820 			/* bio full: move on to a new one */
821 			submit_one_bio(bio_ctrl);
822 			continue;
823 		}
824 
825 		if (bio_ctrl->wbc)
826 			wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
827 
828 		size -= len;
829 		pg_offset += len;
830 		disk_bytenr += len;
831 
832 		/*
833 		 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
834 		 * sector aligned.  alloc_new_bio() then sets it to the end of
835 		 * our ordered extent for writes into zoned devices.
836 		 *
837 		 * When len_to_oe_boundary is tracking an ordered extent, we
838 		 * trust the ordered extent code to align things properly, and
839 		 * the check above to cap our write to the ordered extent
840 		 * boundary is correct.
841 		 *
842 		 * When len_to_oe_boundary is U32_MAX, the cap above would
843 		 * result in a 4095 byte IO for the last page right before
844 		 * we hit the bio limit of UINT_MAX.  bio_add_page() has all
845 		 * the checks required to make sure we don't overflow the bio,
846 		 * and we should just ignore len_to_oe_boundary completely
847 		 * unless we're using it to track an ordered extent.
848 		 *
849 		 * It's pretty hard to make a bio sized U32_MAX, but it can
850 		 * happen when the page cache is able to feed us contiguous
851 		 * pages for large extents.
852 		 */
853 		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
854 			bio_ctrl->len_to_oe_boundary -= len;
855 
856 		/* Ordered extent boundary: move on to a new bio. */
857 		if (bio_ctrl->len_to_oe_boundary == 0)
858 			submit_one_bio(bio_ctrl);
859 	} while (size);
860 }
861 
862 static int attach_extent_buffer_page(struct extent_buffer *eb,
863 				     struct page *page,
864 				     struct btrfs_subpage *prealloc)
865 {
866 	struct btrfs_fs_info *fs_info = eb->fs_info;
867 	int ret = 0;
868 
869 	/*
870 	 * If the page is mapped to btree inode, we should hold the private
871 	 * lock to prevent race.
872 	 * For cloned or dummy extent buffers, their pages are not mapped and
873 	 * will not race with any other ebs.
874 	 */
875 	if (page->mapping)
876 		lockdep_assert_held(&page->mapping->private_lock);
877 
878 	if (fs_info->nodesize >= PAGE_SIZE) {
879 		if (!PagePrivate(page))
880 			attach_page_private(page, eb);
881 		else
882 			WARN_ON(page->private != (unsigned long)eb);
883 		return 0;
884 	}
885 
886 	/* Already mapped, just free prealloc */
887 	if (PagePrivate(page)) {
888 		btrfs_free_subpage(prealloc);
889 		return 0;
890 	}
891 
892 	if (prealloc)
893 		/* Has preallocated memory for subpage */
894 		attach_page_private(page, prealloc);
895 	else
896 		/* Do new allocation to attach subpage */
897 		ret = btrfs_attach_subpage(fs_info, page,
898 					   BTRFS_SUBPAGE_METADATA);
899 	return ret;
900 }
901 
902 int set_page_extent_mapped(struct page *page)
903 {
904 	struct btrfs_fs_info *fs_info;
905 
906 	ASSERT(page->mapping);
907 
908 	if (PagePrivate(page))
909 		return 0;
910 
911 	fs_info = btrfs_sb(page->mapping->host->i_sb);
912 
913 	if (btrfs_is_subpage(fs_info, page))
914 		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
915 
916 	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
917 	return 0;
918 }
919 
920 void clear_page_extent_mapped(struct page *page)
921 {
922 	struct btrfs_fs_info *fs_info;
923 
924 	ASSERT(page->mapping);
925 
926 	if (!PagePrivate(page))
927 		return;
928 
929 	fs_info = btrfs_sb(page->mapping->host->i_sb);
930 	if (btrfs_is_subpage(fs_info, page))
931 		return btrfs_detach_subpage(fs_info, page);
932 
933 	detach_page_private(page);
934 }
935 
936 static struct extent_map *
937 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
938 		 u64 start, u64 len, struct extent_map **em_cached)
939 {
940 	struct extent_map *em;
941 
942 	if (em_cached && *em_cached) {
943 		em = *em_cached;
944 		if (extent_map_in_tree(em) && start >= em->start &&
945 		    start < extent_map_end(em)) {
946 			refcount_inc(&em->refs);
947 			return em;
948 		}
949 
950 		free_extent_map(em);
951 		*em_cached = NULL;
952 	}
953 
954 	em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
955 	if (em_cached && !IS_ERR(em)) {
956 		BUG_ON(*em_cached);
957 		refcount_inc(&em->refs);
958 		*em_cached = em;
959 	}
960 	return em;
961 }
962 /*
963  * basic readpage implementation.  Locked extent state structs are inserted
964  * into the tree that are removed when the IO is done (by the end_io
965  * handlers)
966  * XXX JDM: This needs looking at to ensure proper page locking
967  * return 0 on success, otherwise return error
968  */
969 static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
970 		      struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
971 {
972 	struct inode *inode = page->mapping->host;
973 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
974 	u64 start = page_offset(page);
975 	const u64 end = start + PAGE_SIZE - 1;
976 	u64 cur = start;
977 	u64 extent_offset;
978 	u64 last_byte = i_size_read(inode);
979 	u64 block_start;
980 	struct extent_map *em;
981 	int ret = 0;
982 	size_t pg_offset = 0;
983 	size_t iosize;
984 	size_t blocksize = inode->i_sb->s_blocksize;
985 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
986 
987 	ret = set_page_extent_mapped(page);
988 	if (ret < 0) {
989 		unlock_extent(tree, start, end, NULL);
990 		unlock_page(page);
991 		return ret;
992 	}
993 
994 	if (page->index == last_byte >> PAGE_SHIFT) {
995 		size_t zero_offset = offset_in_page(last_byte);
996 
997 		if (zero_offset) {
998 			iosize = PAGE_SIZE - zero_offset;
999 			memzero_page(page, zero_offset, iosize);
1000 		}
1001 	}
1002 	bio_ctrl->end_io_func = end_bio_extent_readpage;
1003 	begin_page_read(fs_info, page);
1004 	while (cur <= end) {
1005 		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1006 		bool force_bio_submit = false;
1007 		u64 disk_bytenr;
1008 
1009 		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1010 		if (cur >= last_byte) {
1011 			iosize = PAGE_SIZE - pg_offset;
1012 			memzero_page(page, pg_offset, iosize);
1013 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1014 			end_page_read(page, true, cur, iosize);
1015 			break;
1016 		}
1017 		em = __get_extent_map(inode, page, pg_offset, cur,
1018 				      end - cur + 1, em_cached);
1019 		if (IS_ERR(em)) {
1020 			unlock_extent(tree, cur, end, NULL);
1021 			end_page_read(page, false, cur, end + 1 - cur);
1022 			return PTR_ERR(em);
1023 		}
1024 		extent_offset = cur - em->start;
1025 		BUG_ON(extent_map_end(em) <= cur);
1026 		BUG_ON(end < cur);
1027 
1028 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
1029 			compress_type = em->compress_type;
1030 
1031 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
1032 		iosize = ALIGN(iosize, blocksize);
1033 		if (compress_type != BTRFS_COMPRESS_NONE)
1034 			disk_bytenr = em->block_start;
1035 		else
1036 			disk_bytenr = em->block_start + extent_offset;
1037 		block_start = em->block_start;
1038 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
1039 			block_start = EXTENT_MAP_HOLE;
1040 
1041 		/*
1042 		 * If we have a file range that points to a compressed extent
1043 		 * and it's followed by a consecutive file range that points
1044 		 * to the same compressed extent (possibly with a different
1045 		 * offset and/or length, so it either points to the whole extent
1046 		 * or only part of it), we must make sure we do not submit a
1047 		 * single bio to populate the pages for the 2 ranges because
1048 		 * this makes the compressed extent read zero out the pages
1049 		 * belonging to the 2nd range. Imagine the following scenario:
1050 		 *
1051 		 *  File layout
1052 		 *  [0 - 8K]                     [8K - 24K]
1053 		 *    |                               |
1054 		 *    |                               |
1055 		 * points to extent X,         points to extent X,
1056 		 * offset 4K, length of 8K     offset 0, length 16K
1057 		 *
1058 		 * [extent X, compressed length = 4K uncompressed length = 16K]
1059 		 *
1060 		 * If the bio to read the compressed extent covers both ranges,
1061 		 * it will decompress extent X into the pages belonging to the
1062 		 * first range and then it will stop, zeroing out the remaining
1063 		 * pages that belong to the other range that points to extent X.
1064 		 * So here we make sure we submit 2 bios, one for the first
1065 		 * range and another one for the second range. Both will target
1066 		 * the same physical extent from disk, but we can't currently
1067 		 * make the compressed bio endio callback populate the pages
1068 		 * for both ranges because each compressed bio is tightly
1069 		 * coupled with a single extent map, and each range can have
1070 		 * an extent map with a different offset value relative to the
1071 		 * uncompressed data of our extent and different lengths. This
1072 		 * is a corner case so we prioritize correctness over
1073 		 * non-optimal behavior (submitting 2 bios for the same extent).
1074 		 */
1075 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
1076 		    prev_em_start && *prev_em_start != (u64)-1 &&
1077 		    *prev_em_start != em->start)
1078 			force_bio_submit = true;
1079 
1080 		if (prev_em_start)
1081 			*prev_em_start = em->start;
1082 
1083 		free_extent_map(em);
1084 		em = NULL;
1085 
1086 		/* we've found a hole, just zero and go on */
1087 		if (block_start == EXTENT_MAP_HOLE) {
1088 			memzero_page(page, pg_offset, iosize);
1089 
1090 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1091 			end_page_read(page, true, cur, iosize);
1092 			cur = cur + iosize;
1093 			pg_offset += iosize;
1094 			continue;
1095 		}
1096 		/* the get_extent function already copied into the page */
1097 		if (block_start == EXTENT_MAP_INLINE) {
1098 			unlock_extent(tree, cur, cur + iosize - 1, NULL);
1099 			end_page_read(page, true, cur, iosize);
1100 			cur = cur + iosize;
1101 			pg_offset += iosize;
1102 			continue;
1103 		}
1104 
1105 		if (bio_ctrl->compress_type != compress_type) {
1106 			submit_one_bio(bio_ctrl);
1107 			bio_ctrl->compress_type = compress_type;
1108 		}
1109 
1110 		if (force_bio_submit)
1111 			submit_one_bio(bio_ctrl);
1112 		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1113 				   pg_offset);
1114 		cur = cur + iosize;
1115 		pg_offset += iosize;
1116 	}
1117 
1118 	return 0;
1119 }
1120 
1121 int btrfs_read_folio(struct file *file, struct folio *folio)
1122 {
1123 	struct page *page = &folio->page;
1124 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
1125 	u64 start = page_offset(page);
1126 	u64 end = start + PAGE_SIZE - 1;
1127 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1128 	int ret;
1129 
1130 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1131 
1132 	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
1133 	/*
1134 	 * If btrfs_do_readpage() failed we will want to submit the assembled
1135 	 * bio to do the cleanup.
1136 	 */
1137 	submit_one_bio(&bio_ctrl);
1138 	return ret;
1139 }
1140 
1141 static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1142 					u64 start, u64 end,
1143 					struct extent_map **em_cached,
1144 					struct btrfs_bio_ctrl *bio_ctrl,
1145 					u64 *prev_em_start)
1146 {
1147 	struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
1148 	int index;
1149 
1150 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1151 
1152 	for (index = 0; index < nr_pages; index++) {
1153 		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1154 				  prev_em_start);
1155 		put_page(pages[index]);
1156 	}
1157 }
1158 
1159 /*
1160  * helper for __extent_writepage, doing all of the delayed allocation setup.
1161  *
1162  * This returns 1 if btrfs_run_delalloc_range() did all the work required
1163  * to write the page (copy into inline extent).  In this case the IO has
1164  * been started and the page is already unlocked.
1165  *
1166  * This returns 0 if all went well (page still locked)
1167  * This returns < 0 if there were errors (page still locked)
1168  */
1169 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1170 		struct page *page, struct writeback_control *wbc)
1171 {
1172 	const u64 page_start = page_offset(page);
1173 	const u64 page_end = page_start + PAGE_SIZE - 1;
1174 	u64 delalloc_start = page_start;
1175 	u64 delalloc_end = page_end;
1176 	u64 delalloc_to_write = 0;
1177 	int ret = 0;
1178 
1179 	while (delalloc_start < page_end) {
1180 		delalloc_end = page_end;
1181 		if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1182 					      &delalloc_start, &delalloc_end)) {
1183 			delalloc_start = delalloc_end + 1;
1184 			continue;
1185 		}
1186 
1187 		ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1188 					       delalloc_end, wbc);
1189 		if (ret < 0)
1190 			return ret;
1191 
1192 		delalloc_start = delalloc_end + 1;
1193 	}
1194 
1195 	/*
1196 	 * delalloc_end is already one less than the total length, so
1197 	 * we don't subtract one from PAGE_SIZE
1198 	 */
1199 	delalloc_to_write +=
1200 		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1201 
1202 	/*
1203 	 * If btrfs_run_delalloc_range() already started I/O and unlocked
1204 	 * the pages, we just need to account for them here.
1205 	 */
1206 	if (ret == 1) {
1207 		wbc->nr_to_write -= delalloc_to_write;
1208 		return 1;
1209 	}
1210 
1211 	if (wbc->nr_to_write < delalloc_to_write) {
1212 		int thresh = 8192;
1213 
1214 		if (delalloc_to_write < thresh * 2)
1215 			thresh = delalloc_to_write;
1216 		wbc->nr_to_write = min_t(u64, delalloc_to_write,
1217 					 thresh);
1218 	}
1219 
1220 	return 0;
1221 }
1222 
1223 /*
1224  * Find the first byte we need to write.
1225  *
1226  * For subpage, one page can contain several sectors, and
1227  * __extent_writepage_io() will just grab all extent maps in the page
1228  * range and try to submit all non-inline/non-compressed extents.
1229  *
1230  * This is a big problem for subpage, as we shouldn't re-submit already written
1231  * data at all.
1232  * This function will lookup subpage dirty bit to find which range we really
1233  * need to submit.
1234  *
1235  * Return the next dirty range in [@start, @end).
1236  * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1237  */
1238 static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1239 				 struct page *page, u64 *start, u64 *end)
1240 {
1241 	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
1242 	struct btrfs_subpage_info *spi = fs_info->subpage_info;
1243 	u64 orig_start = *start;
1244 	/* Declare as unsigned long so we can use bitmap ops */
1245 	unsigned long flags;
1246 	int range_start_bit;
1247 	int range_end_bit;
1248 
1249 	/*
1250 	 * For regular sector size == page size case, since one page only
1251 	 * contains one sector, we return the page offset directly.
1252 	 */
1253 	if (!btrfs_is_subpage(fs_info, page)) {
1254 		*start = page_offset(page);
1255 		*end = page_offset(page) + PAGE_SIZE;
1256 		return;
1257 	}
1258 
1259 	range_start_bit = spi->dirty_offset +
1260 			  (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1261 
1262 	/* We should have the page locked, but just in case */
1263 	spin_lock_irqsave(&subpage->lock, flags);
1264 	bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1265 			       spi->dirty_offset + spi->bitmap_nr_bits);
1266 	spin_unlock_irqrestore(&subpage->lock, flags);
1267 
1268 	range_start_bit -= spi->dirty_offset;
1269 	range_end_bit -= spi->dirty_offset;
1270 
1271 	*start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1272 	*end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1273 }
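/*
 * Illustrative example (assuming 4K sectorsize on a 64K page): if only the
 * sectors at page offsets 12K-24K are dirty and *start points at the page
 * start, the returned range is [page_offset + 12K, page_offset + 24K), and
 * __extent_writepage_io() skips straight to that range.
 */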
1274 
1275 /*
1276  * helper for __extent_writepage.  This calls the writepage start hooks,
1277  * and does the loop to map the page into extents and bios.
1278  *
1279  * We return 1 if the IO is started and the page is unlocked,
1280  * 0 if all went well (page still locked)
1281  * < 0 if there were errors (page still locked)
1282  */
1283 static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1284 				 struct page *page,
1285 				 struct btrfs_bio_ctrl *bio_ctrl,
1286 				 loff_t i_size,
1287 				 int *nr_ret)
1288 {
1289 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1290 	u64 cur = page_offset(page);
1291 	u64 end = cur + PAGE_SIZE - 1;
1292 	u64 extent_offset;
1293 	u64 block_start;
1294 	struct extent_map *em;
1295 	int ret = 0;
1296 	int nr = 0;
1297 
1298 	ret = btrfs_writepage_cow_fixup(page);
1299 	if (ret) {
1300 		/* Fixup worker will requeue */
1301 		redirty_page_for_writepage(bio_ctrl->wbc, page);
1302 		unlock_page(page);
1303 		return 1;
1304 	}
1305 
1306 	bio_ctrl->end_io_func = end_bio_extent_writepage;
1307 	while (cur <= end) {
1308 		u32 len = end - cur + 1;
1309 		u64 disk_bytenr;
1310 		u64 em_end;
1311 		u64 dirty_range_start = cur;
1312 		u64 dirty_range_end;
1313 		u32 iosize;
1314 
1315 		if (cur >= i_size) {
1316 			btrfs_mark_ordered_io_finished(inode, page, cur, len,
1317 						       true);
1318 			/*
1319 			 * This range is beyond i_size, thus we don't need to
1320 			 * bother writing back.
1321 			 * But we still need to clear the dirty subpage bit, or
1322 			 * the next time the page gets dirtied, we will try to
1323 			 * writeback the sectors with subpage dirty bits,
1324 			 * causing writeback without ordered extent.
1325 			 */
1326 			btrfs_page_clear_dirty(fs_info, page, cur, len);
1327 			break;
1328 		}
1329 
1330 		find_next_dirty_byte(fs_info, page, &dirty_range_start,
1331 				     &dirty_range_end);
1332 		if (cur < dirty_range_start) {
1333 			cur = dirty_range_start;
1334 			continue;
1335 		}
1336 
1337 		em = btrfs_get_extent(inode, NULL, 0, cur, len);
1338 		if (IS_ERR(em)) {
1339 			ret = PTR_ERR_OR_ZERO(em);
1340 			goto out_error;
1341 		}
1342 
1343 		extent_offset = cur - em->start;
1344 		em_end = extent_map_end(em);
1345 		ASSERT(cur <= em_end);
1346 		ASSERT(cur < end);
1347 		ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1348 		ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1349 
1350 		block_start = em->block_start;
1351 		disk_bytenr = em->block_start + extent_offset;
1352 
1353 		ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
1354 		ASSERT(block_start != EXTENT_MAP_HOLE);
1355 		ASSERT(block_start != EXTENT_MAP_INLINE);
1356 
1357 		/*
1358 		 * Note that em_end from extent_map_end() and dirty_range_end from
1359 		 * find_next_dirty_byte() are all exclusive
1360 		 */
1361 		iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1362 		free_extent_map(em);
1363 		em = NULL;
1364 
1365 		btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1366 		if (!PageWriteback(page)) {
1367 			btrfs_err(inode->root->fs_info,
1368 				   "page %lu not writeback, cur %llu end %llu",
1369 			       page->index, cur, end);
1370 		}
1371 
1372 		/*
1373 		 * Although the PageDirty bit is cleared before entering this
1374 		 * function, the subpage dirty bit is not.
1375 		 * So clear the subpage dirty bit here so that next time we won't
1376 		 * submit the page for a range already written to disk.
1377 		 */
1378 		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
1379 
1380 		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1381 				   cur - page_offset(page));
1382 		cur += iosize;
1383 		nr++;
1384 	}
1385 
1386 	btrfs_page_assert_not_dirty(fs_info, page);
1387 	*nr_ret = nr;
1388 	return 0;
1389 
1390 out_error:
1391 	/*
1392 	 * If we finish without problem, we should not only clear page dirty,
1393 	 * but also empty subpage dirty bits
1394 	 */
1395 	*nr_ret = nr;
1396 	return ret;
1397 }
1398 
1399 /*
1400  * the writepage semantics are similar to regular writepage.  extent
1401  * records are inserted to lock ranges in the tree, and as dirty areas
1402  * are found, they are marked writeback.  Then the lock bits are removed
1403  * and the end_io handler clears the writeback ranges
1404  *
1405  * Return 0 if everything goes well.
1406  * Return <0 for error.
1407  */
1408 static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
1409 {
1410 	struct folio *folio = page_folio(page);
1411 	struct inode *inode = page->mapping->host;
1412 	const u64 page_start = page_offset(page);
1413 	int ret;
1414 	int nr = 0;
1415 	size_t pg_offset;
1416 	loff_t i_size = i_size_read(inode);
1417 	unsigned long end_index = i_size >> PAGE_SHIFT;
1418 
1419 	trace___extent_writepage(page, inode, bio_ctrl->wbc);
1420 
1421 	WARN_ON(!PageLocked(page));
1422 
1423 	pg_offset = offset_in_page(i_size);
1424 	if (page->index > end_index ||
1425 	   (page->index == end_index && !pg_offset)) {
1426 		folio_invalidate(folio, 0, folio_size(folio));
1427 		folio_unlock(folio);
1428 		return 0;
1429 	}
1430 
1431 	if (page->index == end_index)
1432 		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1433 
1434 	ret = set_page_extent_mapped(page);
1435 	if (ret < 0)
1436 		goto done;
1437 
1438 	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1439 	if (ret == 1)
1440 		return 0;
1441 	if (ret)
1442 		goto done;
1443 
1444 	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
1445 	if (ret == 1)
1446 		return 0;
1447 
1448 	bio_ctrl->wbc->nr_to_write--;
1449 
1450 done:
1451 	if (nr == 0) {
1452 		/* make sure the mapping tag for page dirty gets cleared */
1453 		set_page_writeback(page);
1454 		end_page_writeback(page);
1455 	}
1456 	if (ret) {
1457 		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1458 					       PAGE_SIZE, !ret);
1459 		btrfs_page_clear_uptodate(btrfs_sb(inode->i_sb), page,
1460 					  page_start, PAGE_SIZE);
1461 		mapping_set_error(page->mapping, ret);
1462 	}
1463 	unlock_page(page);
1464 	ASSERT(ret <= 0);
1465 	return ret;
1466 }
1467 
1468 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1469 {
1470 	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1471 		       TASK_UNINTERRUPTIBLE);
1472 }
1473 
1474 /*
1475  * Lock extent buffer status and pages for writeback.
1476  *
1477  * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1478  * extent buffer is not dirty)
1479  * Return %true if the extent buffer is submitted to bio.
1480  */
1481 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1482 			  struct writeback_control *wbc)
1483 {
1484 	struct btrfs_fs_info *fs_info = eb->fs_info;
1485 	bool ret = false;
1486 
1487 	btrfs_tree_lock(eb);
1488 	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1489 		btrfs_tree_unlock(eb);
1490 		if (wbc->sync_mode != WB_SYNC_ALL)
1491 			return false;
1492 		wait_on_extent_buffer_writeback(eb);
1493 		btrfs_tree_lock(eb);
1494 	}
1495 
1496 	/*
1497 	 * We need to do this to prevent races in people who check if the eb is
1498 	 * under IO since we can end up having no IO bits set for a short period
1499 	 * of time.
1500 	 */
1501 	spin_lock(&eb->refs_lock);
1502 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1503 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1504 		spin_unlock(&eb->refs_lock);
1505 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1506 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1507 					 -eb->len,
1508 					 fs_info->dirty_metadata_batch);
1509 		ret = true;
1510 	} else {
1511 		spin_unlock(&eb->refs_lock);
1512 	}
1513 	btrfs_tree_unlock(eb);
1514 	return ret;
1515 }
1516 
1517 static void set_btree_ioerr(struct extent_buffer *eb)
1518 {
1519 	struct btrfs_fs_info *fs_info = eb->fs_info;
1520 
1521 	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1522 
1523 	/*
1524 	 * A read may stumble upon this buffer later, make sure that it gets an
1525 	 * error and knows there was an error.
1526 	 */
1527 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1528 
1529 	/*
1530 	 * We need to set the mapping with the io error as well because a write
1531 	 * error will flip the file system readonly, and then syncfs() will
1532 	 * return a 0 because we are readonly if we don't modify the err seq for
1533 	 * the superblock.
1534 	 */
1535 	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1536 
1537 	/*
1538 	 * If writeback for a btree extent that doesn't belong to a log tree
1539 	 * failed, increment the counter transaction->eb_write_errors.
1540 	 * We do this because while the transaction is running and before it's
1541 	 * committing (when we call filemap_fdata[write|wait]_range against
1542 	 * the btree inode), we might have
1543 	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1544 	 * returns an error or an error happens during writeback, when we're
1545 	 * committing the transaction we wouldn't know about it, since the pages
1546 	 * can be no longer dirty nor marked anymore for writeback (if a
1547 	 * subsequent modification to the extent buffer didn't happen before the
1548 	 * transaction commit), which makes filemap_fdata[write|wait]_range not
1549 	 * able to find the pages tagged with SetPageError at transaction
1550 	 * commit time. So if this happens we must abort the transaction,
1551 	 * otherwise we commit a super block with btree roots that point to
1552 	 * btree nodes/leafs whose content on disk is invalid - either garbage
1553 	 * or the content of some node/leaf from a past generation that got
1554 	 * cowed or deleted and is no longer valid.
1555 	 *
1556 	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1557 	 * not be enough - we need to distinguish between log tree extents vs
1558 	 * non-log tree extents, and the next filemap_fdatawait_range() call
1559 	 * will catch and clear such errors in the mapping - and that call might
1560 	 * be from a log sync and not from a transaction commit. Also, checking
1561 	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1562 	 * not done and would not be reliable - the eb might have been released
1563 	 * from memory and reading it back again means that flag would not be
1564 	 * set (since it's a runtime flag, not persisted on disk).
1565 	 *
1566 	 * Using the flags below in the btree inode also makes us achieve the
1567 	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1568 	 * writeback for all dirty pages and before filemap_fdatawait_range()
1569 	 * is called, the writeback for all dirty pages had already finished
1570 	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1571 	 * filemap_fdatawait_range() would return success, as it could not know
1572 	 * that writeback errors happened (the pages were no longer tagged for
1573 	 * writeback).
1574 	 */
1575 	switch (eb->log_index) {
1576 	case -1:
1577 		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1578 		break;
1579 	case 0:
1580 		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1581 		break;
1582 	case 1:
1583 		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1584 		break;
1585 	default:
1586 		BUG(); /* unexpected, logic error */
1587 	}
1588 }
1589 
1590 /*
1591  * The endio specific version which won't touch any unsafe spinlock in endio
1592  * context.
1593  */
1594 static struct extent_buffer *find_extent_buffer_nolock(
1595 		struct btrfs_fs_info *fs_info, u64 start)
1596 {
1597 	struct extent_buffer *eb;
1598 
1599 	rcu_read_lock();
1600 	eb = radix_tree_lookup(&fs_info->buffer_radix,
1601 			       start >> fs_info->sectorsize_bits);
1602 	if (eb && atomic_inc_not_zero(&eb->refs)) {
1603 		rcu_read_unlock();
1604 		return eb;
1605 	}
1606 	rcu_read_unlock();
1607 	return NULL;
1608 }
1609 
1610 static void extent_buffer_write_end_io(struct btrfs_bio *bbio)
1611 {
1612 	struct extent_buffer *eb = bbio->private;
1613 	struct btrfs_fs_info *fs_info = eb->fs_info;
1614 	bool uptodate = !bbio->bio.bi_status;
1615 	struct bvec_iter_all iter_all;
1616 	struct bio_vec *bvec;
1617 	u32 bio_offset = 0;
1618 
1619 	if (!uptodate)
1620 		set_btree_ioerr(eb);
1621 
1622 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
1623 		u64 start = eb->start + bio_offset;
1624 		struct page *page = bvec->bv_page;
1625 		u32 len = bvec->bv_len;
1626 
1627 		if (!uptodate)
1628 			btrfs_page_clear_uptodate(fs_info, page, start, len);
1629 		btrfs_page_clear_writeback(fs_info, page, start, len);
1630 		bio_offset += len;
1631 	}
1632 
1633 	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1634 	smp_mb__after_atomic();
1635 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1636 
1637 	bio_put(&bbio->bio);
1638 }
1639 
1640 static void prepare_eb_write(struct extent_buffer *eb)
1641 {
1642 	u32 nritems;
1643 	unsigned long start;
1644 	unsigned long end;
1645 
1646 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1647 
1648 	/* Set btree blocks beyond nritems to 0 to avoid stale content */
1649 	nritems = btrfs_header_nritems(eb);
1650 	if (btrfs_header_level(eb) > 0) {
1651 		end = btrfs_node_key_ptr_offset(eb, nritems);
1652 		memzero_extent_buffer(eb, end, eb->len - end);
1653 	} else {
1654 		/*
1655 		 * Leaf:
1656 		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1657 		 */
1658 		start = btrfs_item_nr_offset(eb, nritems);
1659 		end = btrfs_item_nr_offset(eb, 0);
1660 		if (nritems == 0)
1661 			end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1662 		else
1663 			end += btrfs_item_offset(eb, nritems - 1);
1664 		memzero_extent_buffer(eb, start, end - start);
1665 	}
1666 }
1667 
1668 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1669 					    struct writeback_control *wbc)
1670 {
1671 	struct btrfs_fs_info *fs_info = eb->fs_info;
1672 	struct btrfs_bio *bbio;
1673 
1674 	prepare_eb_write(eb);
1675 
1676 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1677 			       REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1678 			       eb->fs_info, extent_buffer_write_end_io, eb);
1679 	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1680 	bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1681 	wbc_init_bio(wbc, &bbio->bio);
1682 	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1683 	bbio->file_offset = eb->start;
1684 	if (fs_info->nodesize < PAGE_SIZE) {
1685 		struct page *p = eb->pages[0];
1686 
1687 		lock_page(p);
1688 		btrfs_subpage_set_writeback(fs_info, p, eb->start, eb->len);
1689 		if (btrfs_subpage_clear_and_test_dirty(fs_info, p, eb->start,
1690 						       eb->len)) {
1691 			clear_page_dirty_for_io(p);
1692 			wbc->nr_to_write--;
1693 		}
1694 		__bio_add_page(&bbio->bio, p, eb->len, eb->start - page_offset(p));
1695 		wbc_account_cgroup_owner(wbc, p, eb->len);
1696 		unlock_page(p);
1697 	} else {
1698 		for (int i = 0; i < num_extent_pages(eb); i++) {
1699 			struct page *p = eb->pages[i];
1700 
1701 			lock_page(p);
1702 			clear_page_dirty_for_io(p);
1703 			set_page_writeback(p);
1704 			__bio_add_page(&bbio->bio, p, PAGE_SIZE, 0);
1705 			wbc_account_cgroup_owner(wbc, p, PAGE_SIZE);
1706 			wbc->nr_to_write--;
1707 			unlock_page(p);
1708 		}
1709 	}
1710 	btrfs_submit_bio(bbio, 0);
1711 }
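
/*
 * Note on the two branches above: for subpage metadata (nodesize < PAGE_SIZE)
 * only the eb's byte range within its single page is switched from dirty to
 * writeback and added to the bio, while for regular metadata every page
 * backing the eb is cleaned, marked writeback and added in full.
 */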
1712 
1713 /*
1714  * Submit one subpage btree page.
1715  *
1716  * The main difference to submit_eb_page() is:
1717  * - Page locking
1718  *   For subpage, we don't rely on page locking at all.
1719  *
1720  * - Flush write bio
1721  *   We only flush bio if we may be unable to fit current extent buffers into
1722  *   current bio.
1723  *
1724  * Return >=0 for the number of submitted extent buffers.
1725  * Return <0 for fatal error.
1726  */
1727 static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1728 {
1729 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
1730 	int submitted = 0;
1731 	u64 page_start = page_offset(page);
1732 	int bit_start = 0;
1733 	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1734 
1735 	/* Lock and write each dirty extent buffer in the range */
1736 	while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1737 		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
1738 		struct extent_buffer *eb;
1739 		unsigned long flags;
1740 		u64 start;
1741 
1742 		/*
1743 		 * Take private lock to ensure the subpage won't be detached
1744 		 * in the meantime.
1745 		 */
1746 		spin_lock(&page->mapping->private_lock);
1747 		if (!PagePrivate(page)) {
1748 			spin_unlock(&page->mapping->private_lock);
1749 			break;
1750 		}
1751 		spin_lock_irqsave(&subpage->lock, flags);
1752 		if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1753 			      subpage->bitmaps)) {
1754 			spin_unlock_irqrestore(&subpage->lock, flags);
1755 			spin_unlock(&page->mapping->private_lock);
1756 			bit_start++;
1757 			continue;
1758 		}
1759 
1760 		start = page_start + bit_start * fs_info->sectorsize;
1761 		bit_start += sectors_per_node;
1762 
1763 		/*
1764 		 * Here we just want to grab the eb without touching extra
1765 		 * spin locks, so call find_extent_buffer_nolock().
1766 		 */
1767 		eb = find_extent_buffer_nolock(fs_info, start);
1768 		spin_unlock_irqrestore(&subpage->lock, flags);
1769 		spin_unlock(&page->mapping->private_lock);
1770 
1771 		/*
1772 		 * The eb has already reached 0 refs, thus find_extent_buffer_nolock()
1773 		 * doesn't return it. We don't need to write back such an eb
1774 		 * anyway.
1775 		 */
1776 		if (!eb)
1777 			continue;
1778 
1779 		if (lock_extent_buffer_for_io(eb, wbc)) {
1780 			write_one_eb(eb, wbc);
1781 			submitted++;
1782 		}
1783 		free_extent_buffer(eb);
1784 	}
1785 	return submitted;
1786 }
1787 
1788 /*
1789  * Submit all page(s) of one extent buffer.
1790  *
1791  * @page:	the page of one extent buffer
1792  * @ctx:	used to determine if we need to submit this page; if the current
1793  *		page belongs to ctx->eb, we don't need to submit it again
1794  *
1795  * The caller should pass each page in bytenr order, and here we use
1796  * ctx->eb to determine if we have already submitted pages of one extent buffer.
1797  *
1798  * If we have, we just skip until we hit a new page that doesn't belong to
1799  * the current ctx->eb.
1800  *
1801  * If not, we submit all the page(s) of the extent buffer.
1802  *
1803  * Return >0 if we have submitted the extent buffer successfully.
1804  * Return 0 if we don't need to submit the page, as it was already submitted
1805  * by a previous call.
1806  * Return <0 for fatal error.
1807  */
1808 static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1809 {
1810 	struct writeback_control *wbc = ctx->wbc;
1811 	struct address_space *mapping = page->mapping;
1812 	struct extent_buffer *eb;
1813 	int ret;
1814 
1815 	if (!PagePrivate(page))
1816 		return 0;
1817 
1818 	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
1819 		return submit_eb_subpage(page, wbc);
1820 
1821 	spin_lock(&mapping->private_lock);
1822 	if (!PagePrivate(page)) {
1823 		spin_unlock(&mapping->private_lock);
1824 		return 0;
1825 	}
1826 
1827 	eb = (struct extent_buffer *)page->private;
1828 
1829 	/*
1830 	 * Shouldn't happen and normally this would be a BUG_ON but no point
1831 	 * crashing the machine for something we can survive anyway.
1832 	 */
1833 	if (WARN_ON(!eb)) {
1834 		spin_unlock(&mapping->private_lock);
1835 		return 0;
1836 	}
1837 
1838 	if (eb == ctx->eb) {
1839 		spin_unlock(&mapping->private_lock);
1840 		return 0;
1841 	}
1842 	ret = atomic_inc_not_zero(&eb->refs);
1843 	spin_unlock(&mapping->private_lock);
1844 	if (!ret)
1845 		return 0;
1846 
1847 	ctx->eb = eb;
1848 
1849 	ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1850 	if (ret) {
1851 		if (ret == -EBUSY)
1852 			ret = 0;
1853 		free_extent_buffer(eb);
1854 		return ret;
1855 	}
1856 
1857 	if (!lock_extent_buffer_for_io(eb, wbc)) {
1858 		free_extent_buffer(eb);
1859 		return 0;
1860 	}
1861 	/* Implies write in zoned mode. */
1862 	if (ctx->zoned_bg) {
1863 		/* Mark the last eb in the block group. */
1864 		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1865 		ctx->zoned_bg->meta_write_pointer += eb->len;
1866 	}
1867 	write_one_eb(eb, wbc);
1868 	free_extent_buffer(eb);
1869 	return 1;
1870 }
1871 
1872 int btree_write_cache_pages(struct address_space *mapping,
1873 				   struct writeback_control *wbc)
1874 {
1875 	struct btrfs_eb_write_context ctx = { .wbc = wbc };
1876 	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
1877 	int ret = 0;
1878 	int done = 0;
1879 	int nr_to_write_done = 0;
1880 	struct folio_batch fbatch;
1881 	unsigned int nr_folios;
1882 	pgoff_t index;
1883 	pgoff_t end;		/* Inclusive */
1884 	int scanned = 0;
1885 	xa_mark_t tag;
1886 
1887 	folio_batch_init(&fbatch);
1888 	if (wbc->range_cyclic) {
1889 		index = mapping->writeback_index; /* Start from prev offset */
1890 		end = -1;
1891 		/*
1892 		 * Starting from the beginning does not need to cycle over the
1893 		 * range, so mark it as scanned.
1894 		 */
1895 		scanned = (index == 0);
1896 	} else {
1897 		index = wbc->range_start >> PAGE_SHIFT;
1898 		end = wbc->range_end >> PAGE_SHIFT;
1899 		scanned = 1;
1900 	}
1901 	if (wbc->sync_mode == WB_SYNC_ALL)
1902 		tag = PAGECACHE_TAG_TOWRITE;
1903 	else
1904 		tag = PAGECACHE_TAG_DIRTY;
1905 	btrfs_zoned_meta_io_lock(fs_info);
1906 retry:
1907 	if (wbc->sync_mode == WB_SYNC_ALL)
1908 		tag_pages_for_writeback(mapping, index, end);
1909 	while (!done && !nr_to_write_done && (index <= end) &&
1910 	       (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1911 					    tag, &fbatch))) {
1912 		unsigned i;
1913 
1914 		for (i = 0; i < nr_folios; i++) {
1915 			struct folio *folio = fbatch.folios[i];
1916 
1917 			ret = submit_eb_page(&folio->page, &ctx);
1918 			if (ret == 0)
1919 				continue;
1920 			if (ret < 0) {
1921 				done = 1;
1922 				break;
1923 			}
1924 
1925 			/*
1926 			 * The filesystem may choose to bump up nr_to_write.
1927 			 * We have to make sure to honor the new nr_to_write
1928 			 * at any time.
1929 			 */
1930 			nr_to_write_done = wbc->nr_to_write <= 0;
1931 		}
1932 		folio_batch_release(&fbatch);
1933 		cond_resched();
1934 	}
1935 	if (!scanned && !done) {
1936 		/*
1937 		 * We hit the last page and there is more work to be done: wrap
1938 		 * back to the start of the file
1939 		 */
1940 		scanned = 1;
1941 		index = 0;
1942 		goto retry;
1943 	}
1944 	/*
1945 	 * If something went wrong, don't allow any metadata write bio to be
1946 	 * submitted.
1947 	 *
1948 	 * This would prevent use-after-free if we had dirty pages not
1949 	 * cleaned up, which can still happen with fuzzed images.
1950 	 *
1951 	 * - Bad extent tree
1952 	 *   Allows existing tree blocks to be allocated for other trees.
1953 	 *
1954 	 * - Log tree operations
1955 	 *   Existing tree blocks get allocated to the log tree, their
1956 	 *   generation gets bumped, then they get cleaned in tree re-balance.
1957 	 *   Such a tree block will not be written back, since it's clean,
1958 	 *   thus no WRITTEN flag is set.
1959 	 *   And after the log is written back, this tree block is not tracked
1960 	 *   by any dirty extent_io_tree.
1961 	 *
1962 	 * - Offending tree block gets re-dirtied from its original owner
1963 	 *   Since it has a bumped generation and no WRITTEN flag, it can be
1964 	 *   reused without COWing. This tree block will not be tracked
1965 	 *   by btrfs_transaction::dirty_pages.
1966 	 *
1967 	 *   Now such a dirty tree block will not be cleaned by any dirty
1968 	 *   extent io tree. Thus we don't want to submit such a wild eb
1969 	 *   if the fs already has errors.
1970 	 *
1971 	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
1972 	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
1973 	 */
1974 	if (ret > 0)
1975 		ret = 0;
1976 	if (!ret && BTRFS_FS_ERROR(fs_info))
1977 		ret = -EROFS;
1978 
1979 	if (ctx.zoned_bg)
1980 		btrfs_put_block_group(ctx.zoned_bg);
1981 	btrfs_zoned_meta_io_unlock(fs_info);
1982 	return ret;
1983 }
1984 
1985 /*
1986  * Walk the list of dirty pages of the given address space and write all of them.
1987  *
1988  * @mapping:   address space structure to write
1989  * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
1990  * @bio_ctrl:  holds context for the write, namely the bio
1991  *
1992  * If a page is already under I/O, write_cache_pages() skips it, even
1993  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
1994  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
1995  * and msync() need to guarantee that all the data which was dirty at the time
1996  * the call was made get new I/O started against them.  If wbc->sync_mode is
1997  * WB_SYNC_ALL then we were called for data integrity and we must wait for
1998  * existing IO to complete.
1999  */
2000 static int extent_write_cache_pages(struct address_space *mapping,
2001 			     struct btrfs_bio_ctrl *bio_ctrl)
2002 {
2003 	struct writeback_control *wbc = bio_ctrl->wbc;
2004 	struct inode *inode = mapping->host;
2005 	int ret = 0;
2006 	int done = 0;
2007 	int nr_to_write_done = 0;
2008 	struct folio_batch fbatch;
2009 	unsigned int nr_folios;
2010 	pgoff_t index;
2011 	pgoff_t end;		/* Inclusive */
2012 	pgoff_t done_index;
2013 	int range_whole = 0;
2014 	int scanned = 0;
2015 	xa_mark_t tag;
2016 
2017 	/*
2018 	 * We have to hold onto the inode so that ordered extents can do their
2019 	 * work when the IO finishes.  The alternative to this is failing to add
2020 	 * an ordered extent if the igrab() fails there and that is a huge pain
2021 	 * to deal with, so instead just hold onto the inode throughout the
2022 	 * writepages operation.  If it fails here we are freeing up the inode
2023 	 * anyway and we'd rather not waste our time writing out stuff that is
2024 	 * going to be truncated anyway.
2025 	 */
2026 	if (!igrab(inode))
2027 		return 0;
2028 
2029 	folio_batch_init(&fbatch);
2030 	if (wbc->range_cyclic) {
2031 		index = mapping->writeback_index; /* Start from prev offset */
2032 		end = -1;
2033 		/*
2034 		 * Starting from the beginning does not need to cycle over the
2035 		 * range, so mark it as scanned.
2036 		 */
2037 		scanned = (index == 0);
2038 	} else {
2039 		index = wbc->range_start >> PAGE_SHIFT;
2040 		end = wbc->range_end >> PAGE_SHIFT;
2041 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2042 			range_whole = 1;
2043 		scanned = 1;
2044 	}
2045 
2046 	/*
2047 	 * We do the tagged writepage as long as the snapshot flush bit is set
2048 	 * and we are the first one to do the filemap_flush() on this inode.
2049 	 *
2050 	 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2051 	 * not race in and drop the bit.
2052 	 */
2053 	if (range_whole && wbc->nr_to_write == LONG_MAX &&
2054 	    test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2055 			       &BTRFS_I(inode)->runtime_flags))
2056 		wbc->tagged_writepages = 1;
2057 
2058 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2059 		tag = PAGECACHE_TAG_TOWRITE;
2060 	else
2061 		tag = PAGECACHE_TAG_DIRTY;
2062 retry:
2063 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2064 		tag_pages_for_writeback(mapping, index, end);
2065 	done_index = index;
2066 	while (!done && !nr_to_write_done && (index <= end) &&
2067 			(nr_folios = filemap_get_folios_tag(mapping, &index,
2068 							end, tag, &fbatch))) {
2069 		unsigned i;
2070 
2071 		for (i = 0; i < nr_folios; i++) {
2072 			struct folio *folio = fbatch.folios[i];
2073 
2074 			done_index = folio_next_index(folio);
2075 			/*
2076 			 * At this point we hold neither the i_pages lock nor
2077 			 * the page lock: the page may be truncated or
2078 			 * invalidated (changing page->mapping to NULL),
2079 			 * or even swizzled back from swapper_space to
2080 			 * tmpfs file mapping
2081 			 */
2082 			if (!folio_trylock(folio)) {
2083 				submit_write_bio(bio_ctrl, 0);
2084 				folio_lock(folio);
2085 			}
2086 
2087 			if (unlikely(folio->mapping != mapping)) {
2088 				folio_unlock(folio);
2089 				continue;
2090 			}
2091 
2092 			if (!folio_test_dirty(folio)) {
2093 				/* Someone wrote it for us. */
2094 				folio_unlock(folio);
2095 				continue;
2096 			}
2097 
2098 			if (wbc->sync_mode != WB_SYNC_NONE) {
2099 				if (folio_test_writeback(folio))
2100 					submit_write_bio(bio_ctrl, 0);
2101 				folio_wait_writeback(folio);
2102 			}
2103 
2104 			if (folio_test_writeback(folio) ||
2105 			    !folio_clear_dirty_for_io(folio)) {
2106 				folio_unlock(folio);
2107 				continue;
2108 			}
2109 
2110 			ret = __extent_writepage(&folio->page, bio_ctrl);
2111 			if (ret < 0) {
2112 				done = 1;
2113 				break;
2114 			}
2115 
2116 			/*
2117 			 * The filesystem may choose to bump up nr_to_write.
2118 			 * We have to make sure to honor the new nr_to_write
2119 			 * at any time.
2120 			 */
2121 			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2122 					    wbc->nr_to_write <= 0);
2123 		}
2124 		folio_batch_release(&fbatch);
2125 		cond_resched();
2126 	}
2127 	if (!scanned && !done) {
2128 		/*
2129 		 * We hit the last page and there is more work to be done: wrap
2130 		 * back to the start of the file
2131 		 */
2132 		scanned = 1;
2133 		index = 0;
2134 
2135 		/*
2136 		 * If we're looping we could run into a page that is locked by a
2137 		 * writer and that writer could be waiting on writeback for a
2138 		 * page in our current bio, and thus deadlock, so flush the
2139 		 * write bio here.
2140 		 */
2141 		submit_write_bio(bio_ctrl, 0);
2142 		goto retry;
2143 	}
2144 
2145 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2146 		mapping->writeback_index = done_index;
2147 
2148 	btrfs_add_delayed_iput(BTRFS_I(inode));
2149 	return ret;
2150 }
2151 
2152 /*
2153  * Submit the pages in the range to bio for call sites where the delalloc range
2154  * has already been run (aka, ordered extent inserted) and all pages are still
2155  * locked.
2156  */
2157 void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2158 			       u64 start, u64 end, struct writeback_control *wbc,
2159 			       bool pages_dirty)
2160 {
2161 	bool found_error = false;
2162 	int ret = 0;
2163 	struct address_space *mapping = inode->i_mapping;
2164 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2165 	const u32 sectorsize = fs_info->sectorsize;
2166 	loff_t i_size = i_size_read(inode);
2167 	u64 cur = start;
2168 	struct btrfs_bio_ctrl bio_ctrl = {
2169 		.wbc = wbc,
2170 		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2171 	};
2172 
2173 	if (wbc->no_cgroup_owner)
2174 		bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2175 
2176 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2177 
2178 	while (cur <= end) {
2179 		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2180 		u32 cur_len = cur_end + 1 - cur;
2181 		struct page *page;
2182 		int nr = 0;
2183 
2184 		page = find_get_page(mapping, cur >> PAGE_SHIFT);
2185 		ASSERT(PageLocked(page));
2186 		if (pages_dirty && page != locked_page) {
2187 			ASSERT(PageDirty(page));
2188 			clear_page_dirty_for_io(page);
2189 		}
2190 
2191 		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2192 					    i_size, &nr);
2193 		if (ret == 1)
2194 			goto next_page;
2195 
2196 		/* Make sure the mapping tag for page dirty gets cleared. */
2197 		if (nr == 0) {
2198 			set_page_writeback(page);
2199 			end_page_writeback(page);
2200 		}
2201 		if (ret) {
2202 			btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2203 						       cur, cur_len, !ret);
2204 			btrfs_page_clear_uptodate(fs_info, page, cur, cur_len);
2205 			mapping_set_error(page->mapping, ret);
2206 		}
2207 		btrfs_page_unlock_writer(fs_info, page, cur, cur_len);
2208 		if (ret < 0)
2209 			found_error = true;
2210 next_page:
2211 		put_page(page);
2212 		cur = cur_end + 1;
2213 	}
2214 
2215 	submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2216 }
2217 
2218 int extent_writepages(struct address_space *mapping,
2219 		      struct writeback_control *wbc)
2220 {
2221 	struct inode *inode = mapping->host;
2222 	int ret = 0;
2223 	struct btrfs_bio_ctrl bio_ctrl = {
2224 		.wbc = wbc,
2225 		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2226 	};
2227 
2228 	/*
2229 	 * Allow only a single thread to do the reloc work in zoned mode to
2230 	 * protect the write pointer updates.
2231 	 */
2232 	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2233 	ret = extent_write_cache_pages(mapping, &bio_ctrl);
2234 	submit_write_bio(&bio_ctrl, ret);
2235 	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2236 	return ret;
2237 }
2238 
2239 void extent_readahead(struct readahead_control *rac)
2240 {
2241 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2242 	struct page *pagepool[16];
2243 	struct extent_map *em_cached = NULL;
2244 	u64 prev_em_start = (u64)-1;
2245 	int nr;
2246 
2247 	while ((nr = readahead_page_batch(rac, pagepool))) {
2248 		u64 contig_start = readahead_pos(rac);
2249 		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2250 
2251 		contiguous_readpages(pagepool, nr, contig_start, contig_end,
2252 				&em_cached, &bio_ctrl, &prev_em_start);
2253 	}
2254 
2255 	if (em_cached)
2256 		free_extent_map(em_cached);
2257 	submit_one_bio(&bio_ctrl);
2258 }
2259 
2260 /*
2261  * basic invalidate_folio code, this waits on any locked or writeback
2262  * ranges corresponding to the folio, and then deletes any extent state
2263  * records from the tree
2264  */
2265 int extent_invalidate_folio(struct extent_io_tree *tree,
2266 			  struct folio *folio, size_t offset)
2267 {
2268 	struct extent_state *cached_state = NULL;
2269 	u64 start = folio_pos(folio);
2270 	u64 end = start + folio_size(folio) - 1;
2271 	size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
2272 
2273 	/* This function is only called for the btree inode */
2274 	ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2275 
2276 	start += ALIGN(offset, blocksize);
2277 	if (start > end)
2278 		return 0;
2279 
2280 	lock_extent(tree, start, end, &cached_state);
2281 	folio_wait_writeback(folio);
2282 
2283 	/*
2284 	 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2285 	 * so here we only need to unlock the extent range to free any
2286 	 * existing extent state.
2287 	 */
2288 	unlock_extent(tree, start, end, &cached_state);
2289 	return 0;
2290 }
2291 
2292 /*
2293  * a helper for release_folio, this tests for areas of the page that
2294  * are locked or under IO and drops the related state bits if it is safe
2295  * to drop the page.
2296  */
2297 static int try_release_extent_state(struct extent_io_tree *tree,
2298 				    struct page *page, gfp_t mask)
2299 {
2300 	u64 start = page_offset(page);
2301 	u64 end = start + PAGE_SIZE - 1;
2302 	int ret = 1;
2303 
2304 	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
2305 		ret = 0;
2306 	} else {
2307 		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2308 				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS);
2309 
2310 		/*
2311 		 * At this point we can safely clear everything except the
2312 		 * locked bit, the nodatasum bit and the delalloc new bit.
2313 		 * The delalloc new bit will be cleared by ordered extent
2314 		 * completion.
2315 		 */
2316 		ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2317 
2318 		/* If __clear_extent_bit() failed for ENOMEM reasons,
2319 		 * we can't allow the release to continue.
2320 		 */
2321 		if (ret < 0)
2322 			ret = 0;
2323 		else
2324 			ret = 1;
2325 	}
2326 	return ret;
2327 }
2328 
2329 /*
2330  * a helper for release_folio.  As long as there are no locked extents
2331  * in the range corresponding to the page, both state records and extent
2332  * map records are removed
2333  */
2334 int try_release_extent_mapping(struct page *page, gfp_t mask)
2335 {
2336 	struct extent_map *em;
2337 	u64 start = page_offset(page);
2338 	u64 end = start + PAGE_SIZE - 1;
2339 	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
2340 	struct extent_io_tree *tree = &btrfs_inode->io_tree;
2341 	struct extent_map_tree *map = &btrfs_inode->extent_tree;
2342 
2343 	if (gfpflags_allow_blocking(mask) &&
2344 	    page->mapping->host->i_size > SZ_16M) {
2345 		u64 len;
2346 		while (start <= end) {
2347 			struct btrfs_fs_info *fs_info;
2348 			u64 cur_gen;
2349 
2350 			len = end - start + 1;
2351 			write_lock(&map->lock);
2352 			em = lookup_extent_mapping(map, start, len);
2353 			if (!em) {
2354 				write_unlock(&map->lock);
2355 				break;
2356 			}
2357 			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2358 			    em->start != start) {
2359 				write_unlock(&map->lock);
2360 				free_extent_map(em);
2361 				break;
2362 			}
2363 			if (test_range_bit(tree, em->start,
2364 					   extent_map_end(em) - 1,
2365 					   EXTENT_LOCKED, 0, NULL))
2366 				goto next;
2367 			/*
2368 			 * If it's not in the list of modified extents, used
2369 			 * by a fast fsync, we can remove it. If it's being
2370 			 * logged we can safely remove it since fsync took an
2371 			 * extra reference on the em.
2372 			 */
2373 			if (list_empty(&em->list) ||
2374 			    test_bit(EXTENT_FLAG_LOGGING, &em->flags))
2375 				goto remove_em;
2376 			/*
2377 			 * If it's in the list of modified extents, remove it
2378 			 * only if its generation is older than the current one,
2379 			 * in which case we don't need it for a fast fsync.
2380 			 * Otherwise don't remove it, we could be racing with an
2381 			 * ongoing fast fsync that could miss the new extent.
2382 			 */
2383 			fs_info = btrfs_inode->root->fs_info;
2384 			spin_lock(&fs_info->trans_lock);
2385 			cur_gen = fs_info->generation;
2386 			spin_unlock(&fs_info->trans_lock);
2387 			if (em->generation >= cur_gen)
2388 				goto next;
2389 remove_em:
2390 			/*
2391 			 * We only remove extent maps that are not in the list of
2392 			 * modified extents or that are in the list but with a
2393 			 * generation lower than the current generation, so there
2394 			 * is no need to set the full fsync flag on the inode (it
2395 			 * hurts the fsync performance for workloads with a data
2396 			 * size that exceeds or is close to the system's memory).
2397 			 */
2398 			remove_extent_mapping(map, em);
2399 			/* once for the rb tree */
2400 			free_extent_map(em);
2401 next:
2402 			start = extent_map_end(em);
2403 			write_unlock(&map->lock);
2404 
2405 			/* once for us */
2406 			free_extent_map(em);
2407 
2408 			cond_resched(); /* Allow large-extent preemption. */
2409 		}
2410 	}
2411 	return try_release_extent_state(tree, page, mask);
2412 }
2413 
2414 /*
2415  * To cache the previous fiemap extent.
2416  *
2417  * Will be used for merging fiemap extents.
2418  */
2419 struct fiemap_cache {
2420 	u64 offset;
2421 	u64 phys;
2422 	u64 len;
2423 	u32 flags;
2424 	bool cached;
2425 };
2426 
2427 /*
2428  * Helper to submit fiemap extent.
2429  *
2430  * Will try to merge current fiemap extent specified by @offset, @phys,
2431  * @len and @flags with cached one.
2432  * And only when we fails to merge, cached one will be submitted as
2433  * fiemap extent.
2434  *
2435  * Return value is the same as fiemap_fill_next_extent().
2436  */
2437 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2438 				struct fiemap_cache *cache,
2439 				u64 offset, u64 phys, u64 len, u32 flags)
2440 {
2441 	int ret = 0;
2442 
2443 	/* Set at the end of extent_fiemap(). */
2444 	ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2445 
2446 	if (!cache->cached)
2447 		goto assign;
2448 
2449 	/*
2450 	 * Sanity check: extent_fiemap() should have ensured that the new
2451 	 * fiemap extent won't overlap with the cached one.
2452 	 * Not recoverable.
2453 	 *
2454 	 * NOTE: Physical addresses can overlap due to compression.
2455 	 */
2456 	if (cache->offset + cache->len > offset) {
2457 		WARN_ON(1);
2458 		return -EINVAL;
2459 	}
2460 
2461 	/*
2462 	 * Only merge fiemap extents if:
2463 	 * 1) Their logical addresses are contiguous
2464 	 *
2465 	 * 2) Their physical addresses are contiguous
2466 	 *    So truly compressed (physical size smaller than logical size)
2467 	 *    extents won't get merged with each other
2468 	 *
2469 	 * 3) They share the same flags
2470 	 */
2471 	if (cache->offset + cache->len  == offset &&
2472 	    cache->phys + cache->len == phys  &&
2473 	    cache->flags == flags) {
2474 		cache->len += len;
2475 		return 0;
2476 	}
2477 
2478 	/* Not mergeable, need to submit cached one */
2479 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2480 				      cache->len, cache->flags);
2481 	cache->cached = false;
2482 	if (ret)
2483 		return ret;
2484 assign:
2485 	cache->cached = true;
2486 	cache->offset = offset;
2487 	cache->phys = phys;
2488 	cache->len = len;
2489 	cache->flags = flags;
2490 
2491 	return 0;
2492 }
2493 
2494 /*
2495  * Emit last fiemap cache
2496  *
2497  * The last fiemap cache may still be cached in the following case:
2498  * 0		      4k		    8k
2499  * |<- Fiemap range ->|
2500  * |<------------  First extent ----------->|
2501  *
2502  * In this case, the first extent range will be cached but not emitted.
2503  * So we must emit it before ending extent_fiemap().
2504  */
2505 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
2506 				  struct fiemap_cache *cache)
2507 {
2508 	int ret;
2509 
2510 	if (!cache->cached)
2511 		return 0;
2512 
2513 	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2514 				      cache->len, cache->flags);
2515 	cache->cached = false;
2516 	if (ret > 0)
2517 		ret = 0;
2518 	return ret;
2519 }
2520 
2521 static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
2522 {
2523 	struct extent_buffer *clone;
2524 	struct btrfs_key key;
2525 	int slot;
2526 	int ret;
2527 
2528 	path->slots[0]++;
2529 	if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2530 		return 0;
2531 
2532 	ret = btrfs_next_leaf(inode->root, path);
2533 	if (ret != 0)
2534 		return ret;
2535 
2536 	/*
2537 	 * Don't bother with cloning if there are no more file extent items for
2538 	 * our inode.
2539 	 */
2540 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2541 	if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY)
2542 		return 1;
2543 
2544 	/* See the comment at fiemap_search_slot() about why we clone. */
2545 	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2546 	if (!clone)
2547 		return -ENOMEM;
2548 
2549 	slot = path->slots[0];
2550 	btrfs_release_path(path);
2551 	path->nodes[0] = clone;
2552 	path->slots[0] = slot;
2553 
2554 	return 0;
2555 }
2556 
2557 /*
2558  * Search for the first file extent item that starts at a given file offset or
2559  * the one that starts immediately before that offset.
2560  * Returns: 0 on success, < 0 on error, 1 if not found.
2561  */
2562 static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2563 			      u64 file_offset)
2564 {
2565 	const u64 ino = btrfs_ino(inode);
2566 	struct btrfs_root *root = inode->root;
2567 	struct extent_buffer *clone;
2568 	struct btrfs_key key;
2569 	int slot;
2570 	int ret;
2571 
2572 	key.objectid = ino;
2573 	key.type = BTRFS_EXTENT_DATA_KEY;
2574 	key.offset = file_offset;
2575 
2576 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2577 	if (ret < 0)
2578 		return ret;
2579 
2580 	if (ret > 0 && path->slots[0] > 0) {
2581 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2582 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2583 			path->slots[0]--;
2584 	}
2585 
2586 	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2587 		ret = btrfs_next_leaf(root, path);
2588 		if (ret != 0)
2589 			return ret;
2590 
2591 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2592 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2593 			return 1;
2594 	}
2595 
2596 	/*
2597 	 * We clone the leaf and use it during fiemap. This is because while
2598 	 * using the leaf we do expensive things like checking if an extent is
2599 	 * shared, which can take a long time. In order to prevent blocking
2600 	 * other tasks for too long, we use a clone of the leaf. We have locked
2601 	 * the file range in the inode's io tree, so we know none of our file
2602 	 * extent items can change. This way we avoid blocking other tasks that
2603 	 * want to insert items for other inodes in the same leaf or b+tree
2604 	 * rebalance operations (triggered for example when someone is trying
2605 	 * to push items into this leaf when trying to insert an item in a
2606 	 * neighbour leaf).
2607 	 * We also need the private clone because holding a read lock on an
2608 	 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2609 	 * when we call fiemap_fill_next_extent(), because that may cause a page
2610 	 * fault when filling the user space buffer with fiemap data.
2611 	 */
2612 	clone = btrfs_clone_extent_buffer(path->nodes[0]);
2613 	if (!clone)
2614 		return -ENOMEM;
2615 
2616 	slot = path->slots[0];
2617 	btrfs_release_path(path);
2618 	path->nodes[0] = clone;
2619 	path->slots[0] = slot;
2620 
2621 	return 0;
2622 }
2623 
2624 /*
2625  * Process a range which is a hole or a prealloc extent in the inode's subvolume
2626  * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2627  * extent. The end offset (@end) is inclusive.
2628  */
2629 static int fiemap_process_hole(struct btrfs_inode *inode,
2630 			       struct fiemap_extent_info *fieinfo,
2631 			       struct fiemap_cache *cache,
2632 			       struct extent_state **delalloc_cached_state,
2633 			       struct btrfs_backref_share_check_ctx *backref_ctx,
2634 			       u64 disk_bytenr, u64 extent_offset,
2635 			       u64 extent_gen,
2636 			       u64 start, u64 end)
2637 {
2638 	const u64 i_size = i_size_read(&inode->vfs_inode);
2639 	u64 cur_offset = start;
2640 	u64 last_delalloc_end = 0;
2641 	u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2642 	bool checked_extent_shared = false;
2643 	int ret;
2644 
2645 	/*
2646 	 * There can be no delalloc past i_size, so don't waste time looking for
2647 	 * it beyond i_size.
2648 	 */
2649 	while (cur_offset < end && cur_offset < i_size) {
2650 		u64 delalloc_start;
2651 		u64 delalloc_end;
2652 		u64 prealloc_start;
2653 		u64 prealloc_len = 0;
2654 		bool delalloc;
2655 
2656 		delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2657 							delalloc_cached_state,
2658 							&delalloc_start,
2659 							&delalloc_end);
2660 		if (!delalloc)
2661 			break;
2662 
2663 		/*
2664 		 * If this is a prealloc extent we have to report every section
2665 		 * of it that has no delalloc.
2666 		 */
2667 		if (disk_bytenr != 0) {
2668 			if (last_delalloc_end == 0) {
2669 				prealloc_start = start;
2670 				prealloc_len = delalloc_start - start;
2671 			} else {
2672 				prealloc_start = last_delalloc_end + 1;
2673 				prealloc_len = delalloc_start - prealloc_start;
2674 			}
2675 		}
2676 
2677 		if (prealloc_len > 0) {
2678 			if (!checked_extent_shared && fieinfo->fi_extents_max) {
2679 				ret = btrfs_is_data_extent_shared(inode,
2680 								  disk_bytenr,
2681 								  extent_gen,
2682 								  backref_ctx);
2683 				if (ret < 0)
2684 					return ret;
2685 				else if (ret > 0)
2686 					prealloc_flags |= FIEMAP_EXTENT_SHARED;
2687 
2688 				checked_extent_shared = true;
2689 			}
2690 			ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2691 						 disk_bytenr + extent_offset,
2692 						 prealloc_len, prealloc_flags);
2693 			if (ret)
2694 				return ret;
2695 			extent_offset += prealloc_len;
2696 		}
2697 
2698 		ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2699 					 delalloc_end + 1 - delalloc_start,
2700 					 FIEMAP_EXTENT_DELALLOC |
2701 					 FIEMAP_EXTENT_UNKNOWN);
2702 		if (ret)
2703 			return ret;
2704 
2705 		last_delalloc_end = delalloc_end;
2706 		cur_offset = delalloc_end + 1;
2707 		extent_offset += cur_offset - delalloc_start;
2708 		cond_resched();
2709 	}
2710 
2711 	/*
2712 	 * Either we found no delalloc for the whole prealloc extent or we have
2713 	 * a prealloc extent that spans i_size or starts at or after i_size.
2714 	 */
2715 	if (disk_bytenr != 0 && last_delalloc_end < end) {
2716 		u64 prealloc_start;
2717 		u64 prealloc_len;
2718 
2719 		if (last_delalloc_end == 0) {
2720 			prealloc_start = start;
2721 			prealloc_len = end + 1 - start;
2722 		} else {
2723 			prealloc_start = last_delalloc_end + 1;
2724 			prealloc_len = end + 1 - prealloc_start;
2725 		}
2726 
2727 		if (!checked_extent_shared && fieinfo->fi_extents_max) {
2728 			ret = btrfs_is_data_extent_shared(inode,
2729 							  disk_bytenr,
2730 							  extent_gen,
2731 							  backref_ctx);
2732 			if (ret < 0)
2733 				return ret;
2734 			else if (ret > 0)
2735 				prealloc_flags |= FIEMAP_EXTENT_SHARED;
2736 		}
2737 		ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2738 					 disk_bytenr + extent_offset,
2739 					 prealloc_len, prealloc_flags);
2740 		if (ret)
2741 			return ret;
2742 	}
2743 
2744 	return 0;
2745 }
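
/*
 * Worked example (hypothetical layout): for a 1M prealloc extent with a single
 * delalloc range over [256K, 512K), the loop above emits prealloc [0, 256K) as
 * UNWRITTEN, then the delalloc range as DELALLOC|UNKNOWN, and the trailing
 * [512K, 1M) is emitted as UNWRITTEN by the block after the loop.
 */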
2746 
2747 static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
2748 					  struct btrfs_path *path,
2749 					  u64 *last_extent_end_ret)
2750 {
2751 	const u64 ino = btrfs_ino(inode);
2752 	struct btrfs_root *root = inode->root;
2753 	struct extent_buffer *leaf;
2754 	struct btrfs_file_extent_item *ei;
2755 	struct btrfs_key key;
2756 	u64 disk_bytenr;
2757 	int ret;
2758 
2759 	/*
2760 	 * Lookup the last file extent. We're not using i_size here because
2761 	 * there might be preallocation past i_size.
2762 	 */
2763 	ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
2764 	/* There can't be a file extent item at offset (u64)-1 */
2765 	ASSERT(ret != 0);
2766 	if (ret < 0)
2767 		return ret;
2768 
2769 	/*
2770 	 * For a non-existing key, btrfs_search_slot() always leaves us at a
2771 	 * slot > 0, except if the btree is empty, which is impossible because
2772 	 * at least it has the inode item for this inode and all the items for
2773 	 * the root inode 256.
2774 	 */
2775 	ASSERT(path->slots[0] > 0);
2776 	path->slots[0]--;
2777 	leaf = path->nodes[0];
2778 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2779 	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
2780 		/* No file extent items in the subvolume tree. */
2781 		*last_extent_end_ret = 0;
2782 		return 0;
2783 	}
2784 
2785 	/*
2786 	 * For an inline extent, the disk_bytenr is where the inline data starts,
2787 	 * so first check if we have an inline extent item before checking if we
2788 	 * have an implicit hole (disk_bytenr == 0).
2789 	 */
2790 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
2791 	if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
2792 		*last_extent_end_ret = btrfs_file_extent_end(path);
2793 		return 0;
2794 	}
2795 
2796 	/*
2797 	 * Find the last file extent item that is not a hole (when NO_HOLES is
2798 	 * not enabled). This should take at most 2 iterations in the worst
2799 	 * case: we have one hole file extent item at slot 0 of a leaf and
2800 	 * another hole file extent item as the last item in the previous leaf.
2801 	 * This is because we merge file extent items that represent holes.
2802 	 */
2803 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2804 	while (disk_bytenr == 0) {
2805 		ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
2806 		if (ret < 0) {
2807 			return ret;
2808 		} else if (ret > 0) {
2809 			/* No file extent items that are not holes. */
2810 			*last_extent_end_ret = 0;
2811 			return 0;
2812 		}
2813 		leaf = path->nodes[0];
2814 		ei = btrfs_item_ptr(leaf, path->slots[0],
2815 				    struct btrfs_file_extent_item);
2816 		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2817 	}
2818 
2819 	*last_extent_end_ret = btrfs_file_extent_end(path);
2820 	return 0;
2821 }
2822 
2823 int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
2824 		  u64 start, u64 len)
2825 {
2826 	const u64 ino = btrfs_ino(inode);
2827 	struct extent_state *cached_state = NULL;
2828 	struct extent_state *delalloc_cached_state = NULL;
2829 	struct btrfs_path *path;
2830 	struct fiemap_cache cache = { 0 };
2831 	struct btrfs_backref_share_check_ctx *backref_ctx;
2832 	u64 last_extent_end;
2833 	u64 prev_extent_end;
2834 	u64 lockstart;
2835 	u64 lockend;
2836 	bool stopped = false;
2837 	int ret;
2838 
2839 	backref_ctx = btrfs_alloc_backref_share_check_ctx();
2840 	path = btrfs_alloc_path();
2841 	if (!backref_ctx || !path) {
2842 		ret = -ENOMEM;
2843 		goto out;
2844 	}
2845 
2846 	lockstart = round_down(start, inode->root->fs_info->sectorsize);
2847 	lockend = round_up(start + len, inode->root->fs_info->sectorsize);
2848 	prev_extent_end = lockstart;
2849 
2850 	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
2851 	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
2852 
2853 	ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
2854 	if (ret < 0)
2855 		goto out_unlock;
2856 	btrfs_release_path(path);
2857 
2858 	path->reada = READA_FORWARD;
2859 	ret = fiemap_search_slot(inode, path, lockstart);
2860 	if (ret < 0) {
2861 		goto out_unlock;
2862 	} else if (ret > 0) {
2863 		/*
2864 		 * No file extent item found, but we may have delalloc between
2865 		 * the current offset and i_size. So check for that.
2866 		 */
2867 		ret = 0;
2868 		goto check_eof_delalloc;
2869 	}
2870 
2871 	while (prev_extent_end < lockend) {
2872 		struct extent_buffer *leaf = path->nodes[0];
2873 		struct btrfs_file_extent_item *ei;
2874 		struct btrfs_key key;
2875 		u64 extent_end;
2876 		u64 extent_len;
2877 		u64 extent_offset = 0;
2878 		u64 extent_gen;
2879 		u64 disk_bytenr = 0;
2880 		u64 flags = 0;
2881 		int extent_type;
2882 		u8 compression;
2883 
2884 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2885 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2886 			break;
2887 
2888 		extent_end = btrfs_file_extent_end(path);
2889 
2890 		/*
2891 		 * The first iteration can leave us at an extent item that ends
2892 		 * before our range's start. Move to the next item.
2893 		 */
2894 		if (extent_end <= lockstart)
2895 			goto next_item;
2896 
2897 		backref_ctx->curr_leaf_bytenr = leaf->start;
2898 
2899 		/* We have an implicit hole (NO_HOLES feature enabled). */
2900 		if (prev_extent_end < key.offset) {
2901 			const u64 range_end = min(key.offset, lockend) - 1;
2902 
2903 			ret = fiemap_process_hole(inode, fieinfo, &cache,
2904 						  &delalloc_cached_state,
2905 						  backref_ctx, 0, 0, 0,
2906 						  prev_extent_end, range_end);
2907 			if (ret < 0) {
2908 				goto out_unlock;
2909 			} else if (ret > 0) {
2910 				/* fiemap_fill_next_extent() told us to stop. */
2911 				stopped = true;
2912 				break;
2913 			}
2914 
2915 			/* We've reached the end of the fiemap range, stop. */
2916 			if (key.offset >= lockend) {
2917 				stopped = true;
2918 				break;
2919 			}
2920 		}
2921 
2922 		extent_len = extent_end - key.offset;
2923 		ei = btrfs_item_ptr(leaf, path->slots[0],
2924 				    struct btrfs_file_extent_item);
2925 		compression = btrfs_file_extent_compression(leaf, ei);
2926 		extent_type = btrfs_file_extent_type(leaf, ei);
2927 		extent_gen = btrfs_file_extent_generation(leaf, ei);
2928 
2929 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2930 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2931 			if (compression == BTRFS_COMPRESS_NONE)
2932 				extent_offset = btrfs_file_extent_offset(leaf, ei);
2933 		}
2934 
2935 		if (compression != BTRFS_COMPRESS_NONE)
2936 			flags |= FIEMAP_EXTENT_ENCODED;
2937 
2938 		if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2939 			flags |= FIEMAP_EXTENT_DATA_INLINE;
2940 			flags |= FIEMAP_EXTENT_NOT_ALIGNED;
2941 			ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
2942 						 extent_len, flags);
2943 		} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
2944 			ret = fiemap_process_hole(inode, fieinfo, &cache,
2945 						  &delalloc_cached_state,
2946 						  backref_ctx,
2947 						  disk_bytenr, extent_offset,
2948 						  extent_gen, key.offset,
2949 						  extent_end - 1);
2950 		} else if (disk_bytenr == 0) {
2951 			/* We have an explicit hole. */
2952 			ret = fiemap_process_hole(inode, fieinfo, &cache,
2953 						  &delalloc_cached_state,
2954 						  backref_ctx, 0, 0, 0,
2955 						  key.offset, extent_end - 1);
2956 		} else {
2957 			/* We have a regular extent. */
2958 			if (fieinfo->fi_extents_max) {
2959 				ret = btrfs_is_data_extent_shared(inode,
2960 								  disk_bytenr,
2961 								  extent_gen,
2962 								  backref_ctx);
2963 				if (ret < 0)
2964 					goto out_unlock;
2965 				else if (ret > 0)
2966 					flags |= FIEMAP_EXTENT_SHARED;
2967 			}
2968 
2969 			ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
2970 						 disk_bytenr + extent_offset,
2971 						 extent_len, flags);
2972 		}
2973 
2974 		if (ret < 0) {
2975 			goto out_unlock;
2976 		} else if (ret > 0) {
2977 			/* fiemap_fill_next_extent() told us to stop. */
2978 			stopped = true;
2979 			break;
2980 		}
2981 
2982 		prev_extent_end = extent_end;
2983 next_item:
2984 		if (fatal_signal_pending(current)) {
2985 			ret = -EINTR;
2986 			goto out_unlock;
2987 		}
2988 
2989 		ret = fiemap_next_leaf_item(inode, path);
2990 		if (ret < 0) {
2991 			goto out_unlock;
2992 		} else if (ret > 0) {
2993 			/* No more file extent items for this inode. */
2994 			break;
2995 		}
2996 		cond_resched();
2997 	}
2998 
2999 check_eof_delalloc:
3000 	/*
3001 	 * Release (and free) the path before emitting any final entries to
3002 	 * fiemap_fill_next_extent() to keep lockdep happy. This is because
3003 	 * once we find no more file extent items exist, we may have a
3004 	 * non-cloned leaf, and fiemap_fill_next_extent() can trigger page
3005 	 * faults when copying data to the user space buffer.
3006 	 */
3007 	btrfs_free_path(path);
3008 	path = NULL;
3009 
3010 	if (!stopped && prev_extent_end < lockend) {
3011 		ret = fiemap_process_hole(inode, fieinfo, &cache,
3012 					  &delalloc_cached_state, backref_ctx,
3013 					  0, 0, 0, prev_extent_end, lockend - 1);
3014 		if (ret < 0)
3015 			goto out_unlock;
3016 		prev_extent_end = lockend;
3017 	}
3018 
3019 	if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3020 		const u64 i_size = i_size_read(&inode->vfs_inode);
3021 
3022 		if (prev_extent_end < i_size) {
3023 			u64 delalloc_start;
3024 			u64 delalloc_end;
3025 			bool delalloc;
3026 
3027 			delalloc = btrfs_find_delalloc_in_range(inode,
3028 								prev_extent_end,
3029 								i_size - 1,
3030 								&delalloc_cached_state,
3031 								&delalloc_start,
3032 								&delalloc_end);
3033 			if (!delalloc)
3034 				cache.flags |= FIEMAP_EXTENT_LAST;
3035 		} else {
3036 			cache.flags |= FIEMAP_EXTENT_LAST;
3037 		}
3038 	}
3039 
3040 	ret = emit_last_fiemap_cache(fieinfo, &cache);
3041 
3042 out_unlock:
3043 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3044 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3045 out:
3046 	free_extent_state(delalloc_cached_state);
3047 	btrfs_free_backref_share_ctx(backref_ctx);
3048 	btrfs_free_path(path);
3049 	return ret;
3050 }
3051 
3052 static void __free_extent_buffer(struct extent_buffer *eb)
3053 {
3054 	kmem_cache_free(extent_buffer_cache, eb);
3055 }
3056 
3057 static int extent_buffer_under_io(const struct extent_buffer *eb)
3058 {
3059 	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3060 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3061 }
3062 
3063 static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
3064 {
3065 	struct btrfs_subpage *subpage;
3066 
3067 	lockdep_assert_held(&page->mapping->private_lock);
3068 
3069 	if (PagePrivate(page)) {
3070 		subpage = (struct btrfs_subpage *)page->private;
3071 		if (atomic_read(&subpage->eb_refs))
3072 			return true;
3073 		/*
3074 		 * Even if there are no eb refs here, we may still have an
3075 		 * end_page_read() call relying on page::private.
3076 		 */
3077 		if (atomic_read(&subpage->readers))
3078 			return true;
3079 	}
3080 	return false;
3081 }
3082 
3083 static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
3084 {
3085 	struct btrfs_fs_info *fs_info = eb->fs_info;
3086 	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3087 
3088 	/*
3089 	 * For mapped eb, we're going to change the page private, which should
3090 	 * be done under the private_lock.
3091 	 */
3092 	if (mapped)
3093 		spin_lock(&page->mapping->private_lock);
3094 
3095 	if (!PagePrivate(page)) {
3096 		if (mapped)
3097 			spin_unlock(&page->mapping->private_lock);
3098 		return;
3099 	}
3100 
3101 	if (fs_info->nodesize >= PAGE_SIZE) {
3102 		/*
3103 		 * We do this since we'll remove the pages after we've
3104 		 * removed the eb from the radix tree, so we could race
3105 		 * and have this page now attached to the new eb.  So
3106 		 * only clear page_private if it's still connected to
3107 		 * this eb.
3108 		 */
3109 		if (PagePrivate(page) &&
3110 		    page->private == (unsigned long)eb) {
3111 			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3112 			BUG_ON(PageDirty(page));
3113 			BUG_ON(PageWriteback(page));
3114 			/*
3115 			 * We need to make sure we haven't been attached
3116 			 * to a new eb.
3117 			 */
3118 			detach_page_private(page);
3119 		}
3120 		if (mapped)
3121 			spin_unlock(&page->mapping->private_lock);
3122 		return;
3123 	}
3124 
3125 	/*
3126 	 * For subpage, we can have dummy eb with page private.  In this case,
3127 	 * we can directly detach the private as such a page is only attached to
3128 	 * one dummy eb, no sharing.
3129 	 */
3130 	if (!mapped) {
3131 		btrfs_detach_subpage(fs_info, page);
3132 		return;
3133 	}
3134 
3135 	btrfs_page_dec_eb_refs(fs_info, page);
3136 
3137 	/*
3138 	 * We can only detach the page private if there are no other ebs in the
3139 	 * page range and no unfinished IO.
3140 	 */
3141 	if (!page_range_has_eb(fs_info, page))
3142 		btrfs_detach_subpage(fs_info, page);
3143 
3144 	spin_unlock(&page->mapping->private_lock);
3145 }
3146 
3147 /* Release all pages attached to the extent buffer */
3148 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3149 {
3150 	int i;
3151 	int num_pages;
3152 
3153 	ASSERT(!extent_buffer_under_io(eb));
3154 
3155 	num_pages = num_extent_pages(eb);
3156 	for (i = 0; i < num_pages; i++) {
3157 		struct page *page = eb->pages[i];
3158 
3159 		if (!page)
3160 			continue;
3161 
3162 		detach_extent_buffer_page(eb, page);
3163 
3164 		/* One for when we allocated the page */
3165 		put_page(page);
3166 	}
3167 }
3168 
3169 /*
3170  * Helper for releasing the extent buffer.
3171  */
3172 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3173 {
3174 	btrfs_release_extent_buffer_pages(eb);
3175 	btrfs_leak_debug_del_eb(eb);
3176 	__free_extent_buffer(eb);
3177 }
3178 
3179 static struct extent_buffer *
3180 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
3181 		      unsigned long len)
3182 {
3183 	struct extent_buffer *eb = NULL;
3184 
3185 	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
3186 	eb->start = start;
3187 	eb->len = len;
3188 	eb->fs_info = fs_info;
3189 	init_rwsem(&eb->lock);
3190 
3191 	btrfs_leak_debug_add_eb(eb);
3192 
3193 	spin_lock_init(&eb->refs_lock);
3194 	atomic_set(&eb->refs, 1);
3195 
3196 	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
3197 
3198 	return eb;
3199 }
3200 
3201 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
3202 {
3203 	int i;
3204 	struct extent_buffer *new;
3205 	int num_pages = num_extent_pages(src);
3206 	int ret;
3207 
3208 	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
3209 	if (new == NULL)
3210 		return NULL;
3211 
3212 	/*
3213 	 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
3214 	 * btrfs_release_extent_buffer() has different behavior for
3215 	 * an UNMAPPED subpage extent buffer.
3216 	 */
3217 	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3218 
3219 	ret = btrfs_alloc_page_array(num_pages, new->pages);
3220 	if (ret) {
3221 		btrfs_release_extent_buffer(new);
3222 		return NULL;
3223 	}
3224 
3225 	for (i = 0; i < num_pages; i++) {
3226 		int ret;
3227 		struct page *p = new->pages[i];
3228 
3229 		ret = attach_extent_buffer_page(new, p, NULL);
3230 		if (ret < 0) {
3231 			btrfs_release_extent_buffer(new);
3232 			return NULL;
3233 		}
3234 		WARN_ON(PageDirty(p));
3235 	}
3236 	copy_extent_buffer_full(new, src);
3237 	set_extent_buffer_uptodate(new);
3238 
3239 	return new;
3240 }
3241 
3242 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3243 						  u64 start, unsigned long len)
3244 {
3245 	struct extent_buffer *eb;
3246 	int num_pages;
3247 	int i;
3248 	int ret;
3249 
3250 	eb = __alloc_extent_buffer(fs_info, start, len);
3251 	if (!eb)
3252 		return NULL;
3253 
3254 	num_pages = num_extent_pages(eb);
3255 	ret = btrfs_alloc_page_array(num_pages, eb->pages);
3256 	if (ret)
3257 		goto err;
3258 
3259 	for (i = 0; i < num_pages; i++) {
3260 		struct page *p = eb->pages[i];
3261 
3262 		ret = attach_extent_buffer_page(eb, p, NULL);
3263 		if (ret < 0)
3264 			goto err;
3265 	}
3266 
3267 	set_extent_buffer_uptodate(eb);
3268 	btrfs_set_header_nritems(eb, 0);
3269 	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3270 
3271 	return eb;
3272 err:
3273 	for (i = 0; i < num_pages; i++) {
3274 		if (eb->pages[i]) {
3275 			detach_extent_buffer_page(eb, eb->pages[i]);
3276 			__free_page(eb->pages[i]);
3277 		}
3278 	}
3279 	__free_extent_buffer(eb);
3280 	return NULL;
3281 }
3282 
3283 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3284 						u64 start)
3285 {
3286 	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
3287 }
3288 
3289 static void check_buffer_tree_ref(struct extent_buffer *eb)
3290 {
3291 	int refs;
3292 	/*
3293 	 * The TREE_REF bit is first set when the extent_buffer is added
3294 	 * to the radix tree. It is also set again, if it was cleared, when a new
3295 	 * reference is created by find_extent_buffer.
3296 	 *
3297 	 * It is only cleared in two cases: freeing the last non-tree
3298 	 * reference to the extent_buffer when its STALE bit is set or
3299 	 * calling release_folio when the tree reference is the only reference.
3300 	 *
3301 	 * In both cases, care is taken to ensure that the extent_buffer's
3302 	 * pages are not under io. However, release_folio can be concurrently
3303 	 * called with creating new references, which is prone to race
3304 	 * conditions between the calls to check_buffer_tree_ref in those
3305 	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3306 	 *
3307 	 * The actual lifetime of the extent_buffer in the radix tree is
3308 	 * adequately protected by the refcount, but the TREE_REF bit and
3309 	 * its corresponding reference are not. To protect against this
3310 	 * class of races, we call check_buffer_tree_ref from the codepaths
3311 	 * which trigger io. Note that once io is initiated, TREE_REF can no
3312 	 * longer be cleared, so that is the moment at which any such race is
3313 	 * best fixed.
3314 	 */
3315 	refs = atomic_read(&eb->refs);
3316 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3317 		return;
3318 
3319 	spin_lock(&eb->refs_lock);
3320 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3321 		atomic_inc(&eb->refs);
3322 	spin_unlock(&eb->refs_lock);
3323 }
3324 
3325 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
3326 		struct page *accessed)
3327 {
3328 	int num_pages, i;
3329 
3330 	check_buffer_tree_ref(eb);
3331 
3332 	num_pages = num_extent_pages(eb);
3333 	for (i = 0; i < num_pages; i++) {
3334 		struct page *p = eb->pages[i];
3335 
3336 		if (p != accessed)
3337 			mark_page_accessed(p);
3338 	}
3339 }
3340 
3341 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3342 					 u64 start)
3343 {
3344 	struct extent_buffer *eb;
3345 
3346 	eb = find_extent_buffer_nolock(fs_info, start);
3347 	if (!eb)
3348 		return NULL;
3349 	/*
3350 	 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3351 	 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3352 	 * another task running free_extent_buffer() might have seen that flag
3353 	 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3354 	 * writeback flags not set) and it's still in the tree (flag
3355 	 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3356 	 * decrementing the extent buffer's reference count twice.  So here we
3357 	 * could race and increment the eb's reference count, clear its stale
3358 	 * flag, mark it as dirty and drop our reference before the other task
3359 	 * finishes executing free_extent_buffer, which would later result in
3360 	 * an attempt to free an extent buffer that is dirty.
3361 	 */
3362 	if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3363 		spin_lock(&eb->refs_lock);
3364 		spin_unlock(&eb->refs_lock);
3365 	}
3366 	mark_extent_buffer_accessed(eb, NULL);
3367 	return eb;
3368 }
3369 
3370 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3371 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3372 					u64 start)
3373 {
3374 	struct extent_buffer *eb, *exists = NULL;
3375 	int ret;
3376 
3377 	eb = find_extent_buffer(fs_info, start);
3378 	if (eb)
3379 		return eb;
3380 	eb = alloc_dummy_extent_buffer(fs_info, start);
3381 	if (!eb)
3382 		return ERR_PTR(-ENOMEM);
3383 	eb->fs_info = fs_info;
3384 again:
3385 	ret = radix_tree_preload(GFP_NOFS);
3386 	if (ret) {
3387 		exists = ERR_PTR(ret);
3388 		goto free_eb;
3389 	}
3390 	spin_lock(&fs_info->buffer_lock);
3391 	ret = radix_tree_insert(&fs_info->buffer_radix,
3392 				start >> fs_info->sectorsize_bits, eb);
3393 	spin_unlock(&fs_info->buffer_lock);
3394 	radix_tree_preload_end();
3395 	if (ret == -EEXIST) {
3396 		exists = find_extent_buffer(fs_info, start);
3397 		if (exists)
3398 			goto free_eb;
3399 		else
3400 			goto again;
3401 	}
3402 	check_buffer_tree_ref(eb);
3403 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3404 
3405 	return eb;
3406 free_eb:
3407 	btrfs_release_extent_buffer(eb);
3408 	return exists;
3409 }
3410 #endif
3411 
3412 static struct extent_buffer *grab_extent_buffer(
3413 		struct btrfs_fs_info *fs_info, struct page *page)
3414 {
3415 	struct extent_buffer *exists;
3416 
3417 	/*
3418 	 * For subpage case, we completely rely on radix tree to ensure we
3419 	 * don't try to insert two ebs for the same bytenr.  So here we always
3420 	 * return NULL and just continue.
3421 	 */
3422 	if (fs_info->nodesize < PAGE_SIZE)
3423 		return NULL;
3424 
3425 	/* Page not yet attached to an extent buffer */
3426 	if (!PagePrivate(page))
3427 		return NULL;
3428 
3429 	/*
3430 	 * We could have already allocated an eb for this page and attached one,
3431 	 * so let's see if we can get a ref on the existing eb. If we can, we
3432 	 * know it's good and we can just return that one; otherwise we know we
3433 	 * can just overwrite page->private.
3434 	 */
3435 	exists = (struct extent_buffer *)page->private;
3436 	if (atomic_inc_not_zero(&exists->refs))
3437 		return exists;
3438 
3439 	WARN_ON(PageDirty(page));
3440 	detach_page_private(page);
3441 	return NULL;
3442 }
3443 
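/*
 * Sanity check the alignment of a tree block at @start: it must be sector
 * aligned, must not cross a page boundary when nodesize < PAGE_SIZE, and
 * must be page aligned otherwise.  Returns 0 if valid, -EINVAL if not.
 */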
3444 static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3445 {
3446 	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3447 		btrfs_err(fs_info, "bad tree block start %llu", start);
3448 		return -EINVAL;
3449 	}
3450 
3451 	if (fs_info->nodesize < PAGE_SIZE &&
3452 	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3453 		btrfs_err(fs_info,
3454 		"tree block crosses page boundary, start %llu nodesize %u",
3455 			  start, fs_info->nodesize);
3456 		return -EINVAL;
3457 	}
3458 	if (fs_info->nodesize >= PAGE_SIZE &&
3459 	    !PAGE_ALIGNED(start)) {
3460 		btrfs_err(fs_info,
3461 		"tree block is not page aligned, start %llu nodesize %u",
3462 			  start, fs_info->nodesize);
3463 		return -EINVAL;
3464 	}
3465 	return 0;
3466 }
3467 
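/*
 * Return the extent buffer for the tree block at @start, allocating it and
 * its pages from the btree inode's page cache and inserting it into the
 * buffer radix tree if it is not cached yet.  @owner_root and @level are
 * only used to pick the lockdep class.  Returns the buffer with a reference
 * held, or an ERR_PTR() on failure.
 */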
3468 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3469 					  u64 start, u64 owner_root, int level)
3470 {
3471 	unsigned long len = fs_info->nodesize;
3472 	int num_pages;
3473 	int i;
3474 	unsigned long index = start >> PAGE_SHIFT;
3475 	struct extent_buffer *eb;
3476 	struct extent_buffer *exists = NULL;
3477 	struct page *p;
3478 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
3479 	struct btrfs_subpage *prealloc = NULL;
3480 	u64 lockdep_owner = owner_root;
3481 	int uptodate = 1;
3482 	int ret;
3483 
3484 	if (check_eb_alignment(fs_info, start))
3485 		return ERR_PTR(-EINVAL);
3486 
3487 #if BITS_PER_LONG == 32
3488 	if (start >= MAX_LFS_FILESIZE) {
3489 		btrfs_err_rl(fs_info,
3490 		"extent buffer %llu is beyond 32bit page cache limit", start);
3491 		btrfs_err_32bit_limit(fs_info);
3492 		return ERR_PTR(-EOVERFLOW);
3493 	}
3494 	if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3495 		btrfs_warn_32bit_limit(fs_info);
3496 #endif
3497 
3498 	eb = find_extent_buffer(fs_info, start);
3499 	if (eb)
3500 		return eb;
3501 
3502 	eb = __alloc_extent_buffer(fs_info, start, len);
3503 	if (!eb)
3504 		return ERR_PTR(-ENOMEM);
3505 
3506 	/*
3507 	 * The reloc trees are just snapshots, so we need them to appear to be
3508 	 * just like any other fs tree WRT lockdep.
3509 	 */
3510 	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3511 		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3512 
3513 	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3514 
3515 	num_pages = num_extent_pages(eb);
3516 
3517 	/*
3518 	 * Preallocate page->private for the subpage case, so that we won't
3519 	 * allocate memory with private_lock or the page lock held.
3520 	 *
3521 	 * The memory will be freed by attach_extent_buffer_page() or freed
3522 	 * manually if we exit earlier.
3523 	 */
3524 	if (fs_info->nodesize < PAGE_SIZE) {
3525 		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3526 		if (IS_ERR(prealloc)) {
3527 			exists = ERR_CAST(prealloc);
3528 			goto free_eb;
3529 		}
3530 	}
3531 
3532 	for (i = 0; i < num_pages; i++, index++) {
3533 		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
3534 		if (!p) {
3535 			exists = ERR_PTR(-ENOMEM);
3536 			btrfs_free_subpage(prealloc);
3537 			goto free_eb;
3538 		}
3539 
3540 		spin_lock(&mapping->private_lock);
3541 		exists = grab_extent_buffer(fs_info, p);
3542 		if (exists) {
3543 			spin_unlock(&mapping->private_lock);
3544 			unlock_page(p);
3545 			put_page(p);
3546 			mark_extent_buffer_accessed(exists, p);
3547 			btrfs_free_subpage(prealloc);
3548 			goto free_eb;
3549 		}
3550 		/* Should not fail, as we have preallocated the memory */
3551 		ret = attach_extent_buffer_page(eb, p, prealloc);
3552 		ASSERT(!ret);
3553 		/*
3554 		 * This informs detach_extent_buffer_page() that we have an extra
3555 		 * eb under allocation, so it won't release the page private
3556 		 * when the eb hasn't yet been inserted into the radix tree.
3557 		 *
3558 		 * The ref will be decreased when the eb releases the page, in
3559 		 * detach_extent_buffer_page().
3560 		 * Thus it needs no special handling in the error path.
3561 		 */
3562 		btrfs_page_inc_eb_refs(fs_info, p);
3563 		spin_unlock(&mapping->private_lock);
3564 
3565 		WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
3566 		eb->pages[i] = p;
3567 		if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len))
3568 			uptodate = 0;
3569 
3570 		/*
3571 		 * We can't unlock the pages just yet since the extent buffer
3572 		 * hasn't been properly inserted into the radix tree; this
3573 		 * opens a race with btree_release_folio, which can free a page
3574 		 * while we are still filling in all pages for the buffer and
3575 		 * we could crash.
3576 		 */
3577 	}
3578 	if (uptodate)
3579 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3580 again:
3581 	ret = radix_tree_preload(GFP_NOFS);
3582 	if (ret) {
3583 		exists = ERR_PTR(ret);
3584 		goto free_eb;
3585 	}
3586 
3587 	spin_lock(&fs_info->buffer_lock);
3588 	ret = radix_tree_insert(&fs_info->buffer_radix,
3589 				start >> fs_info->sectorsize_bits, eb);
3590 	spin_unlock(&fs_info->buffer_lock);
3591 	radix_tree_preload_end();
3592 	if (ret == -EEXIST) {
3593 		exists = find_extent_buffer(fs_info, start);
3594 		if (exists)
3595 			goto free_eb;
3596 		else
3597 			goto again;
3598 	}
3599 	/* add one reference for the tree */
3600 	check_buffer_tree_ref(eb);
3601 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3602 
3603 	/*
3604 	 * Now it's safe to unlock the pages because any calls to
3605 	 * btree_release_folio will correctly detect that a page belongs to a
3606 	 * live buffer and won't free them prematurely.
3607 	 */
3608 	for (i = 0; i < num_pages; i++)
3609 		unlock_page(eb->pages[i]);
3610 	return eb;
3611 
3612 free_eb:
3613 	WARN_ON(!atomic_dec_and_test(&eb->refs));
3614 	for (i = 0; i < num_pages; i++) {
3615 		if (eb->pages[i])
3616 			unlock_page(eb->pages[i]);
3617 	}
3618 
3619 	btrfs_release_extent_buffer(eb);
3620 	return exists;
3621 }
3622 
3623 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3624 {
3625 	struct extent_buffer *eb =
3626 			container_of(head, struct extent_buffer, rcu_head);
3627 
3628 	__free_extent_buffer(eb);
3629 }
3630 
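/*
 * Drop one reference with eb->refs_lock held.  If it was the last one,
 * remove the buffer from the radix tree (if it was ever inserted), release
 * its pages and free it, deferring the final free to an RCU grace period
 * for mapped buffers.  Always releases eb->refs_lock.
 *
 * Returns 1 if the buffer was freed, 0 otherwise.
 */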
3631 static int release_extent_buffer(struct extent_buffer *eb)
3632 	__releases(&eb->refs_lock)
3633 {
3634 	lockdep_assert_held(&eb->refs_lock);
3635 
3636 	WARN_ON(atomic_read(&eb->refs) == 0);
3637 	if (atomic_dec_and_test(&eb->refs)) {
3638 		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3639 			struct btrfs_fs_info *fs_info = eb->fs_info;
3640 
3641 			spin_unlock(&eb->refs_lock);
3642 
3643 			spin_lock(&fs_info->buffer_lock);
3644 			radix_tree_delete(&fs_info->buffer_radix,
3645 					  eb->start >> fs_info->sectorsize_bits);
3646 			spin_unlock(&fs_info->buffer_lock);
3647 		} else {
3648 			spin_unlock(&eb->refs_lock);
3649 		}
3650 
3651 		btrfs_leak_debug_del_eb(eb);
3652 		/* Should be safe to release our pages at this point */
3653 		btrfs_release_extent_buffer_pages(eb);
3654 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3655 		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3656 			__free_extent_buffer(eb);
3657 			return 1;
3658 		}
3659 #endif
3660 		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3661 		return 1;
3662 	}
3663 	spin_unlock(&eb->refs_lock);
3664 
3665 	return 0;
3666 }
3667 
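/*
 * Drop a reference on @eb.  While the refcount is high enough that this
 * cannot be the reference that frees the buffer (above 3 for mapped
 * buffers, above 1 for unmapped ones), the decrement is done locklessly
 * with a cmpxchg.  Otherwise take eb->refs_lock, drop the tree reference
 * if the buffer is stale and idle, and let release_extent_buffer() do the
 * final decrement.
 */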
3668 void free_extent_buffer(struct extent_buffer *eb)
3669 {
3670 	int refs;
3671 	if (!eb)
3672 		return;
3673 
3674 	refs = atomic_read(&eb->refs);
3675 	while (1) {
3676 		if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3677 		    || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3678 			refs == 1))
3679 			break;
3680 		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3681 			return;
3682 	}
3683 
3684 	spin_lock(&eb->refs_lock);
3685 	if (atomic_read(&eb->refs) == 2 &&
3686 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3687 	    !extent_buffer_under_io(eb) &&
3688 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3689 		atomic_dec(&eb->refs);
3690 
3691 	/*
3692 	 * I know this is terrible, but it's temporary until we stop tracking
3693 	 * the uptodate bits and such for the extent buffers.
3694 	 */
3695 	release_extent_buffer(eb);
3696 }
3697 
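/*
 * Like free_extent_buffer(), but first mark @eb stale.  If only the tree
 * and our caller hold references and the buffer is not under IO, the tree
 * reference is dropped right here, so the buffer also disappears from the
 * radix tree once the last reference goes away.
 */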
3698 void free_extent_buffer_stale(struct extent_buffer *eb)
3699 {
3700 	if (!eb)
3701 		return;
3702 
3703 	spin_lock(&eb->refs_lock);
3704 	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3705 
3706 	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3707 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3708 		atomic_dec(&eb->refs);
3709 	release_extent_buffer(eb);
3710 }
3711 
3712 static void btree_clear_page_dirty(struct page *page)
3713 {
3714 	ASSERT(PageDirty(page));
3715 	ASSERT(PageLocked(page));
3716 	clear_page_dirty_for_io(page);
3717 	xa_lock_irq(&page->mapping->i_pages);
3718 	if (!PageDirty(page))
3719 		__xa_clear_mark(&page->mapping->i_pages,
3720 				page_index(page), PAGECACHE_TAG_DIRTY);
3721 	xa_unlock_irq(&page->mapping->i_pages);
3722 }
3723 
3724 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3725 {
3726 	struct btrfs_fs_info *fs_info = eb->fs_info;
3727 	struct page *page = eb->pages[0];
3728 	bool last;
3729 
3730 	/* btree_clear_page_dirty() needs page locked */
3731 	lock_page(page);
3732 	last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
3733 						  eb->len);
3734 	if (last)
3735 		btree_clear_page_dirty(page);
3736 	unlock_page(page);
3737 	WARN_ON(atomic_read(&eb->refs) == 0);
3738 }
3739 
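/*
 * Clear the dirty state of @eb.  When @trans is given and the buffer was
 * not modified in the current transaction this is a no-op.  Otherwise clear
 * EXTENT_BUFFER_DIRTY, subtract the buffer length from the dirty metadata
 * accounting and clear the dirty bit on the pages (or the subpage range).
 */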
3740 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3741 			      struct extent_buffer *eb)
3742 {
3743 	struct btrfs_fs_info *fs_info = eb->fs_info;
3744 	int i;
3745 	int num_pages;
3746 	struct page *page;
3747 
3748 	btrfs_assert_tree_write_locked(eb);
3749 
3750 	if (trans && btrfs_header_generation(eb) != trans->transid)
3751 		return;
3752 
3753 	if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3754 		return;
3755 
3756 	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3757 				 fs_info->dirty_metadata_batch);
3758 
3759 	if (eb->fs_info->nodesize < PAGE_SIZE)
3760 		return clear_subpage_extent_buffer_dirty(eb);
3761 
3762 	num_pages = num_extent_pages(eb);
3763 
3764 	for (i = 0; i < num_pages; i++) {
3765 		page = eb->pages[i];
3766 		if (!PageDirty(page))
3767 			continue;
3768 		lock_page(page);
3769 		btree_clear_page_dirty(page);
3770 		unlock_page(page);
3771 	}
3772 	WARN_ON(atomic_read(&eb->refs) == 0);
3773 }
3774 
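/*
 * Mark @eb dirty: make sure it holds a tree reference, set
 * EXTENT_BUFFER_DIRTY and, if it was not already dirty, dirty all of its
 * pages (under the page lock for the subpage case) and add the buffer
 * length to the dirty metadata accounting.
 */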
3775 void set_extent_buffer_dirty(struct extent_buffer *eb)
3776 {
3777 	int i;
3778 	int num_pages;
3779 	bool was_dirty;
3780 
3781 	check_buffer_tree_ref(eb);
3782 
3783 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3784 
3785 	num_pages = num_extent_pages(eb);
3786 	WARN_ON(atomic_read(&eb->refs) == 0);
3787 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3788 
3789 	if (!was_dirty) {
3790 		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3791 
3792 		/*
3793 		 * For the subpage case, we can have other extent buffers in the
3794 		 * same page, and in clear_subpage_extent_buffer_dirty() we
3795 		 * have to clear the page dirty bit without the subpage lock held.
3796 		 * This can cause a race where our page dirty bit is cleared right
3797 		 * after we set it.
3798 		 *
3799 		 * Thankfully, clear_subpage_extent_buffer_dirty() already locks
3800 		 * its page for other reasons, so we can use the page lock to
3801 		 * prevent the above race.
3802 		 */
3803 		if (subpage)
3804 			lock_page(eb->pages[0]);
3805 		for (i = 0; i < num_pages; i++)
3806 			btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
3807 					     eb->start, eb->len);
3808 		if (subpage)
3809 			unlock_page(eb->pages[0]);
3810 		percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3811 					 eb->len,
3812 					 eb->fs_info->dirty_metadata_batch);
3813 	}
3814 #ifdef CONFIG_BTRFS_DEBUG
3815 	for (i = 0; i < num_pages; i++)
3816 		ASSERT(PageDirty(eb->pages[i]));
3817 #endif
3818 }
3819 
3820 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3821 {
3822 	struct btrfs_fs_info *fs_info = eb->fs_info;
3823 	struct page *page;
3824 	int num_pages;
3825 	int i;
3826 
3827 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3828 	num_pages = num_extent_pages(eb);
3829 	for (i = 0; i < num_pages; i++) {
3830 		page = eb->pages[i];
3831 		if (!page)
3832 			continue;
3833 
3834 		/*
3835 		 * This is special handling for metadata subpage, as regular
3836 		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
3837 		 */
3838 		if (fs_info->nodesize >= PAGE_SIZE)
3839 			ClearPageUptodate(page);
3840 		else
3841 			btrfs_subpage_clear_uptodate(fs_info, page, eb->start,
3842 						     eb->len);
3843 	}
3844 }
3845 
3846 void set_extent_buffer_uptodate(struct extent_buffer *eb)
3847 {
3848 	struct btrfs_fs_info *fs_info = eb->fs_info;
3849 	struct page *page;
3850 	int num_pages;
3851 	int i;
3852 
3853 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3854 	num_pages = num_extent_pages(eb);
3855 	for (i = 0; i < num_pages; i++) {
3856 		page = eb->pages[i];
3857 
3858 		/*
3859 		 * This is special handling for metadata subpage, as regular
3860 		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
3861 		 */
3862 		if (fs_info->nodesize >= PAGE_SIZE)
3863 			SetPageUptodate(page);
3864 		else
3865 			btrfs_subpage_set_uptodate(fs_info, page, eb->start,
3866 						   eb->len);
3867 	}
3868 }
3869 
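/*
 * End io handler for extent buffer reads: validate the buffer, propagate
 * the result to the buffer and page uptodate state, wake up anybody waiting
 * on EXTENT_BUFFER_READING and drop the reference taken for the read.
 */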
3870 static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
3871 {
3872 	struct extent_buffer *eb = bbio->private;
3873 	struct btrfs_fs_info *fs_info = eb->fs_info;
3874 	bool uptodate = !bbio->bio.bi_status;
3875 	struct bvec_iter_all iter_all;
3876 	struct bio_vec *bvec;
3877 	u32 bio_offset = 0;
3878 
3879 	eb->read_mirror = bbio->mirror_num;
3880 
3881 	if (uptodate &&
3882 	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3883 		uptodate = false;
3884 
3885 	if (uptodate) {
3886 		set_extent_buffer_uptodate(eb);
3887 	} else {
3888 		clear_extent_buffer_uptodate(eb);
3889 		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3890 	}
3891 
3892 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
3893 		u64 start = eb->start + bio_offset;
3894 		struct page *page = bvec->bv_page;
3895 		u32 len = bvec->bv_len;
3896 
3897 		if (uptodate)
3898 			btrfs_page_set_uptodate(fs_info, page, start, len);
3899 		else
3900 			btrfs_page_clear_uptodate(fs_info, page, start, len);
3901 
3902 		bio_offset += len;
3903 	}
3904 
3905 	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
3906 	smp_mb__after_atomic();
3907 	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
3908 	free_extent_buffer(eb);
3909 
3910 	bio_put(&bbio->bio);
3911 }
3912 
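/*
 * Read the content of @eb from disk if it is not uptodate yet.  Only one
 * read is in flight at a time (guarded by EXTENT_BUFFER_READING); later
 * callers just wait for it.  With @wait == WAIT_COMPLETE the function
 * blocks until the read finishes and returns -EIO if the buffer still is
 * not uptodate; with WAIT_NONE it returns right after submitting the bio.
 */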
3913 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
3914 			     struct btrfs_tree_parent_check *check)
3915 {
3916 	int num_pages = num_extent_pages(eb), i;
3917 	struct btrfs_bio *bbio;
3918 
3919 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3920 		return 0;
3921 
3922 	/*
3923 	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3924 	 * operation, which could potentially still be in flight.  In this case
3925 	 * we simply want to return an error.
3926 	 */
3927 	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3928 		return -EIO;
3929 
3930 	/* Someone else is already reading the buffer, just wait for it. */
3931 	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3932 		goto done;
3933 
3934 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3935 	eb->read_mirror = 0;
3936 	check_buffer_tree_ref(eb);
3937 	atomic_inc(&eb->refs);
3938 
3939 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3940 			       REQ_OP_READ | REQ_META, eb->fs_info,
3941 			       extent_buffer_read_end_io, eb);
3942 	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3943 	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3944 	bbio->file_offset = eb->start;
3945 	memcpy(&bbio->parent_check, check, sizeof(*check));
3946 	if (eb->fs_info->nodesize < PAGE_SIZE) {
3947 		__bio_add_page(&bbio->bio, eb->pages[0], eb->len,
3948 			       eb->start - page_offset(eb->pages[0]));
3949 	} else {
3950 		for (i = 0; i < num_pages; i++)
3951 			__bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0);
3952 	}
3953 	btrfs_submit_bio(bbio, mirror_num);
3954 
3955 done:
3956 	if (wait == WAIT_COMPLETE) {
3957 		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3958 		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3959 			return -EIO;
3960 	}
3961 
3962 	return 0;
3963 }
3964 
3965 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3966 			    unsigned long len)
3967 {
3968 	btrfs_warn(eb->fs_info,
3969 		"access to eb bytenr %llu len %lu out of range start %lu len %lu",
3970 		eb->start, eb->len, start, len);
3971 	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3972 
3973 	return true;
3974 }
3975 
3976 /*
3977  * Check if the [start, start + len) range is valid before reading/writing
3978  * the eb.
3979  * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3980  *
3981  * The caller should not touch the dst/src memory if this function returns an error.
3982  */
3983 static inline int check_eb_range(const struct extent_buffer *eb,
3984 				 unsigned long start, unsigned long len)
3985 {
3986 	unsigned long offset;
3987 
3988 	/* start, start + len should not go beyond eb->len nor overflow */
3989 	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3990 		return report_eb_range(eb, start, len);
3991 
3992 	return false;
3993 }
3994 
3995 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3996 			unsigned long start, unsigned long len)
3997 {
3998 	size_t cur;
3999 	size_t offset;
4000 	struct page *page;
4001 	char *kaddr;
4002 	char *dst = (char *)dstv;
4003 	unsigned long i = get_eb_page_index(start);
4004 
4005 	if (check_eb_range(eb, start, len))
4006 		return;
4007 
4008 	offset = get_eb_offset_in_page(eb, start);
4009 
4010 	while (len > 0) {
4011 		page = eb->pages[i];
4012 
4013 		cur = min(len, (PAGE_SIZE - offset));
4014 		kaddr = page_address(page);
4015 		memcpy(dst, kaddr + offset, cur);
4016 
4017 		dst += cur;
4018 		len -= cur;
4019 		offset = 0;
4020 		i++;
4021 	}
4022 }
4023 
4024 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4025 				       void __user *dstv,
4026 				       unsigned long start, unsigned long len)
4027 {
4028 	size_t cur;
4029 	size_t offset;
4030 	struct page *page;
4031 	char *kaddr;
4032 	char __user *dst = (char __user *)dstv;
4033 	unsigned long i = get_eb_page_index(start);
4034 	int ret = 0;
4035 
4036 	WARN_ON(start > eb->len);
4037 	WARN_ON(start + len > eb->start + eb->len);
4038 	WARN_ON(start + len > eb->len);
4039 	offset = get_eb_offset_in_page(eb, start);
4040 
4041 	while (len > 0) {
4042 		page = eb->pages[i];
4043 
4044 		cur = min(len, (PAGE_SIZE - offset));
4045 		kaddr = page_address(page);
4046 		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4047 			ret = -EFAULT;
4048 			break;
4049 		}
4050 
4051 		dst += cur;
4052 		len -= cur;
4053 		offset = 0;
4054 		i++;
4055 	}
4056 
4057 	return ret;
4058 }
4059 
4060 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4061 			 unsigned long start, unsigned long len)
4062 {
4063 	size_t cur;
4064 	size_t offset;
4065 	struct page *page;
4066 	char *kaddr;
4067 	char *ptr = (char *)ptrv;
4068 	unsigned long i = get_eb_page_index(start);
4069 	int ret = 0;
4070 
4071 	if (check_eb_range(eb, start, len))
4072 		return -EINVAL;
4073 
4074 	offset = get_eb_offset_in_page(eb, start);
4075 
4076 	while (len > 0) {
4077 		page = eb->pages[i];
4078 
4079 		cur = min(len, (PAGE_SIZE - offset));
4080 
4081 		kaddr = page_address(page);
4082 		ret = memcmp(ptr, kaddr + offset, cur);
4083 		if (ret)
4084 			break;
4085 
4086 		ptr += cur;
4087 		len -= cur;
4088 		offset = 0;
4089 		i++;
4090 	}
4091 	return ret;
4092 }
4093 
4094 /*
4095  * Check that the extent buffer is uptodate.
4096  *
4097  * For the regular case (sectorsize == PAGE_SIZE), check if @page is uptodate.
4098  * For the subpage case, check the eb range in the subpage uptodate bitmap.
4099  */
4100 static void assert_eb_page_uptodate(const struct extent_buffer *eb,
4101 				    struct page *page)
4102 {
4103 	struct btrfs_fs_info *fs_info = eb->fs_info;
4104 
4105 	/*
4106 	 * If we are using the commit root we could potentially clear a page
4107 	 * Uptodate while we're using the extent buffer that we've previously
4108 	 * looked up.  We don't want to complain in this case, as the page was
4109 	 * valid before, we just didn't write it out.  Instead we want to catch
4110 	 * the case where we didn't actually read the block properly, which
4111 	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
4112 	 */
4113 	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4114 		return;
4115 
4116 	if (fs_info->nodesize < PAGE_SIZE) {
4117 		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, page,
4118 							 eb->start, eb->len)))
4119 			btrfs_subpage_dump_bitmap(fs_info, page, eb->start, eb->len);
4120 	} else {
4121 		WARN_ON(!PageUptodate(page));
4122 	}
4123 }
4124 
4125 static void __write_extent_buffer(const struct extent_buffer *eb,
4126 				  const void *srcv, unsigned long start,
4127 				  unsigned long len, bool use_memmove)
4128 {
4129 	size_t cur;
4130 	size_t offset;
4131 	struct page *page;
4132 	char *kaddr;
4133 	char *src = (char *)srcv;
4134 	unsigned long i = get_eb_page_index(start);
4135 	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
4136 	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4137 
4138 	WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
4139 
4140 	if (check_eb_range(eb, start, len))
4141 		return;
4142 
4143 	offset = get_eb_offset_in_page(eb, start);
4144 
4145 	while (len > 0) {
4146 		page = eb->pages[i];
4147 		if (check_uptodate)
4148 			assert_eb_page_uptodate(eb, page);
4149 
4150 		cur = min(len, PAGE_SIZE - offset);
4151 		kaddr = page_address(page);
4152 		if (use_memmove)
4153 			memmove(kaddr + offset, src, cur);
4154 		else
4155 			memcpy(kaddr + offset, src, cur);
4156 
4157 		src += cur;
4158 		len -= cur;
4159 		offset = 0;
4160 		i++;
4161 	}
4162 }
4163 
4164 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4165 			 unsigned long start, unsigned long len)
4166 {
4167 	return __write_extent_buffer(eb, srcv, start, len, false);
4168 }
4169 
4170 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4171 				 unsigned long start, unsigned long len)
4172 {
4173 	unsigned long cur = start;
4174 
4175 	while (cur < start + len) {
4176 		unsigned long index = get_eb_page_index(cur);
4177 		unsigned int offset = get_eb_offset_in_page(eb, cur);
4178 		unsigned int cur_len = min(start + len - cur, PAGE_SIZE - offset);
4179 		struct page *page = eb->pages[index];
4180 
4181 		assert_eb_page_uptodate(eb, page);
4182 		memset(page_address(page) + offset, c, cur_len);
4183 
4184 		cur += cur_len;
4185 	}
4186 }
4187 
4188 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4189 			   unsigned long len)
4190 {
4191 	if (check_eb_range(eb, start, len))
4192 		return;
4193 	return memset_extent_buffer(eb, 0, start, len);
4194 }
4195 
4196 void copy_extent_buffer_full(const struct extent_buffer *dst,
4197 			     const struct extent_buffer *src)
4198 {
4199 	unsigned long cur = 0;
4200 
4201 	ASSERT(dst->len == src->len);
4202 
4203 	while (cur < src->len) {
4204 		unsigned long index = get_eb_page_index(cur);
4205 		unsigned long offset = get_eb_offset_in_page(src, cur);
4206 		unsigned long cur_len = min(src->len, PAGE_SIZE - offset);
4207 		void *addr = page_address(src->pages[index]) + offset;
4208 
4209 		write_extent_buffer(dst, addr, cur, cur_len);
4210 
4211 		cur += cur_len;
4212 	}
4213 }
4214 
4215 void copy_extent_buffer(const struct extent_buffer *dst,
4216 			const struct extent_buffer *src,
4217 			unsigned long dst_offset, unsigned long src_offset,
4218 			unsigned long len)
4219 {
4220 	u64 dst_len = dst->len;
4221 	size_t cur;
4222 	size_t offset;
4223 	struct page *page;
4224 	char *kaddr;
4225 	unsigned long i = get_eb_page_index(dst_offset);
4226 
4227 	if (check_eb_range(dst, dst_offset, len) ||
4228 	    check_eb_range(src, src_offset, len))
4229 		return;
4230 
4231 	WARN_ON(src->len != dst_len);
4232 
4233 	offset = get_eb_offset_in_page(dst, dst_offset);
4234 
4235 	while (len > 0) {
4236 		page = dst->pages[i];
4237 		assert_eb_page_uptodate(dst, page);
4238 
4239 		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
4240 
4241 		kaddr = page_address(page);
4242 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
4243 
4244 		src_offset += cur;
4245 		len -= cur;
4246 		offset = 0;
4247 		i++;
4248 	}
4249 }
4250 
4251 /*
4252  * eb_bitmap_offset() - calculate the page and offset of the byte containing the
4253  * given bit number
4254  * @eb: the extent buffer
4255  * @start: offset of the bitmap item in the extent buffer
4256  * @nr: bit number
4257  * @page_index: return index of the page in the extent buffer that contains the
4258  * given bit number
4259  * @page_offset: return offset into the page given by page_index
4260  *
4261  * This helper hides the ugliness of finding the byte in an extent buffer which
4262  * contains a given bit.
4263  */
4264 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4265 				    unsigned long start, unsigned long nr,
4266 				    unsigned long *page_index,
4267 				    size_t *page_offset)
4268 {
4269 	size_t byte_offset = BIT_BYTE(nr);
4270 	size_t offset;
4271 
4272 	/*
4273 	 * The byte we want is the offset of the extent buffer + the offset of
4274 	 * the bitmap item in the extent buffer + the offset of the byte in the
4275 	 * bitmap item.
4276 	 */
4277 	offset = start + offset_in_page(eb->start) + byte_offset;
4278 
4279 	*page_index = offset >> PAGE_SHIFT;
4280 	*page_offset = offset_in_page(offset);
4281 }
4282 
4283 /*
4284  * Determine whether a bit in a bitmap item is set.
4285  *
4286  * @eb:     the extent buffer
4287  * @start:  offset of the bitmap item in the extent buffer
4288  * @nr:     bit number to test
4289  */
4290 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4291 			   unsigned long nr)
4292 {
4293 	u8 *kaddr;
4294 	struct page *page;
4295 	unsigned long i;
4296 	size_t offset;
4297 
4298 	eb_bitmap_offset(eb, start, nr, &i, &offset);
4299 	page = eb->pages[i];
4300 	assert_eb_page_uptodate(eb, page);
4301 	kaddr = page_address(page);
4302 	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4303 }
4304 
4305 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4306 {
4307 	unsigned long index = get_eb_page_index(bytenr);
4308 
4309 	if (check_eb_range(eb, bytenr, 1))
4310 		return NULL;
4311 	return page_address(eb->pages[index]) + get_eb_offset_in_page(eb, bytenr);
4312 }
4313 
4314 /*
4315  * Set an area of a bitmap to 1.
4316  *
4317  * @eb:     the extent buffer
4318  * @start:  offset of the bitmap item in the extent buffer
4319  * @pos:    bit number of the first bit
4320  * @len:    number of bits to set
4321  */
4322 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4323 			      unsigned long pos, unsigned long len)
4324 {
4325 	unsigned int first_byte = start + BIT_BYTE(pos);
4326 	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4327 	const bool same_byte = (first_byte == last_byte);
4328 	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4329 	u8 *kaddr;
4330 
4331 	if (same_byte)
4332 		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4333 
4334 	/* Handle the first byte. */
4335 	kaddr = extent_buffer_get_byte(eb, first_byte);
4336 	*kaddr |= mask;
4337 	if (same_byte)
4338 		return;
4339 
4340 	/* Handle the byte aligned part. */
4341 	ASSERT(first_byte + 1 <= last_byte);
4342 	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4343 
4344 	/* Handle the last byte. */
4345 	kaddr = extent_buffer_get_byte(eb, last_byte);
4346 	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4347 }
4348 
4349 
4350 /*
4351  * Clear an area of a bitmap.
4352  *
4353  * @eb:     the extent buffer
4354  * @start:  offset of the bitmap item in the extent buffer
4355  * @pos:    bit number of the first bit
4356  * @len:    number of bits to clear
4357  */
4358 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4359 				unsigned long start, unsigned long pos,
4360 				unsigned long len)
4361 {
4362 	unsigned int first_byte = start + BIT_BYTE(pos);
4363 	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4364 	const bool same_byte = (first_byte == last_byte);
4365 	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4366 	u8 *kaddr;
4367 
4368 	if (same_byte)
4369 		mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4370 
4371 	/* Handle the first byte. */
4372 	kaddr = extent_buffer_get_byte(eb, first_byte);
4373 	*kaddr &= ~mask;
4374 	if (same_byte)
4375 		return;
4376 
4377 	/* Handle the byte aligned part. */
4378 	ASSERT(first_byte + 1 <= last_byte);
4379 	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4380 
4381 	/* Handle the last byte. */
4382 	kaddr = extent_buffer_get_byte(eb, last_byte);
4383 	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4384 }
4385 
4386 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4387 {
4388 	unsigned long distance = (src > dst) ? src - dst : dst - src;
4389 	return distance < len;
4390 }
4391 
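/*
 * Copy @len bytes inside @dst from offset @src_offset to @dst_offset,
 * walking the pages of the extent buffer chunk by chunk and falling back
 * to memmove for chunks where the source and destination ranges overlap.
 */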
4392 void memcpy_extent_buffer(const struct extent_buffer *dst,
4393 			  unsigned long dst_offset, unsigned long src_offset,
4394 			  unsigned long len)
4395 {
4396 	unsigned long cur_off = 0;
4397 
4398 	if (check_eb_range(dst, dst_offset, len) ||
4399 	    check_eb_range(dst, src_offset, len))
4400 		return;
4401 
4402 	while (cur_off < len) {
4403 		unsigned long cur_src = cur_off + src_offset;
4404 		unsigned long pg_index = get_eb_page_index(cur_src);
4405 		unsigned long pg_off = get_eb_offset_in_page(dst, cur_src);
4406 		unsigned long cur_len = min(src_offset + len - cur_src,
4407 					    PAGE_SIZE - pg_off);
4408 		void *src_addr = page_address(dst->pages[pg_index]) + pg_off;
4409 		const bool use_memmove = areas_overlap(src_offset + cur_off,
4410 						       dst_offset + cur_off, cur_len);
4411 
4412 		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4413 				      use_memmove);
4414 		cur_off += cur_len;
4415 	}
4416 }
4417 
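/*
 * Like memcpy_extent_buffer() but safe for overlapping ranges in either
 * direction: when the destination is above the source, the copy is done
 * backwards, chunk by chunk from the end of the range.
 */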
4418 void memmove_extent_buffer(const struct extent_buffer *dst,
4419 			   unsigned long dst_offset, unsigned long src_offset,
4420 			   unsigned long len)
4421 {
4422 	unsigned long dst_end = dst_offset + len - 1;
4423 	unsigned long src_end = src_offset + len - 1;
4424 
4425 	if (check_eb_range(dst, dst_offset, len) ||
4426 	    check_eb_range(dst, src_offset, len))
4427 		return;
4428 
4429 	if (dst_offset < src_offset) {
4430 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4431 		return;
4432 	}
4433 
4434 	while (len > 0) {
4435 		unsigned long src_i;
4436 		size_t cur;
4437 		size_t dst_off_in_page;
4438 		size_t src_off_in_page;
4439 		void *src_addr;
4440 		bool use_memmove;
4441 
4442 		src_i = get_eb_page_index(src_end);
4443 
4444 		dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
4445 		src_off_in_page = get_eb_offset_in_page(dst, src_end);
4446 
4447 		cur = min_t(unsigned long, len, src_off_in_page + 1);
4448 		cur = min(cur, dst_off_in_page + 1);
4449 
4450 		src_addr = page_address(dst->pages[src_i]) + src_off_in_page -
4451 					cur + 1;
4452 		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4453 					    cur);
4454 
4455 		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4456 				      use_memmove);
4457 
4458 		dst_end -= cur;
4459 		src_end -= cur;
4460 		len -= cur;
4461 	}
4462 }
4463 
4464 #define GANG_LOOKUP_SIZE	16
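/*
 * Find the first extent buffer inside @page whose bytenr is >= @bytenr by
 * doing gang lookups in the buffer radix tree.  Must be called with
 * fs_info->buffer_lock held.  Returns NULL if no such buffer exists within
 * the page.
 */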
4465 static struct extent_buffer *get_next_extent_buffer(
4466 		struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4467 {
4468 	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4469 	struct extent_buffer *found = NULL;
4470 	u64 page_start = page_offset(page);
4471 	u64 cur = page_start;
4472 
4473 	ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4474 	lockdep_assert_held(&fs_info->buffer_lock);
4475 
4476 	while (cur < page_start + PAGE_SIZE) {
4477 		int ret;
4478 		int i;
4479 
4480 		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4481 				(void **)gang, cur >> fs_info->sectorsize_bits,
4482 				min_t(unsigned int, GANG_LOOKUP_SIZE,
4483 				      PAGE_SIZE / fs_info->nodesize));
4484 		if (ret == 0)
4485 			goto out;
4486 		for (i = 0; i < ret; i++) {
4487 			/* Already beyond page end */
4488 			if (gang[i]->start >= page_start + PAGE_SIZE)
4489 				goto out;
4490 			/* Found one */
4491 			if (gang[i]->start >= bytenr) {
4492 				found = gang[i];
4493 				goto out;
4494 			}
4495 		}
4496 		cur = gang[ret - 1]->start + gang[ret - 1]->len;
4497 	}
4498 out:
4499 	return found;
4500 }
4501 
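/*
 * Subpage variant of try_release_extent_buffer(): walk all extent buffers
 * inside @page through the radix tree and release the ones that are only
 * held by the tree.  Returns 1 if the page private is gone at the end (no
 * extent buffer is attached to the page anymore), 0 otherwise.
 */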
4502 static int try_release_subpage_extent_buffer(struct page *page)
4503 {
4504 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4505 	u64 cur = page_offset(page);
4506 	const u64 end = page_offset(page) + PAGE_SIZE;
4507 	int ret;
4508 
4509 	while (cur < end) {
4510 		struct extent_buffer *eb = NULL;
4511 
4512 		/*
4513 		 * Unlike try_release_extent_buffer(), which uses page->private
4514 		 * to grab the buffer, for the subpage case we rely on the radix
4515 		 * tree, thus we need to ensure radix tree consistency.
4516 		 *
4517 		 * We also want an atomic snapshot of the radix tree, so we go
4518 		 * with the spinlock rather than RCU.
4519 		 */
4520 		spin_lock(&fs_info->buffer_lock);
4521 		eb = get_next_extent_buffer(fs_info, page, cur);
4522 		if (!eb) {
4523 			/* No more eb in the page range after or at cur */
4524 			spin_unlock(&fs_info->buffer_lock);
4525 			break;
4526 		}
4527 		cur = eb->start + eb->len;
4528 
4529 		/*
4530 		 * The same as try_release_extent_buffer(), to ensure the eb
4531 		 * won't disappear out from under us.
4532 		 */
4533 		spin_lock(&eb->refs_lock);
4534 		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4535 			spin_unlock(&eb->refs_lock);
4536 			spin_unlock(&fs_info->buffer_lock);
4537 			break;
4538 		}
4539 		spin_unlock(&fs_info->buffer_lock);
4540 
4541 		/*
4542 		 * If tree ref isn't set then we know the ref on this eb is a
4543 		 * real ref, so just stop here; this eb will likely be freed soon
4544 		 * anyway.
4545 		 */
4546 		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4547 			spin_unlock(&eb->refs_lock);
4548 			break;
4549 		}
4550 
4551 		/*
4552 		 * Here we don't care about the return value, as we will always
4553 		 * check the page private at the end.  And
4554 		 * release_extent_buffer() will release the refs_lock.
4555 		 */
4556 		release_extent_buffer(eb);
4557 	}
4558 	/*
4559 	 * Finally, check if we have cleared the page private: if we have
4560 	 * released all ebs in the page, the page private should be cleared by now.
4561 	 */
4562 	spin_lock(&page->mapping->private_lock);
4563 	if (!PagePrivate(page))
4564 		ret = 1;
4565 	else
4566 		ret = 0;
4567 	spin_unlock(&page->mapping->private_lock);
4568 	return ret;
4569 
4570 }
4571 
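/*
 * Try to drop the extent buffer attached to @page so that the page can be
 * released.  Returns 1 if no live extent buffer backs the page anymore, 0
 * if the buffer is still referenced or under IO.  For subpage filesystems
 * this dispatches to try_release_subpage_extent_buffer().
 */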
4572 int try_release_extent_buffer(struct page *page)
4573 {
4574 	struct extent_buffer *eb;
4575 
4576 	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
4577 		return try_release_subpage_extent_buffer(page);
4578 
4579 	/*
4580 	 * We need to make sure nobody is changing page->private, as we rely on
4581 	 * page->private as the pointer to extent buffer.
4582 	 */
4583 	spin_lock(&page->mapping->private_lock);
4584 	if (!PagePrivate(page)) {
4585 		spin_unlock(&page->mapping->private_lock);
4586 		return 1;
4587 	}
4588 
4589 	eb = (struct extent_buffer *)page->private;
4590 	BUG_ON(!eb);
4591 
4592 	/*
4593 	 * This is a little awful but should be ok; we need to make sure that
4594 	 * the eb doesn't disappear out from under us while we're looking at
4595 	 * this page.
4596 	 */
4597 	spin_lock(&eb->refs_lock);
4598 	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4599 		spin_unlock(&eb->refs_lock);
4600 		spin_unlock(&page->mapping->private_lock);
4601 		return 0;
4602 	}
4603 	spin_unlock(&page->mapping->private_lock);
4604 
4605 	/*
4606 	 * If tree ref isn't set then we know the ref on this eb is a real ref,
4607 	 * so just return; this page will likely be freed soon anyway.
4608 	 */
4609 	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4610 		spin_unlock(&eb->refs_lock);
4611 		return 0;
4612 	}
4613 
4614 	return release_extent_buffer(eb);
4615 }
4616 
4617 /*
4618  * btrfs_readahead_tree_block - attempt to readahead a child block
4619  * @fs_info:	the fs_info
4620  * @bytenr:	bytenr to read
4621  * @owner_root: objectid of the root that owns this eb
4622  * @gen:	generation for the uptodate check, can be 0
4623  * @level:	level for the eb
4624  *
4625  * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
4626  * normal uptodate check of the eb, without checking the generation.  If we have
4627  * to read the block we will not block on anything.
4628  */
4629 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4630 				u64 bytenr, u64 owner_root, u64 gen, int level)
4631 {
4632 	struct btrfs_tree_parent_check check = {
4633 		.has_first_key = 0,
4634 		.level = level,
4635 		.transid = gen
4636 	};
4637 	struct extent_buffer *eb;
4638 	int ret;
4639 
4640 	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4641 	if (IS_ERR(eb))
4642 		return;
4643 
4644 	if (btrfs_buffer_uptodate(eb, gen, 1)) {
4645 		free_extent_buffer(eb);
4646 		return;
4647 	}
4648 
4649 	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4650 	if (ret < 0)
4651 		free_extent_buffer_stale(eb);
4652 	else
4653 		free_extent_buffer(eb);
4654 }
4655 
4656 /*
4657  * btrfs_readahead_node_child - readahead a node's child block
4658  * @node:	parent node we're reading from
4659  * @slot:	slot in the parent node for the child we want to read
4660  *
4661  * A helper for btrfs_readahead_tree_block(); we simply read the bytenr pointed
4662  * to by the slot in the node provided.
4663  */
4664 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4665 {
4666 	btrfs_readahead_tree_block(node->fs_info,
4667 				   btrfs_node_blockptr(node, slot),
4668 				   btrfs_header_owner(node),
4669 				   btrfs_node_ptr_generation(node, slot),
4670 				   btrfs_header_level(node) - 1);
4671 }
4672