1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/sched/mm.h>
10 #include <linux/spinlock.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/prefetch.h>
16 #include <linux/fsverity.h>
17 #include "extent_io.h"
18 #include "extent-io-tree.h"
19 #include "extent_map.h"
20 #include "ctree.h"
21 #include "btrfs_inode.h"
22 #include "bio.h"
23 #include "locking.h"
24 #include "backref.h"
25 #include "disk-io.h"
26 #include "subpage.h"
27 #include "zoned.h"
28 #include "block-group.h"
29 #include "compression.h"
30 #include "fs.h"
31 #include "accessors.h"
32 #include "file-item.h"
33 #include "file.h"
34 #include "dev-replace.h"
35 #include "super.h"
36 #include "transaction.h"
37
38 static struct kmem_cache *extent_buffer_cache;
39
40 #ifdef CONFIG_BTRFS_DEBUG
41 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
42 {
43 struct btrfs_fs_info *fs_info = eb->fs_info;
44 unsigned long flags;
45
46 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
47 list_add(&eb->leak_list, &fs_info->allocated_ebs);
48 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
49 }
50
51 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
52 {
53 struct btrfs_fs_info *fs_info = eb->fs_info;
54 unsigned long flags;
55
56 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
57 list_del(&eb->leak_list);
58 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
59 }
60
61 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
62 {
63 struct extent_buffer *eb;
64 unsigned long flags;
65
66 /*
67 * If we didn't get into open_ctree our allocated_ebs will not be
68 * initialized, so just skip this.
69 */
70 if (!fs_info->allocated_ebs.next)
71 return;
72
73 WARN_ON(!list_empty(&fs_info->allocated_ebs));
74 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
75 while (!list_empty(&fs_info->allocated_ebs)) {
76 eb = list_first_entry(&fs_info->allocated_ebs,
77 struct extent_buffer, leak_list);
78 btrfs_err(fs_info,
79 "buffer leak start %llu len %u refs %d bflags %lu owner %llu",
80 eb->start, eb->len, refcount_read(&eb->refs), eb->bflags,
81 btrfs_header_owner(eb));
82 list_del(&eb->leak_list);
83 WARN_ON_ONCE(1);
84 kmem_cache_free(extent_buffer_cache, eb);
85 }
86 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
87 }
88 #else
89 #define btrfs_leak_debug_add_eb(eb) do {} while (0)
90 #define btrfs_leak_debug_del_eb(eb) do {} while (0)
91 #endif
92
93 /*
94 * Structure to record info about the bio being assembled, and other info like
95 * how many bytes are there before stripe/ordered extent boundary.
96 */
97 struct btrfs_bio_ctrl {
98 struct btrfs_bio *bbio;
99 /* Last byte contained in bbio + 1. */
100 loff_t next_file_offset;
101 enum btrfs_compression_type compress_type;
102 u32 len_to_oe_boundary;
103 blk_opf_t opf;
104 btrfs_bio_end_io_t end_io_func;
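/* Set only by the data writeback paths; read paths leave this NULL. */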
105 struct writeback_control *wbc;
106
107 /*
108 * The sectors of the page which are going to be submitted by
109 * extent_writepage_io().
110 * This is to avoid touching ranges covered by compression/inline.
111 */
112 unsigned long submit_bitmap;
113 struct readahead_control *ractl;
114 };
115
116 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
117 {
118 struct btrfs_bio *bbio = bio_ctrl->bbio;
119
120 if (!bbio)
121 return;
122
123 /* Caller should ensure the bio has at least some range added */
124 ASSERT(bbio->bio.bi_iter.bi_size);
125
126 if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
127 bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
128 btrfs_submit_compressed_read(bbio);
129 else
130 btrfs_submit_bbio(bbio, 0);
131
132 /* The bbio is owned by the end_io handler now */
133 bio_ctrl->bbio = NULL;
134 }
135
136 /*
137 * Submit or fail the current bio in the bio_ctrl structure.
138 */
139 static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
140 {
141 struct btrfs_bio *bbio = bio_ctrl->bbio;
142
143 if (!bbio)
144 return;
145
146 if (ret) {
147 ASSERT(ret < 0);
148 btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
149 /* The bio is owned by the end_io handler now */
150 bio_ctrl->bbio = NULL;
151 } else {
152 submit_one_bio(bio_ctrl);
153 }
154 }
155
156 int __init extent_buffer_init_cachep(void)
157 {
158 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
159 sizeof(struct extent_buffer), 0, 0,
160 NULL);
161 if (!extent_buffer_cache)
162 return -ENOMEM;
163
164 return 0;
165 }
166
167 void __cold extent_buffer_free_cachep(void)
168 {
169 /*
170 * Make sure all delayed rcu free are flushed before we
171 * destroy caches.
172 */
173 rcu_barrier();
174 kmem_cache_destroy(extent_buffer_cache);
175 }
176
177 static void process_one_folio(struct btrfs_fs_info *fs_info,
178 struct folio *folio, const struct folio *locked_folio,
179 unsigned long page_ops, u64 start, u64 end)
180 {
181 u32 len;
182
183 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
184 len = end + 1 - start;
185
186 if (page_ops & PAGE_SET_ORDERED)
187 btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
188 if (page_ops & PAGE_START_WRITEBACK) {
189 btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
190 btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
191 }
192 if (page_ops & PAGE_END_WRITEBACK)
193 btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
194
195 if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
196 btrfs_folio_end_lock(fs_info, folio, start, len);
197 }
198
199 static void __process_folios_contig(struct address_space *mapping,
200 const struct folio *locked_folio, u64 start,
201 u64 end, unsigned long page_ops)
202 {
203 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
204 pgoff_t index = start >> PAGE_SHIFT;
205 pgoff_t end_index = end >> PAGE_SHIFT;
206 struct folio_batch fbatch;
207 int i;
208
209 folio_batch_init(&fbatch);
210 while (index <= end_index) {
211 int found_folios;
212
213 found_folios = filemap_get_folios_contig(mapping, &index,
214 end_index, &fbatch);
215 for (i = 0; i < found_folios; i++) {
216 struct folio *folio = fbatch.folios[i];
217
218 process_one_folio(fs_info, folio, locked_folio,
219 page_ops, start, end);
220 }
221 folio_batch_release(&fbatch);
222 cond_resched();
223 }
224 }
225
226 static noinline void unlock_delalloc_folio(const struct inode *inode,
227 struct folio *locked_folio,
228 u64 start, u64 end)
229 {
230 ASSERT(locked_folio);
231
232 __process_folios_contig(inode->i_mapping, locked_folio, start, end,
233 PAGE_UNLOCK);
234 }
235
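/*
 * Lock every folio in the range [start, end] except @locked_folio, which the
 * caller already holds locked.
 *
 * Return 0 if all folios were locked.
 * Return -EAGAIN if a folio is missing, no longer dirty or no longer belongs
 * to the mapping; in that case all folios locked so far are unlocked again.
 */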
236 static noinline int lock_delalloc_folios(struct inode *inode,
237 struct folio *locked_folio,
238 u64 start, u64 end)
239 {
240 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
241 struct address_space *mapping = inode->i_mapping;
242 pgoff_t index = start >> PAGE_SHIFT;
243 pgoff_t end_index = end >> PAGE_SHIFT;
244 u64 processed_end = start;
245 struct folio_batch fbatch;
246
247 folio_batch_init(&fbatch);
248 while (index <= end_index) {
249 unsigned int found_folios, i;
250
251 found_folios = filemap_get_folios_contig(mapping, &index,
252 end_index, &fbatch);
253 if (found_folios == 0)
254 goto out;
255
256 for (i = 0; i < found_folios; i++) {
257 struct folio *folio = fbatch.folios[i];
258 u64 range_start;
259 u32 range_len;
260
261 if (folio == locked_folio)
262 continue;
263
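/*
 * Lock the folio and recheck that it is still dirty and still belongs
 * to this mapping, otherwise bail out with -EAGAIN.
 */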
264 folio_lock(folio);
265 if (!folio_test_dirty(folio) || folio->mapping != mapping) {
266 folio_unlock(folio);
267 goto out;
268 }
269 range_start = max_t(u64, folio_pos(folio), start);
270 range_len = min_t(u64, folio_end(folio), end + 1) - range_start;
271 btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
272
273 processed_end = range_start + range_len - 1;
274 }
275 folio_batch_release(&fbatch);
276 cond_resched();
277 }
278
279 return 0;
280 out:
281 folio_batch_release(&fbatch);
282 if (processed_end > start)
283 unlock_delalloc_folio(inode, locked_folio, start, processed_end);
284 return -EAGAIN;
285 }
286
287 /*
288 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
289 * more than @max_bytes.
290 *
291 * @start: The original start bytenr to search.
292 * Will store the extent range start bytenr.
293 * @end: The original end bytenr of the search range
294 * Will store the extent range end bytenr.
295 *
296 * Return true if we find a delalloc range which starts inside the original
297 * range, and @start/@end will store the delalloc range start/end.
298 *
299 * Return false if we can't find any delalloc range which starts inside the
300 * original range, and @start/@end will be the non-delalloc range start/end.
301 */
302 EXPORT_FOR_TESTS
303 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
304 struct folio *locked_folio,
305 u64 *start, u64 *end)
306 {
307 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
308 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
309 const u64 orig_start = *start;
310 const u64 orig_end = *end;
311 /* The sanity tests may not set a valid fs_info. */
312 u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
313 u64 delalloc_start;
314 u64 delalloc_end;
315 bool found;
316 struct extent_state *cached_state = NULL;
317 int ret;
318 int loops = 0;
319
320 /* Caller should pass a valid @end to indicate the search range end */
321 ASSERT(orig_end > orig_start);
322
323 /* The range should at least cover part of the folio */
324 ASSERT(!(orig_start >= folio_end(locked_folio) ||
325 orig_end <= folio_pos(locked_folio)));
326 again:
327 /* step one, find a bunch of delalloc bytes starting at start */
328 delalloc_start = *start;
329 delalloc_end = 0;
330 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
331 max_bytes, &cached_state);
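/*
 * No delalloc range starts inside the original search range, report the
 * scanned range back to the caller and return false.
 */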
332 if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
333 *start = delalloc_start;
334
335 /* @delalloc_end can be -1, never go beyond @orig_end */
336 *end = min(delalloc_end, orig_end);
337 btrfs_free_extent_state(cached_state);
338 return false;
339 }
340
341 /*
342 * start comes from the offset of locked_folio. We have to lock
343 * folios in order, so we can't process delalloc bytes before
344 * locked_folio
345 */
346 if (delalloc_start < *start)
347 delalloc_start = *start;
348
349 /*
350 * make sure to limit the number of folios we try to lock down
351 */
352 if (delalloc_end + 1 - delalloc_start > max_bytes)
353 delalloc_end = delalloc_start + max_bytes - 1;
354
355 /* step two, lock all the folios after the folio that has start */
356 ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
357 delalloc_end);
358 ASSERT(!ret || ret == -EAGAIN);
359 if (ret == -EAGAIN) {
360 /* some of the folios are gone, let's avoid looping by
361 * shortening the size of the delalloc range we're searching
362 */
363 btrfs_free_extent_state(cached_state);
364 cached_state = NULL;
365 if (!loops) {
366 max_bytes = PAGE_SIZE;
367 loops = 1;
368 goto again;
369 } else {
370 found = false;
371 goto out_failed;
372 }
373 }
374
375 /* step three, lock the state bits for the whole range */
376 btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
377
378 /* then test to make sure it is all still delalloc */
379 ret = btrfs_test_range_bit(tree, delalloc_start, delalloc_end,
380 EXTENT_DELALLOC, cached_state);
381
382 btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
383 if (!ret) {
384 unlock_delalloc_folio(inode, locked_folio, delalloc_start,
385 delalloc_end);
386 cond_resched();
387 goto again;
388 }
389 *start = delalloc_start;
390 *end = delalloc_end;
391 out_failed:
392 return found;
393 }
394
395 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
396 const struct folio *locked_folio,
397 struct extent_state **cached,
398 u32 clear_bits, unsigned long page_ops)
399 {
400 btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
401
402 __process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
403 end, page_ops);
404 }
405
406 static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
407 {
408 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
409
410 if (!fsverity_active(folio->mapping->host) ||
411 btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
412 start >= i_size_read(folio->mapping->host))
413 return true;
414 return fsverity_verify_folio(folio);
415 }
416
417 static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
418 {
419 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
420
421 ASSERT(folio_pos(folio) <= start &&
422 start + len <= folio_end(folio));
423
424 if (uptodate && btrfs_verify_folio(folio, start, len))
425 btrfs_folio_set_uptodate(fs_info, folio, start, len);
426 else
427 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
428
429 if (!btrfs_is_subpage(fs_info, folio))
430 folio_unlock(folio);
431 else
432 btrfs_folio_end_lock(fs_info, folio, start, len);
433 }
434
435 /*
436 * After a write IO is done, we need to:
437 *
438 * - clear the uptodate bits on error
439 * - clear the writeback bits in the extent tree for the range
440 * - folio_end_writeback() if there is no more pending io for the folio
441 *
442 * Scheduling is not allowed, so the extent state tree is expected
443 * to have one and only one object corresponding to this IO.
444 */
445 static void end_bbio_data_write(struct btrfs_bio *bbio)
446 {
447 struct btrfs_fs_info *fs_info = bbio->fs_info;
448 struct bio *bio = &bbio->bio;
449 int error = blk_status_to_errno(bio->bi_status);
450 struct folio_iter fi;
451 const u32 sectorsize = fs_info->sectorsize;
452
453 ASSERT(!bio_flagged(bio, BIO_CLONED));
454 bio_for_each_folio_all(fi, bio) {
455 struct folio *folio = fi.folio;
456 u64 start = folio_pos(folio) + fi.offset;
457 u32 len = fi.length;
458
459 /* Our read/write should always be sector aligned. */
460 if (!IS_ALIGNED(fi.offset, sectorsize))
461 btrfs_err(fs_info,
462 "partial page write in btrfs with offset %zu and length %zu",
463 fi.offset, fi.length);
464 else if (!IS_ALIGNED(fi.length, sectorsize))
465 btrfs_info(fs_info,
466 "incomplete page write with offset %zu and length %zu",
467 fi.offset, fi.length);
468
469 btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
470 !error);
471 if (error)
472 mapping_set_error(folio->mapping, error);
473 btrfs_folio_clear_writeback(fs_info, folio, start, len);
474 }
475
476 bio_put(bio);
477 }
478
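/*
 * For subpage cases, mark the whole folio as locked in the subpage bitmap so
 * that end_folio_read() can release it block by block as reads complete.
 */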
479 static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
480 {
481 ASSERT(folio_test_locked(folio));
482 if (!btrfs_is_subpage(fs_info, folio))
483 return;
484
485 ASSERT(folio_test_private(folio));
486 btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), folio_size(folio));
487 }
488
489 /*
490 * After a data read IO is done, we need to:
491 *
492 * - clear the uptodate bits on error
493 * - set the uptodate bits if things worked
494 * - set the folio up to date if all extents in the tree are uptodate
495 * - clear the lock bit in the extent tree
496 * - unlock the folio if there are no other extents locked for it
497 *
498 * Scheduling is not allowed, so the extent state tree is expected
499 * to have one and only one object corresponding to this IO.
500 */
501 static void end_bbio_data_read(struct btrfs_bio *bbio)
502 {
503 struct btrfs_fs_info *fs_info = bbio->fs_info;
504 struct bio *bio = &bbio->bio;
505 struct folio_iter fi;
506
507 ASSERT(!bio_flagged(bio, BIO_CLONED));
508 bio_for_each_folio_all(fi, &bbio->bio) {
509 bool uptodate = !bio->bi_status;
510 struct folio *folio = fi.folio;
511 struct inode *inode = folio->mapping->host;
512 u64 start = folio_pos(folio) + fi.offset;
513
514 btrfs_debug(fs_info,
515 "%s: bi_sector=%llu, err=%d, mirror=%u",
516 __func__, bio->bi_iter.bi_sector, bio->bi_status,
517 bbio->mirror_num);
518
519
520 if (likely(uptodate)) {
521 u64 end = start + fi.length - 1;
522 loff_t i_size = i_size_read(inode);
523
524 /*
525 * Zero out the remaining part if this range straddles
526 * i_size.
527 *
528 * Here we should only zero the range inside the folio,
529 * not touch anything else.
530 *
531 * NOTE: i_size is exclusive while end is inclusive and
532 * folio_contains() takes PAGE_SIZE units.
533 */
534 if (folio_contains(folio, i_size >> PAGE_SHIFT) &&
535 i_size <= end) {
536 u32 zero_start = max(offset_in_folio(folio, i_size),
537 offset_in_folio(folio, start));
538 u32 zero_len = offset_in_folio(folio, end) + 1 -
539 zero_start;
540
541 folio_zero_range(folio, zero_start, zero_len);
542 }
543 }
544
545 /* Update page status and unlock. */
546 end_folio_read(folio, uptodate, start, fi.length);
547 }
548 bio_put(bio);
549 }
550
551 /*
552 * Populate every free slot in a provided array with folios using GFP_NOFS.
553 *
554 * @nr_folios: number of folios to allocate
555 * @folio_array: the array to fill with folios; any existing non-NULL entries in
556 * the array will be skipped
557 *
558 * Return: 0 if all folios were able to be allocated;
559 * -ENOMEM otherwise, the partially allocated folios would be freed and
560 * the array slots zeroed
561 */
562 int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
563 {
564 for (int i = 0; i < nr_folios; i++) {
565 if (folio_array[i])
566 continue;
567 folio_array[i] = folio_alloc(GFP_NOFS, 0);
568 if (!folio_array[i])
569 goto error;
570 }
571 return 0;
572 error:
573 for (int i = 0; i < nr_folios; i++) {
574 if (folio_array[i])
575 folio_put(folio_array[i]);
576 }
577 return -ENOMEM;
578 }
579
580 /*
581 * Populate every free slot in a provided array with pages, using GFP_NOFS.
582 *
583 * @nr_pages: number of pages to allocate
584 * @page_array: the array to fill with pages; any existing non-null entries in
585 * the array will be skipped
586 * @nofail: whether using __GFP_NOFAIL flag
587 *
588 * Return: 0 if all pages were able to be allocated;
589 * -ENOMEM otherwise, the partially allocated pages would be freed and
590 * the array slots zeroed
591 */
592 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
593 bool nofail)
594 {
595 const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
596 unsigned int allocated;
597
598 for (allocated = 0; allocated < nr_pages;) {
599 unsigned int last = allocated;
600
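/*
 * alloc_pages_bulk() only fills the NULL slots of the array and returns
 * the total number of populated slots, so keep retrying until the array
 * is full or no progress is made.
 */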
601 allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
602 if (unlikely(allocated == last)) {
603 /* No progress, fail and do cleanup. */
604 for (int i = 0; i < allocated; i++) {
605 __free_page(page_array[i]);
606 page_array[i] = NULL;
607 }
608 return -ENOMEM;
609 }
610 }
611 return 0;
612 }
613
614 /*
615 * Populate needed folios for the extent buffer.
616 *
617 * For now, the folios populated are always in order 0 (aka, single page).
618 */
619 static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
620 {
621 struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
622 int num_pages = num_extent_pages(eb);
623 int ret;
624
625 ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
626 if (ret < 0)
627 return ret;
628
629 for (int i = 0; i < num_pages; i++)
630 eb->folios[i] = page_folio(page_array[i]);
631 eb->folio_size = PAGE_SIZE;
632 eb->folio_shift = PAGE_SHIFT;
633 return 0;
634 }
635
636 static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
637 u64 disk_bytenr, loff_t file_offset)
638 {
639 struct bio *bio = &bio_ctrl->bbio->bio;
640 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
641
642 if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
643 /*
644 * For compression, all IO should have its logical bytenr set
645 * to the starting bytenr of the compressed extent.
646 */
647 return bio->bi_iter.bi_sector == sector;
648 }
649
650 /*
651 * To merge into a bio both the disk sector and the logical offset in
652 * the file need to be contiguous.
653 */
654 return bio_ctrl->next_file_offset == file_offset &&
655 bio_end_sector(bio) == sector;
656 }
657
658 static void alloc_new_bio(struct btrfs_inode *inode,
659 struct btrfs_bio_ctrl *bio_ctrl,
660 u64 disk_bytenr, u64 file_offset)
661 {
662 struct btrfs_fs_info *fs_info = inode->root->fs_info;
663 struct btrfs_bio *bbio;
664
665 bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
666 bio_ctrl->end_io_func, NULL);
667 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
668 bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint;
669 bbio->inode = inode;
670 bbio->file_offset = file_offset;
671 bio_ctrl->bbio = bbio;
672 bio_ctrl->len_to_oe_boundary = U32_MAX;
673 bio_ctrl->next_file_offset = file_offset;
674
675 /* Limit data write bios to the ordered boundary. */
676 if (bio_ctrl->wbc) {
677 struct btrfs_ordered_extent *ordered;
678
679 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
680 if (ordered) {
681 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
682 ordered->file_offset +
683 ordered->disk_num_bytes - file_offset);
684 bbio->ordered = ordered;
685 }
686
687 /*
688 * Pick the last added device to support cgroup writeback. For
689 * multi-device file systems this means blk-cgroup policies have
690 * to always be set on the last added/replaced device.
691 * This is a bit odd but has been like that for a long time.
692 */
693 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
694 wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
695 }
696 }
697
698 /*
699 * @disk_bytenr: logical bytenr where the write will be
700 * @folio: folio to add to the bio
701 * @size: portion of the folio that we want to write to
702 * @pg_offset: offset of the new bio or to check whether we are adding
703 * a contiguous folio to the previous one
704 *
705 * This will either add the folio into the existing @bio_ctrl->bbio, or allocate
706 * a new one in @bio_ctrl->bbio.
709 */
710 static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
711 u64 disk_bytenr, struct folio *folio,
712 size_t size, unsigned long pg_offset)
713 {
714 struct btrfs_inode *inode = folio_to_inode(folio);
715 loff_t file_offset = folio_pos(folio) + pg_offset;
716
717 ASSERT(pg_offset + size <= folio_size(folio));
718 ASSERT(bio_ctrl->end_io_func);
719
720 if (bio_ctrl->bbio &&
721 !btrfs_bio_is_contig(bio_ctrl, disk_bytenr, file_offset))
722 submit_one_bio(bio_ctrl);
723
724 do {
725 u32 len = size;
726
727 /* Allocate new bio if needed */
728 if (!bio_ctrl->bbio)
729 alloc_new_bio(inode, bio_ctrl, disk_bytenr, file_offset);
730
731 /* Cap to the current ordered extent boundary if there is one. */
732 if (len > bio_ctrl->len_to_oe_boundary) {
733 ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
734 ASSERT(is_data_inode(inode));
735 len = bio_ctrl->len_to_oe_boundary;
736 }
737
738 if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
739 /* bio full: move on to a new one */
740 submit_one_bio(bio_ctrl);
741 continue;
742 }
743 bio_ctrl->next_file_offset += len;
744
745 if (bio_ctrl->wbc)
746 wbc_account_cgroup_owner(bio_ctrl->wbc, folio, len);
747
748 size -= len;
749 pg_offset += len;
750 disk_bytenr += len;
751 file_offset += len;
752
753 /*
754 * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
755 * sector aligned. alloc_new_bio() then sets it to the end of
756 * our ordered extent for writes into zoned devices.
757 *
758 * When len_to_oe_boundary is tracking an ordered extent, we
759 * trust the ordered extent code to align things properly, and
760 * the check above to cap our write to the ordered extent
761 * boundary is correct.
762 *
763 * When len_to_oe_boundary is U32_MAX, the cap above would
764 * result in a 4095 byte IO for the last folio right before
765 * we hit the bio limit of UINT_MAX. bio_add_folio() has all
766 * the checks required to make sure we don't overflow the bio,
767 * and we should just ignore len_to_oe_boundary completely
768 * unless we're using it to track an ordered extent.
769 *
770 * It's pretty hard to make a bio sized U32_MAX, but it can
771 * happen when the page cache is able to feed us contiguous
772 * folios for large extents.
773 */
774 if (bio_ctrl->len_to_oe_boundary != U32_MAX)
775 bio_ctrl->len_to_oe_boundary -= len;
776
777 /* Ordered extent boundary: move on to a new bio. */
778 if (bio_ctrl->len_to_oe_boundary == 0)
779 submit_one_bio(bio_ctrl);
780 } while (size);
781 }
782
783 static int attach_extent_buffer_folio(struct extent_buffer *eb,
784 struct folio *folio,
785 struct btrfs_folio_state *prealloc)
786 {
787 struct btrfs_fs_info *fs_info = eb->fs_info;
788 int ret = 0;
789
790 /*
791 * If the page is mapped to btree inode, we should hold the private
792 * lock to prevent race.
793 * For cloned or dummy extent buffers, their pages are not mapped and
794 * will not race with any other ebs.
795 */
796 if (folio->mapping)
797 lockdep_assert_held(&folio->mapping->i_private_lock);
798
799 if (!btrfs_meta_is_subpage(fs_info)) {
800 if (!folio_test_private(folio))
801 folio_attach_private(folio, eb);
802 else
803 WARN_ON(folio_get_private(folio) != eb);
804 return 0;
805 }
806
807 /* Already mapped, just free prealloc */
808 if (folio_test_private(folio)) {
809 btrfs_free_folio_state(prealloc);
810 return 0;
811 }
812
813 if (prealloc)
814 /* Has preallocated memory for subpage */
815 folio_attach_private(folio, prealloc);
816 else
817 /* Do new allocation to attach subpage */
818 ret = btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
819 return ret;
820 }
821
822 int set_folio_extent_mapped(struct folio *folio)
823 {
824 struct btrfs_fs_info *fs_info;
825
826 ASSERT(folio->mapping);
827
828 if (folio_test_private(folio))
829 return 0;
830
831 fs_info = folio_to_fs_info(folio);
832
833 if (btrfs_is_subpage(fs_info, folio))
834 return btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
835
836 folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
837 return 0;
838 }
839
840 void clear_folio_extent_mapped(struct folio *folio)
841 {
842 struct btrfs_fs_info *fs_info;
843
844 ASSERT(folio->mapping);
845
846 if (!folio_test_private(folio))
847 return;
848
849 fs_info = folio_to_fs_info(folio);
850 if (btrfs_is_subpage(fs_info, folio))
851 return btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
852
853 folio_detach_private(folio);
854 }
855
856 static struct extent_map *get_extent_map(struct btrfs_inode *inode,
857 struct folio *folio, u64 start,
858 u64 len, struct extent_map **em_cached)
859 {
860 struct extent_map *em;
861
862 ASSERT(em_cached);
863
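/*
 * Reuse the cached extent map if it is still in the tree and covers @start,
 * otherwise drop our reference and do a fresh lookup.
 */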
864 if (*em_cached) {
865 em = *em_cached;
866 if (btrfs_extent_map_in_tree(em) && start >= em->start &&
867 start < btrfs_extent_map_end(em)) {
868 refcount_inc(&em->refs);
869 return em;
870 }
871
872 btrfs_free_extent_map(em);
873 *em_cached = NULL;
874 }
875
876 em = btrfs_get_extent(inode, folio, start, len);
877 if (!IS_ERR(em)) {
878 BUG_ON(*em_cached);
879 refcount_inc(&em->refs);
880 *em_cached = em;
881 }
882
883 return em;
884 }
885
886 static void btrfs_readahead_expand(struct readahead_control *ractl,
887 const struct extent_map *em)
888 {
889 const u64 ra_pos = readahead_pos(ractl);
890 const u64 ra_end = ra_pos + readahead_length(ractl);
891 const u64 em_end = em->start + em->ram_bytes;
892
893 /* No expansion for holes and inline extents. */
894 if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE)
895 return;
896
897 ASSERT(em_end >= ra_pos,
898 "extent_map %llu %llu ends before current readahead position %llu",
899 em->start, em->len, ra_pos);
900 if (em_end > ra_end)
901 readahead_expand(ractl, ra_pos, em_end - ra_pos);
902 }
903
904 /*
905 * basic readpage implementation. Locked extent state structs are inserted
906 * into the tree that are removed when the IO is done (by the end_io
907 * handlers)
908 * XXX JDM: This needs looking at to ensure proper page locking
909 * return 0 on success, otherwise return error
910 */
911 static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
912 struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
913 {
914 struct inode *inode = folio->mapping->host;
915 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
916 u64 start = folio_pos(folio);
917 const u64 end = start + folio_size(folio) - 1;
918 u64 extent_offset;
919 u64 last_byte = i_size_read(inode);
920 struct extent_map *em;
921 int ret = 0;
922 const size_t blocksize = fs_info->sectorsize;
923
924 ret = set_folio_extent_mapped(folio);
925 if (ret < 0) {
926 folio_unlock(folio);
927 return ret;
928 }
929
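/*
 * If the folio straddles i_size, zero the part beyond i_size so we never
 * expose stale data past the end of the file.
 */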
930 if (folio_contains(folio, last_byte >> PAGE_SHIFT)) {
931 size_t zero_offset = offset_in_folio(folio, last_byte);
932
933 if (zero_offset)
934 folio_zero_range(folio, zero_offset,
935 folio_size(folio) - zero_offset);
936 }
937 bio_ctrl->end_io_func = end_bbio_data_read;
938 begin_folio_read(fs_info, folio);
939 for (u64 cur = start; cur <= end; cur += blocksize) {
940 enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
941 unsigned long pg_offset = offset_in_folio(folio, cur);
942 bool force_bio_submit = false;
943 u64 disk_bytenr;
944 u64 block_start;
945
946 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
947 if (cur >= last_byte) {
948 folio_zero_range(folio, pg_offset, end - cur + 1);
949 end_folio_read(folio, true, cur, end - cur + 1);
950 break;
951 }
952 if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
953 end_folio_read(folio, true, cur, blocksize);
954 continue;
955 }
956 em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
957 if (IS_ERR(em)) {
958 end_folio_read(folio, false, cur, end + 1 - cur);
959 return PTR_ERR(em);
960 }
961 extent_offset = cur - em->start;
962 BUG_ON(btrfs_extent_map_end(em) <= cur);
963 BUG_ON(end < cur);
964
965 compress_type = btrfs_extent_map_compression(em);
966
967 /*
968 * Only expand readahead for extents whose pages are already being
969 * created anyway by add_ra_bio_pages(), i.e. compressed extents in
970 * the non-subpage case.
971 */
972 if (bio_ctrl->ractl &&
973 !btrfs_is_subpage(fs_info, folio) &&
974 compress_type != BTRFS_COMPRESS_NONE)
975 btrfs_readahead_expand(bio_ctrl->ractl, em);
976
977 if (compress_type != BTRFS_COMPRESS_NONE)
978 disk_bytenr = em->disk_bytenr;
979 else
980 disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset;
981
982 if (em->flags & EXTENT_FLAG_PREALLOC)
983 block_start = EXTENT_MAP_HOLE;
984 else
985 block_start = btrfs_extent_map_block_start(em);
986
987 /*
988 * If we have a file range that points to a compressed extent
989 * and it's followed by a consecutive file range that points
990 * to the same compressed extent (possibly with a different
991 * offset and/or length, so it either points to the whole extent
992 * or only part of it), we must make sure we do not submit a
993 * single bio to populate the folios for the 2 ranges because
994 * this makes the compressed extent read zero out the folios
995 * belonging to the 2nd range. Imagine the following scenario:
996 *
997 * File layout
998 * [0 - 8K] [8K - 24K]
999 * | |
1000 * | |
1001 * points to extent X, points to extent X,
1002 * offset 4K, length of 8K offset 0, length 16K
1003 *
1004 * [extent X, compressed length = 4K uncompressed length = 16K]
1005 *
1006 * If the bio to read the compressed extent covers both ranges,
1007 * it will decompress extent X into the folios belonging to the
1008 * first range and then it will stop, zeroing out the remaining
1009 * folios that belong to the other range that points to extent X.
1010 * So here we make sure we submit 2 bios, one for the first
1011 * range and another one for the second range. Both will target
1012 * the same physical extent from disk, but we can't currently
1013 * make the compressed bio endio callback populate the folios
1014 * for both ranges because each compressed bio is tightly
1015 * coupled with a single extent map, and each range can have
1016 * an extent map with a different offset value relative to the
1017 * uncompressed data of our extent and different lengths. This
1018 * is a corner case so we prioritize correctness over
1019 * non-optimal behavior (submitting 2 bios for the same extent).
1020 */
1021 if (compress_type != BTRFS_COMPRESS_NONE &&
1022 prev_em_start && *prev_em_start != (u64)-1 &&
1023 *prev_em_start != em->start)
1024 force_bio_submit = true;
1025
1026 if (prev_em_start)
1027 *prev_em_start = em->start;
1028
1029 btrfs_free_extent_map(em);
1030 em = NULL;
1031
1032 /* we've found a hole, just zero and go on */
1033 if (block_start == EXTENT_MAP_HOLE) {
1034 folio_zero_range(folio, pg_offset, blocksize);
1035 end_folio_read(folio, true, cur, blocksize);
1036 continue;
1037 }
1038 /* the get_extent function already copied into the folio */
1039 if (block_start == EXTENT_MAP_INLINE) {
1040 end_folio_read(folio, true, cur, blocksize);
1041 continue;
1042 }
1043
1044 if (bio_ctrl->compress_type != compress_type) {
1045 submit_one_bio(bio_ctrl);
1046 bio_ctrl->compress_type = compress_type;
1047 }
1048
1049 if (force_bio_submit)
1050 submit_one_bio(bio_ctrl);
1051 submit_extent_folio(bio_ctrl, disk_bytenr, folio, blocksize,
1052 pg_offset);
1053 }
1054 return 0;
1055 }
1056
1057 /*
1058 * Check if we can skip waiting the @ordered extent covering the block at @fileoff.
1059 *
1060 * @fileoff: Both input and output.
1061 * Input as the file offset where the check should start at.
1062 * Output as where the next check should start at,
1063 * if the function returns true.
1064 *
1065 * Return true if we can skip to @fileoff. The caller needs to check the new
1066 * @fileoff value to make sure it covers the full range, before skipping the
1067 * full OE.
1068 *
1069 * Return false if we must wait for the ordered extent.
1070 */
1071 static bool can_skip_one_ordered_range(struct btrfs_inode *inode,
1072 struct btrfs_ordered_extent *ordered,
1073 u64 *fileoff)
1074 {
1075 const struct btrfs_fs_info *fs_info = inode->root->fs_info;
1076 struct folio *folio;
1077 const u32 blocksize = fs_info->sectorsize;
1078 u64 cur = *fileoff;
1079 bool ret;
1080
1081 folio = filemap_get_folio(inode->vfs_inode.i_mapping, cur >> PAGE_SHIFT);
1082
1083 /*
1084 * We should have locked the folio(s) for range [start, end], thus
1085 * there must be a folio and it must be locked.
1086 */
1087 ASSERT(!IS_ERR(folio));
1088 ASSERT(folio_test_locked(folio));
1089
1090 /*
1091 * There are several cases for the folio and OE combination:
1092 *
1093 * 1) Folio has no private flag
1094 * The OE has all its IO done but not yet finished, and folio got
1095 * invalidated.
1096 *
1097 * Here we have to wait for the OE to finish, as it may contain the
1098 * to-be-inserted data checksum.
1099 * Without the data checksum inserted into the csum tree, read will
1100 * just fail with missing csum.
1101 */
1102 if (!folio_test_private(folio)) {
1103 ret = false;
1104 goto out;
1105 }
1106
1107 /*
1108 * 2) The first block is DIRTY.
1109 *
1110 * This means the OE is created by some other folios whose file pos is
1111 * before this one. And since we are holding the folio lock, the writeback
1112 * of this folio cannot start.
1113 *
1114 * We must skip the whole OE, because it will never start until we
1115 * finished our folio read and unlocked the folio.
1116 */
1117 if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) {
1118 u64 range_len = min(folio_end(folio),
1119 ordered->file_offset + ordered->num_bytes) - cur;
1120
1121 ret = true;
1122 /*
1123 * At least inside the folio, all the remaining blocks should
1124 * also be dirty.
1125 */
1126 ASSERT(btrfs_folio_test_dirty(fs_info, folio, cur, range_len));
1127 *fileoff = ordered->file_offset + ordered->num_bytes;
1128 goto out;
1129 }
1130
1131 /*
1132 * 3) The first block is uptodate.
1133 *
1134 * At least the first block can be skipped, but we are still not fully
1135 * sure. E.g. if the OE has some other folios in the range that cannot
1136 * be skipped.
1137 * So we return true and update @fileoff to the OE/folio boundary.
1138 */
1139 if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
1140 u64 range_len = min(folio_end(folio),
1141 ordered->file_offset + ordered->num_bytes) - cur;
1142
1143 /*
1144 * The whole range to the OE end or folio boundary should also
1145 * be uptodate.
1146 */
1147 ASSERT(btrfs_folio_test_uptodate(fs_info, folio, cur, range_len));
1148 ret = true;
1149 *fileoff = cur + range_len;
1150 goto out;
1151 }
1152
1153 /*
1154 * 4) The first block is not uptodate.
1155 *
1156 * This means the folio was invalidated after its writeback finished, but
1157 * was later re-inserted into the filemap by some other operation (e.g. a
1158 * block aligned buffered write).
1159 * Very much the same as case 1).
1160 */
1161 ret = false;
1162 out:
1163 folio_put(folio);
1164 return ret;
1165 }
1166
1167 static bool can_skip_ordered_extent(struct btrfs_inode *inode,
1168 struct btrfs_ordered_extent *ordered,
1169 u64 start, u64 end)
1170 {
1171 const u64 range_end = min(end, ordered->file_offset + ordered->num_bytes - 1);
1172 u64 cur = max(start, ordered->file_offset);
1173
1174 while (cur < range_end) {
1175 bool can_skip;
1176
1177 can_skip = can_skip_one_ordered_range(inode, ordered, &cur);
1178 if (!can_skip)
1179 return false;
1180 }
1181 return true;
1182 }
1183
1184 /*
1185 * Locking helper to make sure we get a stable view of extent maps for the
1186 * involved range.
1187 *
1188 * This is for folio read paths (read and readahead), thus the involved range
1189 * should have all the folios locked.
1190 */
1191 static void lock_extents_for_read(struct btrfs_inode *inode, u64 start, u64 end,
1192 struct extent_state **cached_state)
1193 {
1194 u64 cur_pos;
1195
1196 /* Caller must provide a valid @cached_state. */
1197 ASSERT(cached_state);
1198
1199 /* The range must at least be page aligned, as all read paths are folio based. */
1200 ASSERT(IS_ALIGNED(start, PAGE_SIZE));
1201 ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE));
1202
1203 again:
1204 btrfs_lock_extent(&inode->io_tree, start, end, cached_state);
1205 cur_pos = start;
1206 while (cur_pos < end) {
1207 struct btrfs_ordered_extent *ordered;
1208
1209 ordered = btrfs_lookup_ordered_range(inode, cur_pos,
1210 end - cur_pos + 1);
1211 /*
1212 * No ordered extents in the range, and we hold the extent lock,
1213 * no one can modify the extent maps in the range, we're safe to return.
1214 */
1215 if (!ordered)
1216 break;
1217
1218 /* Check if we can skip waiting for the whole OE. */
1219 if (can_skip_ordered_extent(inode, ordered, start, end)) {
1220 cur_pos = min(ordered->file_offset + ordered->num_bytes,
1221 end + 1);
1222 btrfs_put_ordered_extent(ordered);
1223 continue;
1224 }
1225
1226 /* Now wait for the OE to finish. */
1227 btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
1228 btrfs_start_ordered_extent_nowriteback(ordered, start, end + 1 - start);
1229 btrfs_put_ordered_extent(ordered);
1230 /* We have unlocked the whole range, restart from the beginning. */
1231 goto again;
1232 }
1233 }
1234
1235 int btrfs_read_folio(struct file *file, struct folio *folio)
1236 {
1237 struct btrfs_inode *inode = folio_to_inode(folio);
1238 const u64 start = folio_pos(folio);
1239 const u64 end = start + folio_size(folio) - 1;
1240 struct extent_state *cached_state = NULL;
1241 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1242 struct extent_map *em_cached = NULL;
1243 int ret;
1244
1245 lock_extents_for_read(inode, start, end, &cached_state);
1246 ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
1247 btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
1248
1249 btrfs_free_extent_map(em_cached);
1250
1251 /*
1252 * If btrfs_do_readpage() failed we will want to submit the assembled
1253 * bio to do the cleanup.
1254 */
1255 submit_one_bio(&bio_ctrl);
1256 return ret;
1257 }
1258
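/*
 * Mark the file range [start, start + len) in the per-folio delalloc bitmap,
 * one bit per block.
 */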
1259 static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
1260 u64 start, u32 len)
1261 {
1262 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1263 const u64 folio_start = folio_pos(folio);
1264 unsigned int start_bit;
1265 unsigned int nbits;
1266
1267 ASSERT(start >= folio_start && start + len <= folio_start + folio_size(folio));
1268 start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1269 nbits = len >> fs_info->sectorsize_bits;
1270 ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
1271 bitmap_set(delalloc_bitmap, start_bit, nbits);
1272 }
1273
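/*
 * Find the next run of set bits in the per-folio delalloc bitmap at or after
 * @start.
 *
 * Return true and fill @found_start / @found_len (in bytes) if a run is
 * found, false if no set bit is left in the bitmap.
 */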
1274 static bool find_next_delalloc_bitmap(struct folio *folio,
1275 unsigned long *delalloc_bitmap, u64 start,
1276 u64 *found_start, u32 *found_len)
1277 {
1278 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1279 const u64 folio_start = folio_pos(folio);
1280 const unsigned int bitmap_size = btrfs_blocks_per_folio(fs_info, folio);
1281 unsigned int start_bit;
1282 unsigned int first_zero;
1283 unsigned int first_set;
1284
1285 ASSERT(start >= folio_start && start < folio_start + folio_size(folio));
1286
1287 start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1288 first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
1289 if (first_set >= bitmap_size)
1290 return false;
1291
1292 *found_start = folio_start + (first_set << fs_info->sectorsize_bits);
1293 first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
1294 *found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
1295 return true;
1296 }
1297
1298 /*
1299 * Do all of the delayed allocation setup.
1300 *
1301 * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
1302 * The @folio should no longer be touched (treat it as already unlocked).
1303 *
1304 * Return 0 if there is still dirty block that needs to be submitted through
1305 * extent_writepage_io().
1306 * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
1307 * submitted, and @folio is still kept locked.
1308 *
1309 * Return <0 if there is any error hit.
1310 * Any allocated ordered extent range covering this folio will be marked
1311 * finished (IOERR), and @folio is still kept locked.
1312 */
1313 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1314 struct folio *folio,
1315 struct btrfs_bio_ctrl *bio_ctrl)
1316 {
1317 struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
1318 struct writeback_control *wbc = bio_ctrl->wbc;
1319 const bool is_subpage = btrfs_is_subpage(fs_info, folio);
1320 const u64 page_start = folio_pos(folio);
1321 const u64 page_end = page_start + folio_size(folio) - 1;
1322 const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
1323 unsigned long delalloc_bitmap = 0;
1324 /*
1325 * Save the last found delalloc end. As the delalloc end can go beyond
1326 * page boundary, thus we cannot rely on subpage bitmap to locate the
1327 * last delalloc end.
1328 */
1329 u64 last_delalloc_end = 0;
1330 /*
1331 * The range end (exclusive) of the last successfully finished delalloc
1332 * range.
1333 * Any range covered by ordered extent must either be manually marked
1334 * finished (error handling), or has IO submitted (and finish the
1335 * ordered extent normally).
1336 *
1337 * This records the end of ordered extent cleanup if we hit an error.
1338 */
1339 u64 last_finished_delalloc_end = page_start;
1340 u64 delalloc_start = page_start;
1341 u64 delalloc_end = page_end;
1342 u64 delalloc_to_write = 0;
1343 int ret = 0;
1344 int bit;
1345
1346 /* Save the dirty bitmap as our submission bitmap will be a subset of it. */
1347 if (btrfs_is_subpage(fs_info, folio)) {
1348 ASSERT(blocks_per_folio > 1);
1349 btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
1350 } else {
1351 bio_ctrl->submit_bitmap = 1;
1352 }
1353
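/*
 * Mark every block we may submit as locked in the subpage lock bitmap, so
 * each range can later be released independently by whichever path handles
 * it (async compression, inline extent or regular submission).
 */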
1354 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
1355 u64 start = page_start + (bit << fs_info->sectorsize_bits);
1356
1357 btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
1358 }
1359
1360 /* Lock all (subpage) delalloc ranges inside the folio first. */
1361 while (delalloc_start < page_end) {
1362 delalloc_end = page_end;
1363 if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
1364 &delalloc_start, &delalloc_end)) {
1365 delalloc_start = delalloc_end + 1;
1366 continue;
1367 }
1368 set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
1369 min(delalloc_end, page_end) + 1 - delalloc_start);
1370 last_delalloc_end = delalloc_end;
1371 delalloc_start = delalloc_end + 1;
1372 }
1373 delalloc_start = page_start;
1374
1375 if (!last_delalloc_end)
1376 goto out;
1377
1378 /* Run the delalloc ranges for the above locked ranges. */
1379 while (delalloc_start < page_end) {
1380 u64 found_start;
1381 u32 found_len;
1382 bool found;
1383
1384 if (!is_subpage) {
1385 /*
1386 * For non-subpage case, the found delalloc range must
1387 * cover this folio and there must be only one locked
1388 * delalloc range.
1389 */
1390 found_start = page_start;
1391 found_len = last_delalloc_end + 1 - found_start;
1392 found = true;
1393 } else {
1394 found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
1395 delalloc_start, &found_start, &found_len);
1396 }
1397 if (!found)
1398 break;
1399 /*
1400 * If the found range covers the last sector, the delalloc range may
1401 * end beyond the folio boundary, so use the saved delalloc_end
1402 * instead.
1403 */
1404 if (found_start + found_len >= page_end)
1405 found_len = last_delalloc_end + 1 - found_start;
1406
1407 if (ret >= 0) {
1408 /*
1409 * Some delalloc range may be created by previous folios.
1410 * Thus we still need to clean up this range during error
1411 * handling.
1412 */
1413 last_finished_delalloc_end = found_start;
1414 /* No errors hit so far, run the current delalloc range. */
1415 ret = btrfs_run_delalloc_range(inode, folio,
1416 found_start,
1417 found_start + found_len - 1,
1418 wbc);
1419 if (ret >= 0)
1420 last_finished_delalloc_end = found_start + found_len;
1421 if (unlikely(ret < 0))
1422 btrfs_err_rl(fs_info,
1423 "failed to run delalloc range, root=%lld ino=%llu folio=%llu submit_bitmap=%*pbl start=%llu len=%u: %d",
1424 btrfs_root_id(inode->root),
1425 btrfs_ino(inode),
1426 folio_pos(folio),
1427 blocks_per_folio,
1428 &bio_ctrl->submit_bitmap,
1429 found_start, found_len, ret);
1430 } else {
1431 /*
1432 * We've hit an error during previous delalloc range,
1433 * have to cleanup the remaining locked ranges.
1434 */
1435 btrfs_unlock_extent(&inode->io_tree, found_start,
1436 found_start + found_len - 1, NULL);
1437 unlock_delalloc_folio(&inode->vfs_inode, folio,
1438 found_start,
1439 found_start + found_len - 1);
1440 }
1441
1442 /*
1443 * We have some ranges that are going to be submitted asynchronously
1444 * (compression or inline). These ranges have their own control
1445 * on when to unlock the pages. We should not touch them
1446 * anymore, so clear the range from the submission bitmap.
1447 */
1448 if (ret > 0) {
1449 unsigned int start_bit = (found_start - page_start) >>
1450 fs_info->sectorsize_bits;
1451 unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
1452 page_start) >> fs_info->sectorsize_bits;
1453 bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
1454 }
1455 /*
1456 * Above btrfs_run_delalloc_range() may have unlocked the folio,
1457 * thus for the last range, we cannot touch the folio anymore.
1458 */
1459 if (found_start + found_len >= last_delalloc_end + 1)
1460 break;
1461
1462 delalloc_start = found_start + found_len;
1463 }
1464 /*
1465 * It's possible we had some ordered extents created before we hit
1466 * an error, cleanup non-async successfully created delalloc ranges.
1467 */
1468 if (unlikely(ret < 0)) {
1469 unsigned int bitmap_size = min(
1470 (last_finished_delalloc_end - page_start) >>
1471 fs_info->sectorsize_bits,
1472 blocks_per_folio);
1473
1474 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
1475 btrfs_mark_ordered_io_finished(inode, folio,
1476 page_start + (bit << fs_info->sectorsize_bits),
1477 fs_info->sectorsize, false);
1478 return ret;
1479 }
1480 out:
1481 if (last_delalloc_end)
1482 delalloc_end = last_delalloc_end;
1483 else
1484 delalloc_end = page_end;
1485 /*
1486 * delalloc_end is already one less than the total length, so
1487 * we don't subtract one from PAGE_SIZE.
1488 */
1489 delalloc_to_write +=
1490 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1491
1492 /*
1493 * If all ranges are submitted asynchronously, we just need to account
1494 * for them here.
1495 */
1496 if (bitmap_empty(&bio_ctrl->submit_bitmap, blocks_per_folio)) {
1497 wbc->nr_to_write -= delalloc_to_write;
1498 return 1;
1499 }
1500
1501 if (wbc->nr_to_write < delalloc_to_write) {
1502 int thresh = 8192;
1503
1504 if (delalloc_to_write < thresh * 2)
1505 thresh = delalloc_to_write;
1506 wbc->nr_to_write = min_t(u64, delalloc_to_write,
1507 thresh);
1508 }
1509
1510 return 0;
1511 }
1512
1513 /*
1514 * Return 0 if we have submitted or queued the sector for submission.
1515 * Return <0 for critical errors.
1516 *
1517 * Caller should make sure filepos < i_size and handle filepos >= i_size case.
1518 */
1519 static int submit_one_sector(struct btrfs_inode *inode,
1520 struct folio *folio,
1521 u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
1522 loff_t i_size)
1523 {
1524 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1525 struct extent_map *em;
1526 u64 block_start;
1527 u64 disk_bytenr;
1528 u64 extent_offset;
1529 u64 em_end;
1530 const u32 sectorsize = fs_info->sectorsize;
1531
1532 ASSERT(IS_ALIGNED(filepos, sectorsize));
1533
1534 /* @filepos >= i_size case should be handled by the caller. */
1535 ASSERT(filepos < i_size);
1536
1537 em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
1538 if (IS_ERR(em))
1539 return PTR_ERR(em);
1540
1541 extent_offset = filepos - em->start;
1542 em_end = btrfs_extent_map_end(em);
1543 ASSERT(filepos <= em_end);
1544 ASSERT(IS_ALIGNED(em->start, sectorsize));
1545 ASSERT(IS_ALIGNED(em->len, sectorsize));
1546
1547 block_start = btrfs_extent_map_block_start(em);
1548 disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset;
1549
1550 ASSERT(!btrfs_extent_map_is_compressed(em));
1551 ASSERT(block_start != EXTENT_MAP_HOLE);
1552 ASSERT(block_start != EXTENT_MAP_INLINE);
1553
1554 btrfs_free_extent_map(em);
1555 em = NULL;
1556
1557 /*
1558 * Although the PageDirty bit is cleared before entering this
1559 * function, subpage dirty bit is not cleared.
1560 * So clear subpage dirty bit here so next time we won't submit
1561 * a folio for a range already written to disk.
1562 */
1563 btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
1564 btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
1565 /*
1566 * Above call should set the whole folio with writeback flag, even
1567 * just for a single subpage sector.
1568 * As long as the folio is properly locked and the range is correct,
1569 * we should always get the folio with writeback flag.
1570 */
1571 ASSERT(folio_test_writeback(folio));
1572
1573 submit_extent_folio(bio_ctrl, disk_bytenr, folio,
1574 sectorsize, filepos - folio_pos(folio));
1575 return 0;
1576 }
1577
1578 /*
1579 * Helper for extent_writepage(). This calls the writepage start hooks,
1580 * and does the loop to map the page into extents and bios.
1581 *
1582 * We return 1 if the IO is started and the page is unlocked,
1583 * 0 if all went well (page still locked)
1584 * < 0 if there were errors (page still locked)
1585 */
1586 static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
1587 struct folio *folio,
1588 u64 start, u32 len,
1589 struct btrfs_bio_ctrl *bio_ctrl,
1590 loff_t i_size)
1591 {
1592 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1593 unsigned long range_bitmap = 0;
1594 bool submitted_io = false;
1595 bool error = false;
1596 const u64 folio_start = folio_pos(folio);
1597 const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
1598 u64 cur;
1599 int bit;
1600 int ret = 0;
1601
1602 ASSERT(start >= folio_start &&
1603 start + len <= folio_start + folio_size(folio));
1604
1605 ret = btrfs_writepage_cow_fixup(folio);
1606 if (ret == -EAGAIN) {
1607 /* Fixup worker will requeue */
1608 folio_redirty_for_writepage(bio_ctrl->wbc, folio);
1609 folio_unlock(folio);
1610 return 1;
1611 }
1612 if (ret < 0)
1613 return ret;
1614
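/*
 * Build a bitmap for [start, start + len) and intersect it with the
 * submission bitmap, so only blocks inside the requested range get submitted.
 */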
1615 for (cur = start; cur < start + len; cur += fs_info->sectorsize)
1616 set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
1617 bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
1618 blocks_per_folio);
1619
1620 bio_ctrl->end_io_func = end_bbio_data_write;
1621
1622 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) {
1623 cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
1624
1625 if (cur >= i_size) {
1626 btrfs_mark_ordered_io_finished(inode, folio, cur,
1627 start + len - cur, true);
1628 /*
1629 * This range is beyond i_size, thus we don't need to
1630 * bother writing back.
1631 * But we still need to clear the dirty subpage bit, or
1632 * the next time the folio gets dirtied, we will try to
1633 * writeback the sectors with subpage dirty bits,
1634 * causing writeback without ordered extent.
1635 */
1636 btrfs_folio_clear_dirty(fs_info, folio, cur,
1637 start + len - cur);
1638 break;
1639 }
1640 ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
1641 if (unlikely(ret < 0)) {
1642 /*
1643 * bio_ctrl may contain a bio crossing several folios.
1644 * Submit it immediately so that the bio has a chance
1645 * to finish normally, rather than being marked as an error.
1646 */
1647 submit_one_bio(bio_ctrl);
1648 /*
1649 * Failed to grab the extent map which should be very rare.
1650 * Since there is no bio submitted to finish the ordered
1651 * extent, we have to manually finish this sector.
1652 */
1653 btrfs_mark_ordered_io_finished(inode, folio, cur,
1654 fs_info->sectorsize, false);
1655 error = true;
1656 continue;
1657 }
1658 submitted_io = true;
1659 }
1660
1661 /*
1662 * If we didn't submit any sector (>= i_size), the folio dirty flag got
1663 * cleared but PAGECACHE_TAG_DIRTY is not cleared (it is only cleared
1664 * by folio_start_writeback() if the folio is not dirty).
1665 *
1666 * Here we set writeback and immediately clear it for the range. If the
1667 * full folio is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
1668 *
1669 * If we hit any error, the corresponding sector will still be dirty,
1670 * thus there is no need to clear PAGECACHE_TAG_DIRTY.
1671 */
1672 if (!submitted_io && !error) {
1673 btrfs_folio_set_writeback(fs_info, folio, start, len);
1674 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1675 }
1676 return ret;
1677 }
1678
1679 /*
1680 * The writepage semantics are similar to regular writepage. Extent
1681 * records are inserted to lock ranges in the tree, and as dirty areas
1682 * are found, they are marked for writeback. Then the lock bits are removed
1683 * and the end_io handler clears the writeback ranges.
1684 *
1685 * Return 0 if everything goes well.
1686 * Return <0 for error.
1687 */
1688 static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
1689 {
1690 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
1691 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1692 int ret;
1693 size_t pg_offset;
1694 loff_t i_size = i_size_read(&inode->vfs_inode);
1695 const pgoff_t end_index = i_size >> PAGE_SHIFT;
1696 const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio);
1697
1698 trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
1699
1700 WARN_ON(!folio_test_locked(folio));
1701
1702 pg_offset = offset_in_folio(folio, i_size);
1703 if (folio->index > end_index ||
1704 (folio->index == end_index && !pg_offset)) {
1705 folio_invalidate(folio, 0, folio_size(folio));
1706 folio_unlock(folio);
1707 return 0;
1708 }
1709
1710 if (folio_contains(folio, end_index))
1711 folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
1712
1713 /*
1714 * Default to unlocking the whole folio.
1715 * The proper bitmap cannot be initialized until writepage_delalloc().
1716 */
1717 bio_ctrl->submit_bitmap = (unsigned long)-1;
1718
1719 /*
1720 * If the page is dirty but without private set, it's marked dirty
1721 * without informing the fs.
1722 * Nowadays that is a bug, since the introduction of
1723 * pin_user_pages*().
1724 *
1725 * So here we check if the page has private set to rule out such
1726 * case.
1727 * But we also have a long history of relying on the COW fixup,
1728 * so here we only enable this check for experimental builds until
1729 * we're sure it's safe.
1730 */
1731 if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL) &&
1732 unlikely(!folio_test_private(folio))) {
1733 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
1734 btrfs_err_rl(fs_info,
1735 "root %lld ino %llu folio %llu is marked dirty without notifying the fs",
1736 btrfs_root_id(inode->root),
1737 btrfs_ino(inode), folio_pos(folio));
1738 ret = -EUCLEAN;
1739 goto done;
1740 }
1741
1742 ret = set_folio_extent_mapped(folio);
1743 if (ret < 0)
1744 goto done;
1745
1746 ret = writepage_delalloc(inode, folio, bio_ctrl);
1747 if (ret == 1)
1748 return 0;
1749 if (ret)
1750 goto done;
1751
1752 ret = extent_writepage_io(inode, folio, folio_pos(folio),
1753 folio_size(folio), bio_ctrl, i_size);
1754 if (ret == 1)
1755 return 0;
1756 if (ret < 0)
1757 btrfs_err_rl(fs_info,
1758 "failed to submit blocks, root=%lld inode=%llu folio=%llu submit_bitmap=%*pbl: %d",
1759 btrfs_root_id(inode->root), btrfs_ino(inode),
1760 folio_pos(folio), blocks_per_folio,
1761 &bio_ctrl->submit_bitmap, ret);
1762
1763 bio_ctrl->wbc->nr_to_write--;
1764
1765 done:
1766 if (ret < 0)
1767 mapping_set_error(folio->mapping, ret);
1768 /*
1769 * Only unlock ranges that are submitted, as there can be some async
1770 * submitted ranges inside the folio.
1771 */
1772 btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
1773 ASSERT(ret <= 0);
1774 return ret;
1775 }
1776
1777 /*
1778 * Lock extent buffer status and pages for writeback.
1779 *
1780 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1781 * extent buffer is not dirty).
1782 * Return %true if the extent buffer is submitted to a bio.
1783 */
1784 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1785 struct writeback_control *wbc)
1786 {
1787 struct btrfs_fs_info *fs_info = eb->fs_info;
1788 bool ret = false;
1789
1790 btrfs_tree_lock(eb);
1791 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1792 btrfs_tree_unlock(eb);
1793 if (wbc->sync_mode != WB_SYNC_ALL)
1794 return false;
1795 wait_on_extent_buffer_writeback(eb);
1796 btrfs_tree_lock(eb);
1797 }
1798
1799 /*
1800 * We need to do this to prevent races with callers that check if the eb
1801 * is under IO, since we can end up having no IO bits set for a short
1802 * period of time.
1803 */
1804 spin_lock(&eb->refs_lock);
1805 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1806 XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
1807 unsigned long flags;
1808
1809 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1810 spin_unlock(&eb->refs_lock);
1811
1812 xas_lock_irqsave(&xas, flags);
1813 xas_load(&xas);
1814 xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
1815 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
1816 xas_unlock_irqrestore(&xas, flags);
1817
1818 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1819 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1820 -eb->len,
1821 fs_info->dirty_metadata_batch);
1822 ret = true;
1823 } else {
1824 spin_unlock(&eb->refs_lock);
1825 }
1826 btrfs_tree_unlock(eb);
1827 return ret;
1828 }
1829
1830 static void set_btree_ioerr(struct extent_buffer *eb)
1831 {
1832 struct btrfs_fs_info *fs_info = eb->fs_info;
1833
1834 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1835
1836 /*
1837 * A read may stumble upon this buffer later, make sure that it gets an
1838 * error and knows there was an error.
1839 */
1840 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1841
1842 /*
1843 * We need to set the mapping with the io error as well because a write
1844 * error will flip the file system readonly, and then syncfs() will
1845 * return a 0 because we are readonly if we don't modify the err seq for
1846 * the superblock.
1847 */
1848 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1849
1850 /*
1851 * If writeback for a btree extent that doesn't belong to a log tree
1852 * failed, increment the counter transaction->eb_write_errors.
1853 * We do this because while the transaction is running and before it's
1854 * committing (when we call filemap_fdata[write|wait]_range against
1855 * the btree inode), we might have
1856 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1857 * returns an error or an error happens during writeback, when we're
1858 * committing the transaction we wouldn't know about it, since the pages
1859 * can be no longer dirty nor marked anymore for writeback (if a
1860 * subsequent modification to the extent buffer didn't happen before the
1861 * transaction commit), which makes filemap_fdata[write|wait]_range not
1862 * able to find the pages which contain errors at transaction
1863 * commit time. So if this happens we must abort the transaction,
1864 * otherwise we commit a super block with btree roots that point to
1865 * btree nodes/leafs whose content on disk is invalid - either garbage
1866 * or the content of some node/leaf from a past generation that got
1867 * cowed or deleted and is no longer valid.
1868 *
1869 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1870 * not be enough - we need to distinguish between log tree extents vs
1871 * non-log tree extents, and the next filemap_fdatawait_range() call
1872 * will catch and clear such errors in the mapping - and that call might
1873 * be from a log sync and not from a transaction commit. Also, checking
1874 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1875 * not done and would not be reliable - the eb might have been released
1876 * from memory and reading it back again means that flag would not be
1877 * set (since it's a runtime flag, not persisted on disk).
1878 *
1879 * Using the flags below in the btree inode also makes us achieve the
1880 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1881 * writeback for all dirty pages and before filemap_fdatawait_range()
1882 * is called, the writeback for all dirty pages had already finished
1883 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1884 * filemap_fdatawait_range() would return success, as it could not know
1885 * that writeback errors happened (the pages were no longer tagged for
1886 * writeback).
1887 */
1888 switch (eb->log_index) {
1889 case -1:
1890 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1891 break;
1892 case 0:
1893 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1894 break;
1895 case 1:
1896 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1897 break;
1898 default:
1899 BUG(); /* unexpected, logic error */
1900 }
1901 }
1902
1903 static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark)
1904 {
1905 struct btrfs_fs_info *fs_info = eb->fs_info;
1906 XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
1907 unsigned long flags;
1908
1909 xas_lock_irqsave(&xas, flags);
1910 xas_load(&xas);
1911 xas_set_mark(&xas, mark);
1912 xas_unlock_irqrestore(&xas, flags);
1913 }
1914
1915 static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark)
1916 {
1917 struct btrfs_fs_info *fs_info = eb->fs_info;
1918 XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
1919 unsigned long flags;
1920
1921 xas_lock_irqsave(&xas, flags);
1922 xas_load(&xas);
1923 xas_clear_mark(&xas, mark);
1924 xas_unlock_irqrestore(&xas, flags);
1925 }
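
/*
 * A quick worked example of the buffer_tree indexing used above (numbers
 * assumed, e.g. a 16K nodesize, i.e. nodesize_bits == 14): an extent
 * buffer at eb->start == 1MiB maps to xarray index 1048576 >> 14 == 64,
 * and the next tree block at 1MiB + 16K maps to index 65, so consecutive
 * tree blocks occupy consecutive slots (and marks) in fs_info->buffer_tree.
 */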
1926
1927 static void buffer_tree_tag_for_writeback(struct btrfs_fs_info *fs_info,
1928 unsigned long start, unsigned long end)
1929 {
1930 XA_STATE(xas, &fs_info->buffer_tree, start);
1931 unsigned int tagged = 0;
1932 void *eb;
1933
1934 xas_lock_irq(&xas);
1935 xas_for_each_marked(&xas, eb, end, PAGECACHE_TAG_DIRTY) {
1936 xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
1937 if (++tagged % XA_CHECK_SCHED)
1938 continue;
1939 xas_pause(&xas);
1940 xas_unlock_irq(&xas);
1941 cond_resched();
1942 xas_lock_irq(&xas);
1943 }
1944 xas_unlock_irq(&xas);
1945 }
1946
1947 struct eb_batch {
1948 unsigned int nr;
1949 unsigned int cur;
1950 struct extent_buffer *ebs[PAGEVEC_SIZE];
1951 };
1952
1953 static inline bool eb_batch_add(struct eb_batch *batch, struct extent_buffer *eb)
1954 {
1955 batch->ebs[batch->nr++] = eb;
1956 return (batch->nr < PAGEVEC_SIZE);
1957 }
1958
1959 static inline void eb_batch_init(struct eb_batch *batch)
1960 {
1961 batch->nr = 0;
1962 batch->cur = 0;
1963 }
1964
1965 static inline struct extent_buffer *eb_batch_next(struct eb_batch *batch)
1966 {
1967 if (batch->cur >= batch->nr)
1968 return NULL;
1969 return batch->ebs[batch->cur++];
1970 }
1971
1972 static inline void eb_batch_release(struct eb_batch *batch)
1973 {
1974 for (unsigned int i = 0; i < batch->nr; i++)
1975 free_extent_buffer(batch->ebs[i]);
1976 eb_batch_init(batch);
1977 }
1978
1979 static inline struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned long max,
1980 xa_mark_t mark)
1981 {
1982 struct extent_buffer *eb;
1983
1984 retry:
1985 eb = xas_find_marked(xas, max, mark);
1986
1987 if (xas_retry(xas, eb))
1988 goto retry;
1989
1990 if (!eb)
1991 return NULL;
1992
1993 if (!refcount_inc_not_zero(&eb->refs)) {
1994 xas_reset(xas);
1995 goto retry;
1996 }
1997
1998 if (unlikely(eb != xas_reload(xas))) {
1999 free_extent_buffer(eb);
2000 xas_reset(xas);
2001 goto retry;
2002 }
2003
2004 return eb;
2005 }
2006
2007 static unsigned int buffer_tree_get_ebs_tag(struct btrfs_fs_info *fs_info,
2008 unsigned long *start,
2009 unsigned long end, xa_mark_t tag,
2010 struct eb_batch *batch)
2011 {
2012 XA_STATE(xas, &fs_info->buffer_tree, *start);
2013 struct extent_buffer *eb;
2014
2015 rcu_read_lock();
2016 while ((eb = find_get_eb(&xas, end, tag)) != NULL) {
2017 if (!eb_batch_add(batch, eb)) {
2018 *start = ((eb->start + eb->len) >> fs_info->nodesize_bits);
2019 goto out;
2020 }
2021 }
2022 if (end == ULONG_MAX)
2023 *start = ULONG_MAX;
2024 else
2025 *start = end + 1;
2026 out:
2027 rcu_read_unlock();
2028
2029 return batch->nr;
2030 }
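
/*
 * A minimal sketch of how the eb_batch helpers and buffer_tree_get_ebs_tag()
 * are meant to be used together (assumed caller context; the real patterns
 * are btrfs_btree_wait_writeback_range() and btree_write_cache_pages()
 * below):
 *
 *	struct eb_batch batch;
 *	struct extent_buffer *eb;
 *	unsigned long index = start_index;
 *
 *	eb_batch_init(&batch);
 *	while (index <= end_index &&
 *	       buffer_tree_get_ebs_tag(fs_info, &index, end_index,
 *				       PAGECACHE_TAG_DIRTY, &batch)) {
 *		while ((eb = eb_batch_next(&batch)) != NULL)
 *			process_one_eb(eb);
 *		eb_batch_release(&batch);
 *		cond_resched();
 *	}
 *
 * Each returned eb carries a reference taken by find_get_eb(), which
 * eb_batch_release() drops. process_one_eb() is a placeholder for whatever
 * the caller does with the buffer.
 */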
2031
2032 /*
2033 * The endio specific version which won't touch any unsafe spinlock in endio
2034 * context.
2035 */
2036 static struct extent_buffer *find_extent_buffer_nolock(
2037 struct btrfs_fs_info *fs_info, u64 start)
2038 {
2039 struct extent_buffer *eb;
2040 unsigned long index = (start >> fs_info->nodesize_bits);
2041
2042 rcu_read_lock();
2043 eb = xa_load(&fs_info->buffer_tree, index);
2044 if (eb && !refcount_inc_not_zero(&eb->refs))
2045 eb = NULL;
2046 rcu_read_unlock();
2047 return eb;
2048 }
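
/*
 * Expected usage (assumed caller context): the returned eb, if any, carries
 * an extra reference that the caller must eventually drop:
 *
 *	eb = find_extent_buffer_nolock(fs_info, bytenr);
 *	if (!eb)
 *		return;
 *	... use the eb without taking any extra locks ...
 *	free_extent_buffer(eb);
 */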
2049
2050 static void end_bbio_meta_write(struct btrfs_bio *bbio)
2051 {
2052 struct extent_buffer *eb = bbio->private;
2053 struct folio_iter fi;
2054
2055 if (bbio->bio.bi_status != BLK_STS_OK)
2056 set_btree_ioerr(eb);
2057
2058 bio_for_each_folio_all(fi, &bbio->bio) {
2059 btrfs_meta_folio_clear_writeback(fi.folio, eb);
2060 }
2061
2062 buffer_tree_clear_mark(eb, PAGECACHE_TAG_WRITEBACK);
2063 clear_and_wake_up_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
2064 bio_put(&bbio->bio);
2065 }
2066
2067 static void prepare_eb_write(struct extent_buffer *eb)
2068 {
2069 u32 nritems;
2070 unsigned long start;
2071 unsigned long end;
2072
2073 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
2074
2075 /* Set btree blocks beyond nritems with 0 to avoid stale content */
2076 nritems = btrfs_header_nritems(eb);
2077 if (btrfs_header_level(eb) > 0) {
2078 end = btrfs_node_key_ptr_offset(eb, nritems);
2079 memzero_extent_buffer(eb, end, eb->len - end);
2080 } else {
2081 /*
2082 * Leaf:
2083 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
2084 */
2085 start = btrfs_item_nr_offset(eb, nritems);
2086 end = btrfs_item_nr_offset(eb, 0);
2087 if (nritems == 0)
2088 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
2089 else
2090 end += btrfs_item_offset(eb, nritems - 1);
2091 memzero_extent_buffer(eb, start, end - start);
2092 }
2093 }
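
/*
 * Worked example for the leaf case above (a sketch, values assumed): with
 * nritems == 2, the zeroed range starts right after the last item header,
 * start = btrfs_item_nr_offset(eb, 2), and ends where the data of the last
 * inserted item begins, end = btrfs_item_nr_offset(eb, 0) +
 * btrfs_item_offset(eb, 1). Only the unused hole between the item header
 * array (growing forward) and the item data (growing backward from the end
 * of the leaf) is zeroed.
 */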
2094
2095 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
2096 struct writeback_control *wbc)
2097 {
2098 struct btrfs_fs_info *fs_info = eb->fs_info;
2099 struct btrfs_bio *bbio;
2100
2101 prepare_eb_write(eb);
2102
2103 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
2104 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
2105 eb->fs_info, end_bbio_meta_write, eb);
2106 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
2107 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
2108 wbc_init_bio(wbc, &bbio->bio);
2109 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
2110 bbio->file_offset = eb->start;
2111 for (int i = 0; i < num_extent_folios(eb); i++) {
2112 struct folio *folio = eb->folios[i];
2113 u64 range_start = max_t(u64, eb->start, folio_pos(folio));
2114 u32 range_len = min_t(u64, folio_end(folio),
2115 eb->start + eb->len) - range_start;
2116
2117 folio_lock(folio);
2118 btrfs_meta_folio_clear_dirty(folio, eb);
2119 btrfs_meta_folio_set_writeback(folio, eb);
2120 if (!folio_test_dirty(folio))
2121 wbc->nr_to_write -= folio_nr_pages(folio);
2122 bio_add_folio_nofail(&bbio->bio, folio, range_len,
2123 offset_in_folio(folio, range_start));
2124 wbc_account_cgroup_owner(wbc, folio, range_len);
2125 folio_unlock(folio);
2126 }
2127 btrfs_submit_bbio(bbio, 0);
2128 }
2129
2130 /*
2131 * Wait for all eb writeback in the given range to finish.
2132 *
2133 * @fs_info: The fs_info for this file system.
2134 * @start: The offset of the range to start waiting on writeback.
2135 * @end: The end of the range, inclusive. This is meant to be used in
2136 * conjunction with wait_marked_extents, so this will usually be
2137 * the_next_eb->start - 1.
2138 */
2139 void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start,
2140 u64 end)
2141 {
2142 struct eb_batch batch;
2143 unsigned long start_index = (start >> fs_info->nodesize_bits);
2144 unsigned long end_index = (end >> fs_info->nodesize_bits);
2145
2146 eb_batch_init(&batch);
2147 while (start_index <= end_index) {
2148 struct extent_buffer *eb;
2149 unsigned int nr_ebs;
2150
2151 nr_ebs = buffer_tree_get_ebs_tag(fs_info, &start_index, end_index,
2152 PAGECACHE_TAG_WRITEBACK, &batch);
2153 if (!nr_ebs)
2154 break;
2155
2156 while ((eb = eb_batch_next(&batch)) != NULL)
2157 wait_on_extent_buffer_writeback(eb);
2158 eb_batch_release(&batch);
2159 cond_resched();
2160 }
2161 }
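
/*
 * Usage sketch (assumed caller context): a caller walking dirty metadata
 * ranges can wait on each one before moving to the next, e.g.:
 *
 *	btrfs_btree_wait_writeback_range(fs_info, range_start,
 *					 next_eb_start - 1);
 *
 * with next_eb_start - 1 being the inclusive end described in the function's
 * comment, so the wait stays bounded to already submitted extent buffers.
 */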
2162
2163 int btree_write_cache_pages(struct address_space *mapping,
2164 struct writeback_control *wbc)
2165 {
2166 struct btrfs_eb_write_context ctx = { .wbc = wbc };
2167 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
2168 int ret = 0;
2169 int done = 0;
2170 int nr_to_write_done = 0;
2171 struct eb_batch batch;
2172 unsigned int nr_ebs;
2173 unsigned long index;
2174 unsigned long end;
2175 int scanned = 0;
2176 xa_mark_t tag;
2177
2178 eb_batch_init(&batch);
2179 if (wbc->range_cyclic) {
2180 index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->nodesize_bits);
2181 end = -1;
2182
2183 /*
2184 * Starting from the beginning does not need to cycle over the
2185 * range, so mark it as scanned.
2186 */
2187 scanned = (index == 0);
2188 } else {
2189 index = (wbc->range_start >> fs_info->nodesize_bits);
2190 end = (wbc->range_end >> fs_info->nodesize_bits);
2191
2192 scanned = 1;
2193 }
2194 if (wbc->sync_mode == WB_SYNC_ALL)
2195 tag = PAGECACHE_TAG_TOWRITE;
2196 else
2197 tag = PAGECACHE_TAG_DIRTY;
2198 btrfs_zoned_meta_io_lock(fs_info);
2199 retry:
2200 if (wbc->sync_mode == WB_SYNC_ALL)
2201 buffer_tree_tag_for_writeback(fs_info, index, end);
2202 while (!done && !nr_to_write_done && (index <= end) &&
2203 (nr_ebs = buffer_tree_get_ebs_tag(fs_info, &index, end, tag, &batch))) {
2204 struct extent_buffer *eb;
2205
2206 while ((eb = eb_batch_next(&batch)) != NULL) {
2207 ctx.eb = eb;
2208
2209 ret = btrfs_check_meta_write_pointer(eb->fs_info, &ctx);
2210 if (ret) {
2211 if (ret == -EBUSY)
2212 ret = 0;
2213
2214 if (ret) {
2215 done = 1;
2216 break;
2217 }
2218 continue;
2219 }
2220
2221 if (!lock_extent_buffer_for_io(eb, wbc))
2222 continue;
2223
2224 /* Implies write in zoned mode. */
2225 if (ctx.zoned_bg) {
2226 /* Mark the last eb in the block group. */
2227 btrfs_schedule_zone_finish_bg(ctx.zoned_bg, eb);
2228 ctx.zoned_bg->meta_write_pointer += eb->len;
2229 }
2230 write_one_eb(eb, wbc);
2231 }
2232 nr_to_write_done = (wbc->nr_to_write <= 0);
2233 eb_batch_release(&batch);
2234 cond_resched();
2235 }
2236 if (!scanned && !done) {
2237 /*
2238 * We hit the last page and there is more work to be done: wrap
2239 * back to the start of the file
2240 */
2241 scanned = 1;
2242 index = 0;
2243 goto retry;
2244 }
2245 /*
2246 * If something went wrong, don't allow any metadata write bio to be
2247 * submitted.
2248 *
2249 * This would prevent use-after-free if we had dirty pages not
2250 * cleaned up, which can still happen with fuzzed images.
2251 *
2252 * - Bad extent tree
2253 * Allowing existing tree blocks to be allocated for other trees.
2254 *
2255 * - Log tree operations
2256 * Existing tree blocks get allocated to the log tree, which bumps
2257 * their generation, then they get cleaned in tree re-balance.
2258 * Such a tree block will not be written back, since it's clean,
2259 * thus no WRITTEN flag is set.
2260 * And after the log is written back, this tree block is not tracked
2261 * by any dirty extent_io_tree.
2262 *
2263 * - Offending tree block gets re-dirtied from its original owner
2264 * Since it has a bumped generation and no WRITTEN flag, it can be
2265 * reused without COWing. This tree block will not be tracked by
2266 * btrfs_transaction::dirty_pages.
2267 *
2268 * Now such a dirty tree block will not be cleaned by any dirty
2269 * extent io tree. Thus we don't want to submit such a wild eb
2270 * if the fs already has errors.
2271 *
2272 * We can get ret > 0 from submit_extent_folio() indicating how many ebs
2273 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2274 */
2275 if (ret > 0)
2276 ret = 0;
2277 if (!ret && BTRFS_FS_ERROR(fs_info))
2278 ret = -EROFS;
2279
2280 if (ctx.zoned_bg)
2281 btrfs_put_block_group(ctx.zoned_bg);
2282 btrfs_zoned_meta_io_unlock(fs_info);
2283 return ret;
2284 }
2285
2286 /*
2287 * Walk the list of dirty pages of the given address space and write all of them.
2288 *
2289 * @mapping: address space structure to write
2290 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2291 * @bio_ctrl: holds context for the write, namely the bio
2292 *
2293 * If a page is already under I/O, write_cache_pages() skips it, even
2294 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2295 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2296 * and msync() need to guarantee that all the data which was dirty at the time
2297 * the call was made get new I/O started against them. If wbc->sync_mode is
2298 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2299 * existing IO to complete.
2300 */
2301 static int extent_write_cache_pages(struct address_space *mapping,
2302 struct btrfs_bio_ctrl *bio_ctrl)
2303 {
2304 struct writeback_control *wbc = bio_ctrl->wbc;
2305 struct inode *inode = mapping->host;
2306 int ret = 0;
2307 int done = 0;
2308 int nr_to_write_done = 0;
2309 struct folio_batch fbatch;
2310 unsigned int nr_folios;
2311 pgoff_t index;
2312 pgoff_t end; /* Inclusive */
2313 pgoff_t done_index;
2314 int range_whole = 0;
2315 int scanned = 0;
2316 xa_mark_t tag;
2317
2318 /*
2319 * We have to hold onto the inode so that ordered extents can do their
2320 * work when the IO finishes. The alternative to this is failing to add
2321 * an ordered extent if the igrab() fails there and that is a huge pain
2322 * to deal with, so instead just hold onto the inode throughout the
2323 * writepages operation. If it fails here we are freeing up the inode
2324 * anyway and we'd rather not waste our time writing out stuff that is
2325 * going to be truncated anyway.
2326 */
2327 if (!igrab(inode))
2328 return 0;
2329
2330 folio_batch_init(&fbatch);
2331 if (wbc->range_cyclic) {
2332 index = mapping->writeback_index; /* Start from prev offset */
2333 end = -1;
2334 /*
2335 * Starting from the beginning does not need to cycle over the
2336 * range, so mark it as scanned.
2337 */
2338 scanned = (index == 0);
2339 } else {
2340 index = wbc->range_start >> PAGE_SHIFT;
2341 end = wbc->range_end >> PAGE_SHIFT;
2342 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2343 range_whole = 1;
2344 scanned = 1;
2345 }
2346
2347 /*
2348 * We do the tagged writepage as long as the snapshot flush bit is set
2349 * and we are the first one to do the filemap_flush() on this inode.
2350 *
2351 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2352 * not race in and drop the bit.
2353 */
2354 if (range_whole && wbc->nr_to_write == LONG_MAX &&
2355 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2356 &BTRFS_I(inode)->runtime_flags))
2357 wbc->tagged_writepages = 1;
2358
2359 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2360 tag = PAGECACHE_TAG_TOWRITE;
2361 else
2362 tag = PAGECACHE_TAG_DIRTY;
2363 retry:
2364 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2365 tag_pages_for_writeback(mapping, index, end);
2366 done_index = index;
2367 while (!done && !nr_to_write_done && (index <= end) &&
2368 (nr_folios = filemap_get_folios_tag(mapping, &index,
2369 end, tag, &fbatch))) {
2370 unsigned i;
2371
2372 for (i = 0; i < nr_folios; i++) {
2373 struct folio *folio = fbatch.folios[i];
2374
2375 done_index = folio_next_index(folio);
2376 /*
2377 * At this point we hold neither the i_pages lock nor
2378 * the folio lock: the folio may be truncated or
2379 * invalidated (changing folio->mapping to NULL).
2380 */
2381 if (!folio_trylock(folio)) {
2382 submit_write_bio(bio_ctrl, 0);
2383 folio_lock(folio);
2384 }
2385
2386 if (unlikely(folio->mapping != mapping)) {
2387 folio_unlock(folio);
2388 continue;
2389 }
2390
2391 if (!folio_test_dirty(folio)) {
2392 /* Someone wrote it for us. */
2393 folio_unlock(folio);
2394 continue;
2395 }
2396
2397 /*
2398 * For subpage case, compression can lead to mixed
2399 * writeback and dirty flags, e.g:
2400 * 0 32K 64K 96K 128K
2401 * | |//////||/////| |//|
2402 *
2403 * In the above case, [32K, 96K) is asynchronously submitted
2404 * for compression, and [124K, 128K) needs to be written back.
2405 *
2406 * If we didn't wait for writeback on page 64K, [124K, 128K)
2407 * won't be submitted as the page still has writeback flag
2408 * and will be skipped in the next check.
2409 *
2410 * This mixed writeback and dirty case is only possible for
2411 * subpage case.
2412 *
2413 * TODO: Remove this check after migrating compression to
2414 * regular submission.
2415 */
2416 if (wbc->sync_mode != WB_SYNC_NONE ||
2417 btrfs_is_subpage(inode_to_fs_info(inode), folio)) {
2418 if (folio_test_writeback(folio))
2419 submit_write_bio(bio_ctrl, 0);
2420 folio_wait_writeback(folio);
2421 }
2422
2423 if (folio_test_writeback(folio) ||
2424 !folio_clear_dirty_for_io(folio)) {
2425 folio_unlock(folio);
2426 continue;
2427 }
2428
2429 ret = extent_writepage(folio, bio_ctrl);
2430 if (ret < 0) {
2431 done = 1;
2432 break;
2433 }
2434
2435 /*
2436 * The filesystem may choose to bump up nr_to_write.
2437 * We have to make sure to honor the new nr_to_write
2438 * at any time.
2439 */
2440 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2441 wbc->nr_to_write <= 0);
2442 }
2443 folio_batch_release(&fbatch);
2444 cond_resched();
2445 }
2446 if (!scanned && !done) {
2447 /*
2448 * We hit the last page and there is more work to be done: wrap
2449 * back to the start of the file
2450 */
2451 scanned = 1;
2452 index = 0;
2453
2454 /*
2455 * If we're looping we could run into a page that is locked by a
2456 * writer and that writer could be waiting on writeback for a
2457 * page in our current bio, and thus deadlock, so flush the
2458 * write bio here.
2459 */
2460 submit_write_bio(bio_ctrl, 0);
2461 goto retry;
2462 }
2463
2464 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2465 mapping->writeback_index = done_index;
2466
2467 btrfs_add_delayed_iput(BTRFS_I(inode));
2468 return ret;
2469 }
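
/*
 * For illustration only (field subset assumed): a data-integrity flush as
 * described in the comment above reaches extent_write_cache_pages() with a
 * writeback_control roughly like:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 * which makes the code above tag folios with PAGECACHE_TAG_TOWRITE and wait
 * on writeback, while background memory-cleaning writeback uses WB_SYNC_NONE
 * and a bounded nr_to_write, allowing busy folios to be skipped.
 */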
2470
2471 /*
2472 * Submit the pages in the range to bio for call sites whose delalloc range has
2473 * already been run (aka, ordered extent inserted) and all pages are still
2474 * locked.
2475 */
2476 void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
2477 u64 start, u64 end, struct writeback_control *wbc,
2478 bool pages_dirty)
2479 {
2480 bool found_error = false;
2481 int ret = 0;
2482 struct address_space *mapping = inode->i_mapping;
2483 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2484 const u32 sectorsize = fs_info->sectorsize;
2485 loff_t i_size = i_size_read(inode);
2486 u64 cur = start;
2487 struct btrfs_bio_ctrl bio_ctrl = {
2488 .wbc = wbc,
2489 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2490 };
2491
2492 if (wbc->no_cgroup_owner)
2493 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2494
2495 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2496
2497 while (cur <= end) {
2498 u64 cur_end;
2499 u32 cur_len;
2500 struct folio *folio;
2501
2502 folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
2503
2504 /*
2505 * This shouldn't happen, as the pages are pinned and locked; this
2506 * code is just a safety net and shouldn't actually be run.
2507 */
2508 if (IS_ERR(folio)) {
2509 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2510 cur_len = cur_end + 1 - cur;
2511 btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
2512 cur, cur_len, false);
2513 mapping_set_error(mapping, PTR_ERR(folio));
2514 cur = cur_end;
2515 continue;
2516 }
2517
2518 cur_end = min_t(u64, folio_end(folio) - 1, end);
2519 cur_len = cur_end + 1 - cur;
2520
2521 ASSERT(folio_test_locked(folio));
2522 if (pages_dirty && folio != locked_folio)
2523 ASSERT(folio_test_dirty(folio));
2524
2525 /*
2526 * Set the submission bitmap to submit all sectors.
2527 * extent_writepage_io() will do the truncation correctly.
2528 */
2529 bio_ctrl.submit_bitmap = (unsigned long)-1;
2530 ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
2531 &bio_ctrl, i_size);
2532 if (ret == 1)
2533 goto next_page;
2534
2535 if (ret)
2536 mapping_set_error(mapping, ret);
2537 btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
2538 if (ret < 0)
2539 found_error = true;
2540 next_page:
2541 folio_put(folio);
2542 cur = cur_end + 1;
2543 }
2544
2545 submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2546 }
2547
2548 int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2549 {
2550 struct inode *inode = mapping->host;
2551 int ret = 0;
2552 struct btrfs_bio_ctrl bio_ctrl = {
2553 .wbc = wbc,
2554 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2555 };
2556
2557 /*
2558 * Allow only a single thread to do the reloc work in zoned mode to
2559 * protect the write pointer updates.
2560 */
2561 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2562 ret = extent_write_cache_pages(mapping, &bio_ctrl);
2563 submit_write_bio(&bio_ctrl, ret);
2564 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2565 return ret;
2566 }
2567
2568 void btrfs_readahead(struct readahead_control *rac)
2569 {
2570 struct btrfs_bio_ctrl bio_ctrl = {
2571 .opf = REQ_OP_READ | REQ_RAHEAD,
2572 .ractl = rac
2573 };
2574 struct folio *folio;
2575 struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
2576 const u64 start = readahead_pos(rac);
2577 const u64 end = start + readahead_length(rac) - 1;
2578 struct extent_state *cached_state = NULL;
2579 struct extent_map *em_cached = NULL;
2580 u64 prev_em_start = (u64)-1;
2581
2582 lock_extents_for_read(inode, start, end, &cached_state);
2583
2584 while ((folio = readahead_folio(rac)) != NULL)
2585 btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
2586
2587 btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
2588
2589 if (em_cached)
2590 btrfs_free_extent_map(em_cached);
2591 submit_one_bio(&bio_ctrl);
2592 }
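
/*
 * Both btrfs_writepages() and btrfs_readahead() are entered through the data
 * inode's address_space_operations. A sketch of the relevant subset (see the
 * aops definition in inode.c for the authoritative table):
 *
 *	static const struct address_space_operations btrfs_aops = {
 *		.writepages	= btrfs_writepages,
 *		.readahead	= btrfs_readahead,
 *		...
 *	};
 */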
2593
2594 /*
2595 * Basic invalidate_folio code. This waits on any locked or writeback
2596 * ranges corresponding to the folio, and then deletes any extent state
2597 * records from the tree.
2598 */
2599 int extent_invalidate_folio(struct extent_io_tree *tree,
2600 struct folio *folio, size_t offset)
2601 {
2602 struct extent_state *cached_state = NULL;
2603 u64 start = folio_pos(folio);
2604 u64 end = start + folio_size(folio) - 1;
2605 size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2606
2607 /* This function is only called for the btree inode */
2608 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2609
2610 start += ALIGN(offset, blocksize);
2611 if (start > end)
2612 return 0;
2613
2614 btrfs_lock_extent(tree, start, end, &cached_state);
2615 folio_wait_writeback(folio);
2616
2617 /*
2618 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2619 * so here we only need to unlock the extent range to free any
2620 * existing extent state.
2621 */
2622 btrfs_unlock_extent(tree, start, end, &cached_state);
2623 return 0;
2624 }
2625
2626 /*
2627 * A helper for struct address_space_operations::release_folio, this tests for
2628 * areas of the folio that are locked or under IO and drops the related state
2629 * bits if it is safe to drop the folio.
2630 */
2631 static bool try_release_extent_state(struct extent_io_tree *tree,
2632 struct folio *folio)
2633 {
2634 struct extent_state *cached_state = NULL;
2635 u64 start = folio_pos(folio);
2636 u64 end = start + folio_size(folio) - 1;
2637 u32 range_bits;
2638 u32 clear_bits;
2639 bool ret = false;
2640 int ret2;
2641
2642 btrfs_get_range_bits(tree, start, end, &range_bits, &cached_state);
2643
2644 /*
2645 * We can release the folio if it's locked only for ordered extent
2646 * completion, since that doesn't require using the folio.
2647 */
2648 if ((range_bits & EXTENT_LOCKED) &&
2649 !(range_bits & EXTENT_FINISHING_ORDERED))
2650 goto out;
2651
2652 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW |
2653 EXTENT_CTLBITS | EXTENT_QGROUP_RESERVED |
2654 EXTENT_FINISHING_ORDERED);
2655 /*
2656 * At this point we can safely clear everything except the locked,
2657 * nodatasum, delalloc new and finishing ordered bits. The delalloc new
2658 * bit will be cleared by ordered extent completion.
2659 */
2660 ret2 = btrfs_clear_extent_bit(tree, start, end, clear_bits, &cached_state);
2661 /*
2662 * If btrfs_clear_extent_bit() failed for ENOMEM reasons, we can't allow the
2663 * release to continue.
2664 */
2665 if (ret2 == 0)
2666 ret = true;
2667 out:
2668 btrfs_free_extent_state(cached_state);
2669
2670 return ret;
2671 }
2672
2673 /*
2674 * A helper for release_folio. As long as there are no locked extents
2675 * in the range corresponding to the folio, both state records and extent
2676 * map records are removed.
2677 */
2678 bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
2679 {
2680 u64 start = folio_pos(folio);
2681 u64 end = start + folio_size(folio) - 1;
2682 struct btrfs_inode *inode = folio_to_inode(folio);
2683 struct extent_io_tree *io_tree = &inode->io_tree;
2684
2685 while (start <= end) {
2686 const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2687 const u64 len = end - start + 1;
2688 struct extent_map_tree *extent_tree = &inode->extent_tree;
2689 struct extent_map *em;
2690
2691 write_lock(&extent_tree->lock);
2692 em = btrfs_lookup_extent_mapping(extent_tree, start, len);
2693 if (!em) {
2694 write_unlock(&extent_tree->lock);
2695 break;
2696 }
2697 if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2698 write_unlock(&extent_tree->lock);
2699 btrfs_free_extent_map(em);
2700 break;
2701 }
2702 if (btrfs_test_range_bit_exists(io_tree, em->start,
2703 btrfs_extent_map_end(em) - 1,
2704 EXTENT_LOCKED))
2705 goto next;
2706 /*
2707 * If it's not in the list of modified extents, used by a fast
2708 * fsync, we can remove it. If it's being logged we can safely
2709 * remove it since fsync took an extra reference on the em.
2710 */
2711 if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2712 goto remove_em;
2713 /*
2714 * If it's in the list of modified extents, remove it only if
2715 * its generation is older than the current one, in which case
2716 * we don't need it for a fast fsync. Otherwise don't remove it,
2717 * we could be racing with an ongoing fast fsync that could miss
2718 * the new extent.
2719 */
2720 if (em->generation >= cur_gen)
2721 goto next;
2722 remove_em:
2723 /*
2724 * We only remove extent maps that are not in the list of
2725 * modified extents or that are in the list but with a
2726 * generation lower than the current generation, so there is no
2727 * need to set the full fsync flag on the inode (it hurts the
2728 * fsync performance for workloads with a data size that exceeds
2729 * or is close to the system's memory).
2730 */
2731 btrfs_remove_extent_mapping(inode, em);
2732 /* Once for the inode's extent map tree. */
2733 btrfs_free_extent_map(em);
2734 next:
2735 start = btrfs_extent_map_end(em);
2736 write_unlock(&extent_tree->lock);
2737
2738 /* Once for us, for the lookup_extent_mapping() reference. */
2739 btrfs_free_extent_map(em);
2740
2741 if (need_resched()) {
2742 /*
2743 * If we need to resched but we can't block just exit
2744 * and leave any remaining extent maps.
2745 */
2746 if (!gfpflags_allow_blocking(mask))
2747 break;
2748
2749 cond_resched();
2750 }
2751 }
2752 return try_release_extent_state(io_tree, folio);
2753 }
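
/*
 * Caller-side sketch (assumed shape, see the data inode's release_folio
 * callback in inode.c for the real one): a release_folio implementation is
 * expected to refuse folios that are still dirty or under writeback and only
 * then hand the folio to this helper:
 *
 *	if (folio_test_writeback(folio) || folio_test_dirty(folio))
 *		return false;
 *	return try_release_extent_mapping(folio, gfp_flags);
 */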
2754
2755 static int extent_buffer_under_io(const struct extent_buffer *eb)
2756 {
2757 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
2758 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2759 }
2760
2761 static bool folio_range_has_eb(struct folio *folio)
2762 {
2763 struct btrfs_folio_state *bfs;
2764
2765 lockdep_assert_held(&folio->mapping->i_private_lock);
2766
2767 if (folio_test_private(folio)) {
2768 bfs = folio_get_private(folio);
2769 if (atomic_read(&bfs->eb_refs))
2770 return true;
2771 }
2772 return false;
2773 }
2774
2775 static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
2776 {
2777 struct btrfs_fs_info *fs_info = eb->fs_info;
2778 struct address_space *mapping = folio->mapping;
2779 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2780
2781 /*
2782 * For mapped eb, we're going to change the folio private, which should
2783 * be done under the i_private_lock.
2784 */
2785 if (mapped)
2786 spin_lock(&mapping->i_private_lock);
2787
2788 if (!folio_test_private(folio)) {
2789 if (mapped)
2790 spin_unlock(&mapping->i_private_lock);
2791 return;
2792 }
2793
2794 if (!btrfs_meta_is_subpage(fs_info)) {
2795 /*
2796 * We do this since we'll remove the pages after we've removed
2797 * the eb from the xarray, so we could race and have this page
2798 * now attached to the new eb. So only clear folio if it's
2799 * still connected to this eb.
2800 */
2801 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
2802 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2803 BUG_ON(folio_test_dirty(folio));
2804 BUG_ON(folio_test_writeback(folio));
2805 /* We need to make sure we haven't been attached to a new eb. */
2806 folio_detach_private(folio);
2807 }
2808 if (mapped)
2809 spin_unlock(&mapping->i_private_lock);
2810 return;
2811 }
2812
2813 /*
2814 * For subpage, we can have dummy eb with folio private attached. In
2815 * this case, we can directly detach the private as such folio is only
2816 * attached to one dummy eb, no sharing.
2817 */
2818 if (!mapped) {
2819 btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
2820 return;
2821 }
2822
2823 btrfs_folio_dec_eb_refs(fs_info, folio);
2824
2825 /*
2826 * We can only detach the folio private if there are no other ebs in the
2827 * page range and no unfinished IO.
2828 */
2829 if (!folio_range_has_eb(folio))
2830 btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA);
2831
2832 spin_unlock(&mapping->i_private_lock);
2833 }
2834
2835 /* Release all folios attached to the extent buffer */
2836 static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb)
2837 {
2838 ASSERT(!extent_buffer_under_io(eb));
2839
2840 for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
2841 struct folio *folio = eb->folios[i];
2842
2843 if (!folio)
2844 continue;
2845
2846 detach_extent_buffer_folio(eb, folio);
2847 }
2848 }
2849
2850 /*
2851 * Helper for releasing the extent buffer.
2852 */
2853 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
2854 {
2855 btrfs_release_extent_buffer_folios(eb);
2856 btrfs_leak_debug_del_eb(eb);
2857 kmem_cache_free(extent_buffer_cache, eb);
2858 }
2859
2860 static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info,
2861 u64 start)
2862 {
2863 struct extent_buffer *eb = NULL;
2864
2865 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
2866 eb->start = start;
2867 eb->len = fs_info->nodesize;
2868 eb->fs_info = fs_info;
2869 init_rwsem(&eb->lock);
2870
2871 btrfs_leak_debug_add_eb(eb);
2872
2873 spin_lock_init(&eb->refs_lock);
2874 refcount_set(&eb->refs, 1);
2875
2876 ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE);
2877
2878 return eb;
2879 }
2880
2881 /*
2882 * For use in eb allocation error cleanup paths, as btrfs_release_extent_buffer()
2883 * does not call folio_put(), and we need to set the folios to NULL so that
2884 * btrfs_release_extent_buffer() will not detach them a second time.
2885 */
2886 static void cleanup_extent_buffer_folios(struct extent_buffer *eb)
2887 {
2888 const int num_folios = num_extent_folios(eb);
2889
2890 /* We cannot use num_extent_folios() as the loop bound, as eb->folios changes. */
2891 for (int i = 0; i < num_folios; i++) {
2892 ASSERT(eb->folios[i]);
2893 detach_extent_buffer_folio(eb, eb->folios[i]);
2894 folio_put(eb->folios[i]);
2895 eb->folios[i] = NULL;
2896 }
2897 }
2898
2899 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
2900 {
2901 struct extent_buffer *new;
2902 int num_folios;
2903 int ret;
2904
2905 new = __alloc_extent_buffer(src->fs_info, src->start);
2906 if (new == NULL)
2907 return NULL;
2908
2909 /*
2910 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
2911 * btrfs_release_extent_buffer() has different behavior for
2912 * UNMAPPED subpage extent buffers.
2913 */
2914 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
2915
2916 ret = alloc_eb_folio_array(new, false);
2917 if (ret)
2918 goto release_eb;
2919
2920 ASSERT(num_extent_folios(src) == num_extent_folios(new),
2921 "%d != %d", num_extent_folios(src), num_extent_folios(new));
2922 /* Explicitly use the cached folio count from now on. */
2923 num_folios = num_extent_folios(src);
2924 for (int i = 0; i < num_folios; i++) {
2925 struct folio *folio = new->folios[i];
2926
2927 ret = attach_extent_buffer_folio(new, folio, NULL);
2928 if (ret < 0)
2929 goto cleanup_folios;
2930 WARN_ON(folio_test_dirty(folio));
2931 }
2932 for (int i = 0; i < num_folios; i++)
2933 folio_put(new->folios[i]);
2934
2935 copy_extent_buffer_full(new, src);
2936 set_extent_buffer_uptodate(new);
2937
2938 return new;
2939
2940 cleanup_folios:
2941 cleanup_extent_buffer_folios(new);
2942 release_eb:
2943 btrfs_release_extent_buffer(new);
2944 return NULL;
2945 }
2946
2947 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2948 u64 start)
2949 {
2950 struct extent_buffer *eb;
2951 int ret;
2952
2953 eb = __alloc_extent_buffer(fs_info, start);
2954 if (!eb)
2955 return NULL;
2956
2957 ret = alloc_eb_folio_array(eb, false);
2958 if (ret)
2959 goto release_eb;
2960
2961 for (int i = 0; i < num_extent_folios(eb); i++) {
2962 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
2963 if (ret < 0)
2964 goto cleanup_folios;
2965 }
2966 for (int i = 0; i < num_extent_folios(eb); i++)
2967 folio_put(eb->folios[i]);
2968
2969 set_extent_buffer_uptodate(eb);
2970 btrfs_set_header_nritems(eb, 0);
2971 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2972
2973 return eb;
2974
2975 cleanup_folios:
2976 cleanup_extent_buffer_folios(eb);
2977 release_eb:
2978 btrfs_release_extent_buffer(eb);
2979 return NULL;
2980 }
2981
2982 static void check_buffer_tree_ref(struct extent_buffer *eb)
2983 {
2984 int refs;
2985 /*
2986 * The TREE_REF bit is first set when the extent_buffer is added to the
2987 * xarray. It is also reset, if unset, when a new reference is created
2988 * by find_extent_buffer.
2989 *
2990 * It is only cleared in two cases: freeing the last non-tree
2991 * reference to the extent_buffer when its STALE bit is set or
2992 * calling release_folio when the tree reference is the only reference.
2993 *
2994 * In both cases, care is taken to ensure that the extent_buffer's
2995 * pages are not under io. However, release_folio can be concurrently
2996 * called with creating new references, which is prone to race
2997 * conditions between the calls to check_buffer_tree_ref in those
2998 * codepaths and clearing TREE_REF in try_release_extent_buffer.
2999 *
3000 * The actual lifetime of the extent_buffer in the xarray is adequately
3001 * protected by the refcount, but the TREE_REF bit and its corresponding
3002 * reference are not. To protect against this class of races, we call
3003 * check_buffer_tree_ref() from the code paths which trigger io. Note that
3004 * once io is initiated, TREE_REF can no longer be cleared, so that is
3005 * the moment at which any such race is best fixed.
3006 */
3007 refs = refcount_read(&eb->refs);
3008 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3009 return;
3010
3011 spin_lock(&eb->refs_lock);
3012 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3013 refcount_inc(&eb->refs);
3014 spin_unlock(&eb->refs_lock);
3015 }
3016
3017 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
3018 {
3019 check_buffer_tree_ref(eb);
3020
3021 for (int i = 0; i < num_extent_folios(eb); i++)
3022 folio_mark_accessed(eb->folios[i]);
3023 }
3024
3025 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3026 u64 start)
3027 {
3028 struct extent_buffer *eb;
3029
3030 eb = find_extent_buffer_nolock(fs_info, start);
3031 if (!eb)
3032 return NULL;
3033 /*
3034 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3035 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3036 * another task running free_extent_buffer() might have seen that flag
3037 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3038 * writeback flags not set) and it's still in the tree (flag
3039 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3040 * decrementing the extent buffer's reference count twice. So here we
3041 * could race and increment the eb's reference count, clear its stale
3042 * flag, mark it as dirty and drop our reference before the other task
3043 * finishes executing free_extent_buffer, which would later result in
3044 * an attempt to free an extent buffer that is dirty.
3045 */
3046 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3047 spin_lock(&eb->refs_lock);
3048 spin_unlock(&eb->refs_lock);
3049 }
3050 mark_extent_buffer_accessed(eb);
3051 return eb;
3052 }
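
/*
 * Typical usage (assumed caller context): the returned eb holds an extra
 * reference that the caller owns:
 *
 *	eb = find_extent_buffer(fs_info, bytenr);
 *	if (eb) {
 *		... the buffer is already cached, use it ...
 *		free_extent_buffer(eb);
 *	} else {
 *		... fall back to alloc_extent_buffer() and a disk read ...
 *	}
 */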
3053
3054 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3055 u64 start)
3056 {
3057 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3058 struct extent_buffer *eb, *exists = NULL;
3059 int ret;
3060
3061 eb = find_extent_buffer(fs_info, start);
3062 if (eb)
3063 return eb;
3064 eb = alloc_dummy_extent_buffer(fs_info, start);
3065 if (!eb)
3066 return ERR_PTR(-ENOMEM);
3067 eb->fs_info = fs_info;
3068 again:
3069 xa_lock_irq(&fs_info->buffer_tree);
3070 exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->nodesize_bits,
3071 NULL, eb, GFP_NOFS);
3072 if (xa_is_err(exists)) {
3073 ret = xa_err(exists);
3074 xa_unlock_irq(&fs_info->buffer_tree);
3075 btrfs_release_extent_buffer(eb);
3076 return ERR_PTR(ret);
3077 }
3078 if (exists) {
3079 if (!refcount_inc_not_zero(&exists->refs)) {
3080 /* The extent buffer is being freed, retry. */
3081 xa_unlock_irq(&fs_info->buffer_tree);
3082 goto again;
3083 }
3084 xa_unlock_irq(&fs_info->buffer_tree);
3085 btrfs_release_extent_buffer(eb);
3086 return exists;
3087 }
3088 xa_unlock_irq(&fs_info->buffer_tree);
3089 check_buffer_tree_ref(eb);
3090
3091 return eb;
3092 #else
3093 /* Stub to avoid linker error when compiled with optimizations turned off. */
3094 return NULL;
3095 #endif
3096 }
3097
3098 static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
3099 struct folio *folio)
3100 {
3101 struct extent_buffer *exists;
3102
3103 lockdep_assert_held(&folio->mapping->i_private_lock);
3104
3105 /*
3106 * For subpage case, we completely rely on xarray to ensure we don't try
3107 * to insert two ebs for the same bytenr. So here we always return NULL
3108 * and just continue.
3109 */
3110 if (btrfs_meta_is_subpage(fs_info))
3111 return NULL;
3112
3113 /* Page not yet attached to an extent buffer */
3114 if (!folio_test_private(folio))
3115 return NULL;
3116
3117 /*
3118 * We could have already allocated an eb for this folio and attached one,
3119 * so let's see if we can get a ref on the existing eb, and if we can we
3120 * know it's good and we can just return that one, else we know we can
3121 * just overwrite folio private.
3122 */
3123 exists = folio_get_private(folio);
3124 if (refcount_inc_not_zero(&exists->refs))
3125 return exists;
3126
3127 WARN_ON(folio_test_dirty(folio));
3128 folio_detach_private(folio);
3129 return NULL;
3130 }
3131
3132 /*
3133 * Validate alignment constraints of eb at logical address @start.
3134 */
3135 static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3136 {
3137 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3138 btrfs_err(fs_info, "bad tree block start %llu", start);
3139 return true;
3140 }
3141
3142 if (fs_info->nodesize < PAGE_SIZE && !IS_ALIGNED(start, fs_info->nodesize)) {
3143 btrfs_err(fs_info,
3144 "tree block is not nodesize aligned, start %llu nodesize %u",
3145 start, fs_info->nodesize);
3146 return true;
3147 }
3148 if (fs_info->nodesize >= PAGE_SIZE &&
3149 !PAGE_ALIGNED(start)) {
3150 btrfs_err(fs_info,
3151 "tree block is not page aligned, start %llu nodesize %u",
3152 start, fs_info->nodesize);
3153 return true;
3154 }
3155 if (!IS_ALIGNED(start, fs_info->nodesize) &&
3156 !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
3157 btrfs_warn(fs_info,
3158 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
3159 start, fs_info->nodesize);
3160 }
3161 return false;
3162 }
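
/*
 * Worked example for the checks above (numbers assumed): with a 4K
 * sectorsize, 16K nodesize and 4K PAGE_SIZE, start == 16M + 2K fails the
 * sectorsize check, while start == 16M + 4K passes every hard check but
 * trips the one-time "not nodesize aligned" warning since it is not a
 * multiple of 16K. On a 64K page (subpage) system with the same 16K
 * nodesize, start == 16M + 4K is rejected outright, as tree blocks must
 * then be nodesize aligned.
 */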
3163
3164 /*
3165 * Return 0 if eb->folios[i] is attached to btree inode successfully.
3166 * Return >0 if there is already another extent buffer for the range,
3167 * and @found_eb_ret will be updated.
3168 * Return -EAGAIN if the filemap has an existing folio but with a different
3169 * size than @eb.
3170 * The caller needs to free the existing folios and retry using the same order.
3171 */
3172 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
3173 struct btrfs_folio_state *prealloc,
3174 struct extent_buffer **found_eb_ret)
3175 {
3176
3177 struct btrfs_fs_info *fs_info = eb->fs_info;
3178 struct address_space *mapping = fs_info->btree_inode->i_mapping;
3179 const pgoff_t index = eb->start >> PAGE_SHIFT;
3180 struct folio *existing_folio;
3181 int ret;
3182
3183 ASSERT(found_eb_ret);
3184
3185 /* Caller should ensure the folio exists. */
3186 ASSERT(eb->folios[i]);
3187
3188 retry:
3189 existing_folio = NULL;
3190 ret = filemap_add_folio(mapping, eb->folios[i], index + i,
3191 GFP_NOFS | __GFP_NOFAIL);
3192 if (!ret)
3193 goto finish;
3194
3195 existing_folio = filemap_lock_folio(mapping, index + i);
3196 /* The page cache only exists for a very short time, just retry. */
3197 if (IS_ERR(existing_folio))
3198 goto retry;
3199
3200 /* For now, we should only have single-page folios for btree inode. */
3201 ASSERT(folio_nr_pages(existing_folio) == 1);
3202
3203 if (folio_size(existing_folio) != eb->folio_size) {
3204 folio_unlock(existing_folio);
3205 folio_put(existing_folio);
3206 return -EAGAIN;
3207 }
3208
3209 finish:
3210 spin_lock(&mapping->i_private_lock);
3211 if (existing_folio && btrfs_meta_is_subpage(fs_info)) {
3212 /* We're going to reuse the existing page, can drop our folio now. */
3213 __free_page(folio_page(eb->folios[i], 0));
3214 eb->folios[i] = existing_folio;
3215 } else if (existing_folio) {
3216 struct extent_buffer *existing_eb;
3217
3218 existing_eb = grab_extent_buffer(fs_info, existing_folio);
3219 if (existing_eb) {
3220 /* The extent buffer still exists, we can use it directly. */
3221 *found_eb_ret = existing_eb;
3222 spin_unlock(&mapping->i_private_lock);
3223 folio_unlock(existing_folio);
3224 folio_put(existing_folio);
3225 return 1;
3226 }
3227 /* The extent buffer no longer exists, we can reuse the folio. */
3228 __free_page(folio_page(eb->folios[i], 0));
3229 eb->folios[i] = existing_folio;
3230 }
3231 eb->folio_size = folio_size(eb->folios[i]);
3232 eb->folio_shift = folio_shift(eb->folios[i]);
3233 /* Should not fail, as we have preallocated the memory. */
3234 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
3235 ASSERT(!ret);
3236 /*
3237 * To inform that we have an extra eb under allocation, so that
3238 * detach_extent_buffer_folio() won't release the folio private when the
3239 * eb hasn't been inserted into the xarray yet.
3240 *
3241 * The ref will be decreased when the eb releases the page, in
3242 * detach_extent_buffer_folio(). Thus it needs no special handling in the
3243 * error path.
3244 */
3245 btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
3246 spin_unlock(&mapping->i_private_lock);
3247 return 0;
3248 }
3249
3250 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3251 u64 start, u64 owner_root, int level)
3252 {
3253 int attached = 0;
3254 struct extent_buffer *eb;
3255 struct extent_buffer *existing_eb = NULL;
3256 struct btrfs_folio_state *prealloc = NULL;
3257 u64 lockdep_owner = owner_root;
3258 bool page_contig = true;
3259 int uptodate = 1;
3260 int ret;
3261
3262 if (check_eb_alignment(fs_info, start))
3263 return ERR_PTR(-EINVAL);
3264
3265 #if BITS_PER_LONG == 32
3266 if (start >= MAX_LFS_FILESIZE) {
3267 btrfs_err_rl(fs_info,
3268 "extent buffer %llu is beyond 32bit page cache limit", start);
3269 btrfs_err_32bit_limit(fs_info);
3270 return ERR_PTR(-EOVERFLOW);
3271 }
3272 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3273 btrfs_warn_32bit_limit(fs_info);
3274 #endif
3275
3276 eb = find_extent_buffer(fs_info, start);
3277 if (eb)
3278 return eb;
3279
3280 eb = __alloc_extent_buffer(fs_info, start);
3281 if (!eb)
3282 return ERR_PTR(-ENOMEM);
3283
3284 /*
3285 * The reloc trees are just snapshots, so we need them to appear to be
3286 * just like any other fs tree WRT lockdep.
3287 */
3288 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3289 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3290
3291 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3292
3293 /*
3294 * Preallocate folio private for the subpage case, so that we won't
3295 * allocate memory with i_private_lock nor the folio lock held.
3296 *
3297 * The memory will be freed by attach_extent_buffer_folio() or freed
3298 * manually if we exit earlier.
3299 */
3300 if (btrfs_meta_is_subpage(fs_info)) {
3301 prealloc = btrfs_alloc_folio_state(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA);
3302 if (IS_ERR(prealloc)) {
3303 ret = PTR_ERR(prealloc);
3304 goto out;
3305 }
3306 }
3307
3308 reallocate:
3309 /* Allocate all pages first. */
3310 ret = alloc_eb_folio_array(eb, true);
3311 if (ret < 0) {
3312 btrfs_free_folio_state(prealloc);
3313 goto out;
3314 }
3315
3316 /* Attach all pages to the filemap. */
3317 for (int i = 0; i < num_extent_folios(eb); i++) {
3318 struct folio *folio;
3319
3320 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
3321 if (ret > 0) {
3322 ASSERT(existing_eb);
3323 goto out;
3324 }
3325
3326 /*
3327 * TODO: Special handling for a corner case where the order of
3328 * folios mismatch between the new eb and filemap.
3329 *
3330 * This happens when:
3331 *
3332 * - the new eb is using higher order folio
3333 *
3334 * - the filemap is still using 0-order folios for the range
3335 * This can happen when the previous eb allocation could not get a
3336 * higher order folio for the range and fell back to 0-order ones.
3337 *
3338 * - the existing eb has already been freed
3339 *
3340 * In this case, we have to free the existing folios first, and
3341 * re-allocate using the same order.
3342 * Thankfully this is not going to happen yet, as we're still
3343 * using 0-order folios.
3344 */
3345 if (unlikely(ret == -EAGAIN)) {
3346 DEBUG_WARN("folio order mismatch between new eb and filemap");
3347 goto reallocate;
3348 }
3349 attached++;
3350
3351 /*
3352 * eb->folios[] is only reliable after attach_eb_folio_to_filemap(),
3353 * as we may choose to reuse the existing page cache folio and free
3354 * the page we allocated.
3355 */
3356 folio = eb->folios[i];
3357 WARN_ON(btrfs_meta_folio_test_dirty(folio, eb));
3358
3359 /*
3360 * Check if the current page is physically contiguous with the previous
3361 * eb page.
3362 * At this stage, either we allocated a large folio, in which case @i
3363 * can only be 0, or we fell back to per-page allocation.
3364 */
3365 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3366 page_contig = false;
3367
3368 if (!btrfs_meta_folio_test_uptodate(folio, eb))
3369 uptodate = 0;
3370
3371 /*
3372 * We can't unlock the pages just yet since the extent buffer
3373 * hasn't been properly inserted into the xarray yet; that would open a
3374 * race with btree_release_folio(), which could free a page while we
3375 * are still filling in all pages for the buffer, and we could crash.
3376 */
3377 }
3378 if (uptodate)
3379 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3380 /* All pages are physically contiguous, can skip cross page handling. */
3381 if (page_contig)
3382 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
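/*
 * Insert the new eb unless somebody else beat us to it: the cmpxchg with
 * a NULL "old" value only succeeds while the slot is still empty, and
 * otherwise hands back whatever eb is already stored there.  If that
 * existing eb is on its way to being freed (refcount already zero), loop
 * until its release removes it from the tree.
 */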
3383 again:
3384 xa_lock_irq(&fs_info->buffer_tree);
3385 existing_eb = __xa_cmpxchg(&fs_info->buffer_tree,
3386 start >> fs_info->nodesize_bits, NULL, eb,
3387 GFP_NOFS);
3388 if (xa_is_err(existing_eb)) {
3389 ret = xa_err(existing_eb);
3390 xa_unlock_irq(&fs_info->buffer_tree);
3391 goto out;
3392 }
3393 if (existing_eb) {
3394 if (!refcount_inc_not_zero(&existing_eb->refs)) {
3395 xa_unlock_irq(&fs_info->buffer_tree);
3396 goto again;
3397 }
3398 xa_unlock_irq(&fs_info->buffer_tree);
3399 goto out;
3400 }
3401 xa_unlock_irq(&fs_info->buffer_tree);
3402
3403 /* add one reference for the tree */
3404 check_buffer_tree_ref(eb);
3405
3406 /*
3407 * Now it's safe to unlock the pages because any calls to
3408 * btree_release_folio will correctly detect that a page belongs to a
3409 * live buffer and won't free them prematurely.
3410 */
3411 for (int i = 0; i < num_extent_folios(eb); i++) {
3412 folio_unlock(eb->folios[i]);
3413 /*
3414 * A folio that has been added to an address_space mapping
3415 * should not continue holding the refcount from its original
3416 * allocation indefinitely.
3417 */
3418 folio_put(eb->folios[i]);
3419 }
3420 return eb;
3421
3422 out:
3423 WARN_ON(!refcount_dec_and_test(&eb->refs));
3424
3425 /*
3426 * Any attached folios need to be detached before we unlock them. When
3427 * inserting new folios into the mapping we attach our eb to each folio,
3428 * and a concurrent allocator that fails to insert its own folio will
3429 * look up the folio at that index and grab the eb attached to it. We do
3430 * not want it to grab this eb, as we're getting ready to free it, so
3431 * detach each folio first and only then unlock it.
3432 *
3433 * Note: the bound is num_extent_pages() as we need to go through all the slots.
3434 */
3435 for (int i = 0; i < num_extent_pages(eb); i++) {
3436 struct folio *folio = eb->folios[i];
3437
3438 if (i < attached) {
3439 ASSERT(folio);
3440 detach_extent_buffer_folio(eb, folio);
3441 folio_unlock(folio);
3442 } else if (!folio) {
3443 continue;
3444 }
3445
3446 folio_put(folio);
3447 eb->folios[i] = NULL;
3448 }
3449 btrfs_release_extent_buffer(eb);
3450 if (ret < 0)
3451 return ERR_PTR(ret);
3452 ASSERT(existing_eb);
3453 return existing_eb;
3454 }
3455
3456 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3457 {
3458 struct extent_buffer *eb =
3459 container_of(head, struct extent_buffer, rcu_head);
3460
3461 kmem_cache_free(extent_buffer_cache, eb);
3462 }
3463
3464 static int release_extent_buffer(struct extent_buffer *eb)
3465 __releases(&eb->refs_lock)
3466 {
3467 lockdep_assert_held(&eb->refs_lock);
3468
3469 if (refcount_dec_and_test(&eb->refs)) {
3470 struct btrfs_fs_info *fs_info = eb->fs_info;
3471
3472 spin_unlock(&eb->refs_lock);
3473
3474 /*
3475 * We're erasing, theoretically there will be no allocations, so
3476 * just use GFP_ATOMIC.
3477 *
3478 * We use cmpxchg instead of erase because we do not know if
3479 * this eb is actually in the tree or not, we could be cleaning
3480 * up an eb that we allocated but never inserted into the tree.
3481 * Thus use cmpxchg to remove it from the tree if it is there,
3482 * or leave the other entry if this isn't in the tree.
3483 *
3484 * The documentation says that putting a NULL value is the same
3485 * as erase as long as XA_FLAGS_ALLOC is not set, which it isn't
3486 * in this case.
3487 */
3488 xa_cmpxchg_irq(&fs_info->buffer_tree,
3489 eb->start >> fs_info->nodesize_bits, eb, NULL,
3490 GFP_ATOMIC);
3491
3492 btrfs_leak_debug_del_eb(eb);
3493 /* Should be safe to release folios at this point. */
3494 btrfs_release_extent_buffer_folios(eb);
3495 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3496 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3497 kmem_cache_free(extent_buffer_cache, eb);
3498 return 1;
3499 }
3500 #endif
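/*
 * Buffer tree lookups walk the xarray under RCU and only then try to
 * grab a reference, so the eb itself must stay valid for a full grace
 * period after it has been removed from the tree, hence the deferred
 * free.
 */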
3501 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3502 return 1;
3503 }
3504 spin_unlock(&eb->refs_lock);
3505
3506 return 0;
3507 }
3508
3509 void free_extent_buffer(struct extent_buffer *eb)
3510 {
3511 int refs;
3512 if (!eb)
3513 return;
3514
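/*
 * Lockless fast path: while the count is above the thresholds that need
 * the locked slow path below (refs == 1 for unmapped ebs, refs <= 3 for
 * mapped ones), simply decrement it without taking refs_lock.
 */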
3515 refs = refcount_read(&eb->refs);
3516 while (1) {
3517 if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) {
3518 if (refs == 1)
3519 break;
3520 } else if (refs <= 3) {
3521 break;
3522 }
3523
3524 /* Optimization to avoid locking eb->refs_lock. */
3525 if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
3526 return;
3527 }
3528
3529 spin_lock(&eb->refs_lock);
3530 if (refcount_read(&eb->refs) == 2 &&
3531 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3532 !extent_buffer_under_io(eb) &&
3533 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3534 refcount_dec(&eb->refs);
3535
3536 /*
3537 * I know this is terrible, but it's temporary until we stop tracking
3538 * the uptodate bits and such for the extent buffers.
3539 */
3540 release_extent_buffer(eb);
3541 }
3542
3543 void free_extent_buffer_stale(struct extent_buffer *eb)
3544 {
3545 if (!eb)
3546 return;
3547
3548 spin_lock(&eb->refs_lock);
3549 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3550
3551 if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3552 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3553 refcount_dec(&eb->refs);
3554 release_extent_buffer(eb);
3555 }
3556
3557 static void btree_clear_folio_dirty_tag(struct folio *folio)
3558 {
3559 ASSERT(!folio_test_dirty(folio));
3560 ASSERT(folio_test_locked(folio));
3561 xa_lock_irq(&folio->mapping->i_pages);
3562 if (!folio_test_dirty(folio))
3563 __xa_clear_mark(&folio->mapping->i_pages, folio->index,
3564 PAGECACHE_TAG_DIRTY);
3565 xa_unlock_irq(&folio->mapping->i_pages);
3566 }
3567
3568 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3569 struct extent_buffer *eb)
3570 {
3571 struct btrfs_fs_info *fs_info = eb->fs_info;
3572
3573 btrfs_assert_tree_write_locked(eb);
3574
3575 if (trans && btrfs_header_generation(eb) != trans->transid)
3576 return;
3577
3578 /*
3579 * Instead of clearing the dirty flag off of the buffer, mark it as
3580 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3581 * write-ordering in zoned mode, without the need to later re-dirty
3582 * the extent_buffer.
3583 *
3584 * The actual zeroout of the buffer will happen later in
3585 * btree_csum_one_bio.
3586 */
3587 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3588 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3589 return;
3590 }
3591
3592 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3593 return;
3594
3595 buffer_tree_clear_mark(eb, PAGECACHE_TAG_DIRTY);
3596 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3597 fs_info->dirty_metadata_batch);
3598
3599 for (int i = 0; i < num_extent_folios(eb); i++) {
3600 struct folio *folio = eb->folios[i];
3601 bool last;
3602
3603 if (!folio_test_dirty(folio))
3604 continue;
3605 folio_lock(folio);
3606 last = btrfs_meta_folio_clear_and_test_dirty(folio, eb);
3607 if (last)
3608 btree_clear_folio_dirty_tag(folio);
3609 folio_unlock(folio);
3610 }
3611 WARN_ON(refcount_read(&eb->refs) == 0);
3612 }
3613
3614 void set_extent_buffer_dirty(struct extent_buffer *eb)
3615 {
3616 bool was_dirty;
3617
3618 check_buffer_tree_ref(eb);
3619
3620 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3621
3622 WARN_ON(refcount_read(&eb->refs) == 0);
3623 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3624 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
3625
3626 if (!was_dirty) {
3627 bool subpage = btrfs_meta_is_subpage(eb->fs_info);
3628
3629 /*
3630 * For the subpage case, we can have other extent buffers in the
3631 * same folio, and btrfs_clear_buffer_dirty() has to clear the
3632 * folio dirty bit without the subpage lock held.
3633 * This can cause a race where the folio dirty bit we just set gets
3634 * cleared again.
3635 *
3636 * Thankfully, btrfs_clear_buffer_dirty() locks the folio for
3637 * other reasons, so we can use the folio lock to prevent the
3638 * above race.
3639 */
3640 if (subpage)
3641 folio_lock(eb->folios[0]);
3642 for (int i = 0; i < num_extent_folios(eb); i++)
3643 btrfs_meta_folio_set_dirty(eb->folios[i], eb);
3644 buffer_tree_set_mark(eb, PAGECACHE_TAG_DIRTY);
3645 if (subpage)
3646 folio_unlock(eb->folios[0]);
3647 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3648 eb->len,
3649 eb->fs_info->dirty_metadata_batch);
3650 }
3651 #ifdef CONFIG_BTRFS_DEBUG
3652 for (int i = 0; i < num_extent_folios(eb); i++)
3653 ASSERT(folio_test_dirty(eb->folios[i]));
3654 #endif
3655 }
3656
3657 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3658 {
3659
3660 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3661 for (int i = 0; i < num_extent_folios(eb); i++) {
3662 struct folio *folio = eb->folios[i];
3663
3664 if (!folio)
3665 continue;
3666
3667 btrfs_meta_folio_clear_uptodate(folio, eb);
3668 }
3669 }
3670
3671 void set_extent_buffer_uptodate(struct extent_buffer *eb)
3672 {
3673
3674 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3675 for (int i = 0; i < num_extent_folios(eb); i++)
3676 btrfs_meta_folio_set_uptodate(eb->folios[i], eb);
3677 }
3678
3679 static void clear_extent_buffer_reading(struct extent_buffer *eb)
3680 {
3681 clear_and_wake_up_bit(EXTENT_BUFFER_READING, &eb->bflags);
3682 }
3683
3684 static void end_bbio_meta_read(struct btrfs_bio *bbio)
3685 {
3686 struct extent_buffer *eb = bbio->private;
3687 bool uptodate = !bbio->bio.bi_status;
3688
3689 /*
3690 * If the extent buffer is marked UPTODATE before the read operation
3691 * completes, other calls to read_extent_buffer_pages() will return
3692 * early without waiting for the read to finish, causing data races.
3693 */
3694 WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
3695
3696 eb->read_mirror = bbio->mirror_num;
3697
3698 if (uptodate &&
3699 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3700 uptodate = false;
3701
3702 if (uptodate)
3703 set_extent_buffer_uptodate(eb);
3704 else
3705 clear_extent_buffer_uptodate(eb);
3706
3707 clear_extent_buffer_reading(eb);
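/* Drop the extra reference taken in read_extent_buffer_pages_nowait(). */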
3708 free_extent_buffer(eb);
3709
3710 bio_put(&bbio->bio);
3711 }
3712
3713 int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
3714 const struct btrfs_tree_parent_check *check)
3715 {
3716 struct btrfs_bio *bbio;
3717
3718 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3719 return 0;
3720
3721 /*
3722 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3723 * operation, which could potentially still be in flight. In this case
3724 * we simply want to return an error.
3725 */
3726 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3727 return -EIO;
3728
3729 /* Someone else is already reading the buffer, our caller can wait for it. */
3730 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3731 return 0;
3732
3733 /*
3734 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
3735 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
3736 * started and finished reading the same eb. In this case, UPTODATE
3737 * will now be set, and we shouldn't read it in again.
3738 */
3739 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
3740 clear_extent_buffer_reading(eb);
3741 return 0;
3742 }
3743
3744 eb->read_mirror = 0;
3745 check_buffer_tree_ref(eb);
3746 refcount_inc(&eb->refs);
3747
3748 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3749 REQ_OP_READ | REQ_META, eb->fs_info,
3750 end_bbio_meta_read, eb);
3751 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3752 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3753 bbio->file_offset = eb->start;
3754 memcpy(&bbio->parent_check, check, sizeof(*check));
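/*
 * Queue only the part of each folio that is actually covered by this eb;
 * with subpage metadata the eb can start partway into a folio and span
 * less than the whole folio, hence the clamping below.
 */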
3755 for (int i = 0; i < num_extent_folios(eb); i++) {
3756 struct folio *folio = eb->folios[i];
3757 u64 range_start = max_t(u64, eb->start, folio_pos(folio));
3758 u32 range_len = min_t(u64, folio_end(folio),
3759 eb->start + eb->len) - range_start;
3760
3761 bio_add_folio_nofail(&bbio->bio, folio, range_len,
3762 offset_in_folio(folio, range_start));
3763 }
3764 btrfs_submit_bbio(bbio, mirror_num);
3765 return 0;
3766 }
3767
3768 int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
3769 const struct btrfs_tree_parent_check *check)
3770 {
3771 int ret;
3772
3773 ret = read_extent_buffer_pages_nowait(eb, mirror_num, check);
3774 if (ret < 0)
3775 return ret;
3776
3777 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3778 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3779 return -EIO;
3780 return 0;
3781 }
3782
3783 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3784 unsigned long len)
3785 {
3786 btrfs_warn(eb->fs_info,
3787 "access to eb bytenr %llu len %u out of range start %lu len %lu",
3788 eb->start, eb->len, start, len);
3789 DEBUG_WARN();
3790
3791 return true;
3792 }
3793
3794 /*
3795 * Check if the [start, start + len) range is valid before reading/writing
3796 * the eb.
3797 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3798 *
3799 * The caller should not touch the dst/src memory if this function returns an error.
3800 */
3801 static inline int check_eb_range(const struct extent_buffer *eb,
3802 unsigned long start, unsigned long len)
3803 {
3804 unsigned long offset;
3805
3806 /* start, start + len should not go beyond eb->len nor overflow */
3807 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3808 return report_eb_range(eb, start, len);
3809
3810 return false;
3811 }
3812
3813 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3814 unsigned long start, unsigned long len)
3815 {
3816 const int unit_size = eb->folio_size;
3817 size_t cur;
3818 size_t offset;
3819 char *dst = (char *)dstv;
3820 unsigned long i = get_eb_folio_index(eb, start);
3821
3822 if (check_eb_range(eb, start, len)) {
3823 /*
3824 * Invalid range hit, zero the destination, so callers won't get
3825 * random garbage from their uninitialized memory.
3826 */
3827 memset(dstv, 0, len);
3828 return;
3829 }
3830
3831 if (eb->addr) {
3832 memcpy(dstv, eb->addr + start, len);
3833 return;
3834 }
3835
3836 offset = get_eb_offset_in_folio(eb, start);
3837
3838 while (len > 0) {
3839 char *kaddr;
3840
3841 cur = min(len, unit_size - offset);
3842 kaddr = folio_address(eb->folios[i]);
3843 memcpy(dst, kaddr + offset, cur);
3844
3845 dst += cur;
3846 len -= cur;
3847 offset = 0;
3848 i++;
3849 }
3850 }
3851
3852 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
3853 void __user *dstv,
3854 unsigned long start, unsigned long len)
3855 {
3856 const int unit_size = eb->folio_size;
3857 size_t cur;
3858 size_t offset;
3859 char __user *dst = (char __user *)dstv;
3860 unsigned long i = get_eb_folio_index(eb, start);
3861 int ret = 0;
3862
3863 WARN_ON(start > eb->len);
3864 WARN_ON(start + len > eb->len);
3865
3866 if (eb->addr) {
3867 if (copy_to_user_nofault(dstv, eb->addr + start, len))
3868 ret = -EFAULT;
3869 return ret;
3870 }
3871
3872 offset = get_eb_offset_in_folio(eb, start);
3873
3874 while (len > 0) {
3875 char *kaddr;
3876
3877 cur = min(len, unit_size - offset);
3878 kaddr = folio_address(eb->folios[i]);
3879 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
3880 ret = -EFAULT;
3881 break;
3882 }
3883
3884 dst += cur;
3885 len -= cur;
3886 offset = 0;
3887 i++;
3888 }
3889
3890 return ret;
3891 }
3892
3893 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
3894 unsigned long start, unsigned long len)
3895 {
3896 const int unit_size = eb->folio_size;
3897 size_t cur;
3898 size_t offset;
3899 char *kaddr;
3900 char *ptr = (char *)ptrv;
3901 unsigned long i = get_eb_folio_index(eb, start);
3902 int ret = 0;
3903
3904 if (check_eb_range(eb, start, len))
3905 return -EINVAL;
3906
3907 if (eb->addr)
3908 return memcmp(ptrv, eb->addr + start, len);
3909
3910 offset = get_eb_offset_in_folio(eb, start);
3911
3912 while (len > 0) {
3913 cur = min(len, unit_size - offset);
3914 kaddr = folio_address(eb->folios[i]);
3915 ret = memcmp(ptr, kaddr + offset, cur);
3916 if (ret)
3917 break;
3918
3919 ptr += cur;
3920 len -= cur;
3921 offset = 0;
3922 i++;
3923 }
3924 return ret;
3925 }
3926
3927 /*
3928 * Check that the extent buffer is uptodate.
3929 *
3930 * For the regular (non-subpage) case, check if the folio at index @i is uptodate.
3931 * For the subpage case, check if the range covered by the eb is marked uptodate in the subpage bitmap.
3932 */
3933 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
3934 {
3935 struct btrfs_fs_info *fs_info = eb->fs_info;
3936 struct folio *folio = eb->folios[i];
3937
3938 ASSERT(folio);
3939
3940 /*
3941 * If we are using the commit root we could potentially clear a page
3942 * Uptodate while we're using the extent buffer that we've previously
3943 * looked up. We don't want to complain in this case, as the page was
3944 * valid before, we just didn't write it out. Instead we want to catch
3945 * the case where we didn't actually read the block properly, which
3946 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
3947 */
3948 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3949 return;
3950
3951 if (btrfs_meta_is_subpage(fs_info)) {
3952 folio = eb->folios[0];
3953 ASSERT(i == 0);
3954 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
3955 eb->start, eb->len)))
3956 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
3957 } else {
3958 WARN_ON(!folio_test_uptodate(folio));
3959 }
3960 }
3961
3962 static void __write_extent_buffer(const struct extent_buffer *eb,
3963 const void *srcv, unsigned long start,
3964 unsigned long len, bool use_memmove)
3965 {
3966 const int unit_size = eb->folio_size;
3967 size_t cur;
3968 size_t offset;
3969 char *kaddr;
3970 const char *src = (const char *)srcv;
3971 unsigned long i = get_eb_folio_index(eb, start);
3972 /* For unmapped (dummy) ebs, no need to check their uptodate status. */
3973 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3974
3975 if (check_eb_range(eb, start, len))
3976 return;
3977
3978 if (eb->addr) {
3979 if (use_memmove)
3980 memmove(eb->addr + start, srcv, len);
3981 else
3982 memcpy(eb->addr + start, srcv, len);
3983 return;
3984 }
3985
3986 offset = get_eb_offset_in_folio(eb, start);
3987
3988 while (len > 0) {
3989 if (check_uptodate)
3990 assert_eb_folio_uptodate(eb, i);
3991
3992 cur = min(len, unit_size - offset);
3993 kaddr = folio_address(eb->folios[i]);
3994 if (use_memmove)
3995 memmove(kaddr + offset, src, cur);
3996 else
3997 memcpy(kaddr + offset, src, cur);
3998
3999 src += cur;
4000 len -= cur;
4001 offset = 0;
4002 i++;
4003 }
4004 }
4005
4006 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4007 unsigned long start, unsigned long len)
4008 {
4009 return __write_extent_buffer(eb, srcv, start, len, false);
4010 }
4011
4012 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4013 unsigned long start, unsigned long len)
4014 {
4015 const int unit_size = eb->folio_size;
4016 unsigned long cur = start;
4017
4018 if (eb->addr) {
4019 memset(eb->addr + start, c, len);
4020 return;
4021 }
4022
4023 while (cur < start + len) {
4024 unsigned long index = get_eb_folio_index(eb, cur);
4025 unsigned int offset = get_eb_offset_in_folio(eb, cur);
4026 unsigned int cur_len = min(start + len - cur, unit_size - offset);
4027
4028 assert_eb_folio_uptodate(eb, index);
4029 memset(folio_address(eb->folios[index]) + offset, c, cur_len);
4030
4031 cur += cur_len;
4032 }
4033 }
4034
4035 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4036 unsigned long len)
4037 {
4038 if (check_eb_range(eb, start, len))
4039 return;
4040 return memset_extent_buffer(eb, 0, start, len);
4041 }
4042
4043 void copy_extent_buffer_full(const struct extent_buffer *dst,
4044 const struct extent_buffer *src)
4045 {
4046 const int unit_size = src->folio_size;
4047 unsigned long cur = 0;
4048
4049 ASSERT(dst->len == src->len);
4050
4051 while (cur < src->len) {
4052 unsigned long index = get_eb_folio_index(src, cur);
4053 unsigned long offset = get_eb_offset_in_folio(src, cur);
4054 unsigned long cur_len = min(src->len, unit_size - offset);
4055 void *addr = folio_address(src->folios[index]) + offset;
4056
4057 write_extent_buffer(dst, addr, cur, cur_len);
4058
4059 cur += cur_len;
4060 }
4061 }
4062
4063 void copy_extent_buffer(const struct extent_buffer *dst,
4064 const struct extent_buffer *src,
4065 unsigned long dst_offset, unsigned long src_offset,
4066 unsigned long len)
4067 {
4068 const int unit_size = dst->folio_size;
4069 u64 dst_len = dst->len;
4070 size_t cur;
4071 size_t offset;
4072 char *kaddr;
4073 unsigned long i = get_eb_folio_index(dst, dst_offset);
4074
4075 if (check_eb_range(dst, dst_offset, len) ||
4076 check_eb_range(src, src_offset, len))
4077 return;
4078
4079 WARN_ON(src->len != dst_len);
4080
4081 offset = get_eb_offset_in_folio(dst, dst_offset);
4082
4083 while (len > 0) {
4084 assert_eb_folio_uptodate(dst, i);
4085
4086 cur = min(len, (unsigned long)(unit_size - offset));
4087
4088 kaddr = folio_address(dst->folios[i]);
4089 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4090
4091 src_offset += cur;
4092 len -= cur;
4093 offset = 0;
4094 i++;
4095 }
4096 }
4097
4098 /*
4099 * Calculate the folio and offset of the byte containing the given bit number.
4100 *
4101 * @eb: the extent buffer
4102 * @start: offset of the bitmap item in the extent buffer
4103 * @nr: bit number
4104 * @folio_index: return index of the folio in the extent buffer that contains
4105 * the given bit number
4106 * @folio_offset: return offset into the folio given by folio_index
4107 *
4108 * This helper hides the ugliness of finding the byte in an extent buffer which
4109 * contains a given bit.
4110 */
4111 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4112 unsigned long start, unsigned long nr,
4113 unsigned long *folio_index,
4114 size_t *folio_offset)
4115 {
4116 size_t byte_offset = BIT_BYTE(nr);
4117 size_t offset;
4118
4119 /*
4120 * The byte we want is the offset of the extent buffer + the offset of
4121 * the bitmap item in the extent buffer + the offset of the byte in the
4122 * bitmap item.
4123 */
4124 offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
4125
4126 *folio_index = offset >> eb->folio_shift;
4127 *folio_offset = offset_in_eb_folio(eb, offset);
4128 }
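/*
 * A worked example for eb_bitmap_offset() with hypothetical values,
 * assuming 4K folios and an eb that starts on a folio boundary (so
 * offset_in_eb_folio(eb, eb->start) == 0): for start == 100 and nr == 75,
 * byte_offset == 75 / 8 == 9 and offset == 109, giving *folio_index == 0
 * and *folio_offset == 109.
 */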
4129
4130 /*
4131 * Determine whether a bit in a bitmap item is set.
4132 *
4133 * @eb: the extent buffer
4134 * @start: offset of the bitmap item in the extent buffer
4135 * @nr: bit number to test
4136 */
4137 bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4138 unsigned long nr)
4139 {
4140 unsigned long i;
4141 size_t offset;
4142 u8 *kaddr;
4143
4144 eb_bitmap_offset(eb, start, nr, &i, &offset);
4145 assert_eb_folio_uptodate(eb, i);
4146 kaddr = folio_address(eb->folios[i]);
4147 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4148 }
4149
4150 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4151 {
4152 unsigned long index = get_eb_folio_index(eb, bytenr);
4153
4154 if (check_eb_range(eb, bytenr, 1))
4155 return NULL;
4156 return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4157 }
4158
4159 /*
4160 * Set an area of a bitmap to 1.
4161 *
4162 * @eb: the extent buffer
4163 * @start: offset of the bitmap item in the extent buffer
4164 * @pos: bit number of the first bit
4165 * @len: number of bits to set
4166 */
4167 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4168 unsigned long pos, unsigned long len)
4169 {
4170 unsigned int first_byte = start + BIT_BYTE(pos);
4171 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4172 const bool same_byte = (first_byte == last_byte);
4173 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4174 u8 *kaddr;
4175
4176 if (same_byte)
4177 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4178
4179 /* Handle the first byte. */
4180 kaddr = extent_buffer_get_byte(eb, first_byte);
4181 *kaddr |= mask;
4182 if (same_byte)
4183 return;
4184
4185 /* Handle the byte aligned part. */
4186 ASSERT(first_byte + 1 <= last_byte);
4187 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4188
4189 /* Handle the last byte. */
4190 kaddr = extent_buffer_get_byte(eb, last_byte);
4191 *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4192 }
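/*
 * A worked example for extent_buffer_bitmap_set() with hypothetical
 * values, using the byte-granularity mask helpers defined earlier in
 * this file: pos == 3 and len == 7 set bits 3..9 of the bitmap.  The
 * first byte gets bits 3..7 via BITMAP_FIRST_BYTE_MASK(3) == 0xf8, the
 * byte-aligned middle part is empty, and the last byte gets bits 8..9
 * via BITMAP_LAST_BYTE_MASK(10) == 0x03.
 */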
4193
4194
4195 /*
4196 * Clear an area of a bitmap.
4197 *
4198 * @eb: the extent buffer
4199 * @start: offset of the bitmap item in the extent buffer
4200 * @pos: bit number of the first bit
4201 * @len: number of bits to clear
4202 */
4203 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4204 unsigned long start, unsigned long pos,
4205 unsigned long len)
4206 {
4207 unsigned int first_byte = start + BIT_BYTE(pos);
4208 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4209 const bool same_byte = (first_byte == last_byte);
4210 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4211 u8 *kaddr;
4212
4213 if (same_byte)
4214 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4215
4216 /* Handle the first byte. */
4217 kaddr = extent_buffer_get_byte(eb, first_byte);
4218 *kaddr &= ~mask;
4219 if (same_byte)
4220 return;
4221
4222 /* Handle the byte aligned part. */
4223 ASSERT(first_byte + 1 <= last_byte);
4224 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4225
4226 /* Handle the last byte. */
4227 kaddr = extent_buffer_get_byte(eb, last_byte);
4228 *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4229 }
4230
4231 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4232 {
4233 unsigned long distance = (src > dst) ? src - dst : dst - src;
4234 return distance < len;
4235 }
4236
4237 void memcpy_extent_buffer(const struct extent_buffer *dst,
4238 unsigned long dst_offset, unsigned long src_offset,
4239 unsigned long len)
4240 {
4241 const int unit_size = dst->folio_size;
4242 unsigned long cur_off = 0;
4243
4244 if (check_eb_range(dst, dst_offset, len) ||
4245 check_eb_range(dst, src_offset, len))
4246 return;
4247
4248 if (dst->addr) {
4249 const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4250
4251 if (use_memmove)
4252 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4253 else
4254 memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4255 return;
4256 }
4257
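/*
 * Walk the source range one folio-bounded chunk at a time; the chunk
 * length is bounded by the source folio (and the remaining length),
 * since __write_extent_buffer() already handles the destination crossing
 * folio boundaries.  Overlap is re-evaluated per chunk so memmove is
 * only used where needed.
 */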
4258 while (cur_off < len) {
4259 unsigned long cur_src = cur_off + src_offset;
4260 unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4261 unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4262 unsigned long cur_len = min(src_offset + len - cur_src,
4263 unit_size - folio_off);
4264 void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4265 const bool use_memmove = areas_overlap(src_offset + cur_off,
4266 dst_offset + cur_off, cur_len);
4267
4268 __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4269 use_memmove);
4270 cur_off += cur_len;
4271 }
4272 }
4273
4274 void memmove_extent_buffer(const struct extent_buffer *dst,
4275 unsigned long dst_offset, unsigned long src_offset,
4276 unsigned long len)
4277 {
4278 unsigned long dst_end = dst_offset + len - 1;
4279 unsigned long src_end = src_offset + len - 1;
4280
4281 if (check_eb_range(dst, dst_offset, len) ||
4282 check_eb_range(dst, src_offset, len))
4283 return;
4284
4285 if (dst_offset < src_offset) {
4286 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4287 return;
4288 }
4289
4290 if (dst->addr) {
4291 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4292 return;
4293 }
4294
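/*
 * Overlapping move with dst_offset >= src_offset: copy backwards in
 * folio-bounded chunks so that source bytes which still need to be read
 * are never overwritten first.
 */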
4295 while (len > 0) {
4296 unsigned long src_i;
4297 size_t cur;
4298 size_t dst_off_in_folio;
4299 size_t src_off_in_folio;
4300 void *src_addr;
4301 bool use_memmove;
4302
4303 src_i = get_eb_folio_index(dst, src_end);
4304
4305 dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4306 src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4307
4308 cur = min_t(unsigned long, len, src_off_in_folio + 1);
4309 cur = min(cur, dst_off_in_folio + 1);
4310
4311 src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4312 cur + 1;
4313 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4314 cur);
4315
4316 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4317 use_memmove);
4318
4319 dst_end -= cur;
4320 src_end -= cur;
4321 len -= cur;
4322 }
4323 }
4324
4325 static int try_release_subpage_extent_buffer(struct folio *folio)
4326 {
4327 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
4328 struct extent_buffer *eb;
4329 unsigned long start = (folio_pos(folio) >> fs_info->nodesize_bits);
4330 unsigned long index = start;
4331 unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1;
4332 int ret;
4333
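/*
 * The buffer tree is indexed by eb->start >> nodesize_bits, so the
 * [start, end] range above covers every eb that can live inside this
 * folio.
 */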
4334 xa_lock_irq(&fs_info->buffer_tree);
4335 xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) {
4336 /*
4337 * The same as try_release_extent_buffer(), to ensure the eb
4338 * won't disappear out from under us.
4339 */
4340 spin_lock(&eb->refs_lock);
4341 if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4342 spin_unlock(&eb->refs_lock);
4343 continue;
4344 }
4345
4346 /*
4347 * If the tree ref isn't set then we know the ref on this eb is a
4348 * real ref, so just stop the scan here, this eb will likely be
4349 * freed soon anyway.
4350 */
4351 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4352 spin_unlock(&eb->refs_lock);
4353 break;
4354 }
4355
4356 /*
4357 * Here we don't care about the return value, we will always
4358 * check the folio private at the end. And
4359 * release_extent_buffer() will release the refs_lock.
4360 */
4361 xa_unlock_irq(&fs_info->buffer_tree);
4362 release_extent_buffer(eb);
4363 xa_lock_irq(&fs_info->buffer_tree);
4364 }
4365 xa_unlock_irq(&fs_info->buffer_tree);
4366
4367 /*
4368 * Finally check whether the folio private has been cleared: if all the
4369 * ebs in the folio have been released, it should be cleared by now.
4370 */
4371 spin_lock(&folio->mapping->i_private_lock);
4372 if (!folio_test_private(folio))
4373 ret = 1;
4374 else
4375 ret = 0;
4376 spin_unlock(&folio->mapping->i_private_lock);
4377 return ret;
4378
4379 }
4380
4381 int try_release_extent_buffer(struct folio *folio)
4382 {
4383 struct extent_buffer *eb;
4384
4385 if (btrfs_meta_is_subpage(folio_to_fs_info(folio)))
4386 return try_release_subpage_extent_buffer(folio);
4387
4388 /*
4389 * We need to make sure nobody is changing folio private, as we rely on
4390 * folio private as the pointer to extent buffer.
4391 */
4392 spin_lock(&folio->mapping->i_private_lock);
4393 if (!folio_test_private(folio)) {
4394 spin_unlock(&folio->mapping->i_private_lock);
4395 return 1;
4396 }
4397
4398 eb = folio_get_private(folio);
4399 BUG_ON(!eb);
4400
4401 /*
4402 * This is a little awful but should be ok, we need to make sure that
4403 * the eb doesn't disappear out from under us while we're looking at
4404 * this page.
4405 */
4406 spin_lock(&eb->refs_lock);
4407 if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4408 spin_unlock(&eb->refs_lock);
4409 spin_unlock(&folio->mapping->i_private_lock);
4410 return 0;
4411 }
4412 spin_unlock(&folio->mapping->i_private_lock);
4413
4414 /*
4415 * If tree ref isn't set then we know the ref on this eb is a real ref,
4416 * so just return, this page will likely be freed soon anyway.
4417 */
4418 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4419 spin_unlock(&eb->refs_lock);
4420 return 0;
4421 }
4422
4423 return release_extent_buffer(eb);
4424 }
4425
4426 /*
4427 * Attempt to readahead a child block.
4428 *
4429 * @fs_info: the fs_info
4430 * @bytenr: bytenr to read
4431 * @owner_root: objectid of the root that owns this eb
4432 * @gen: generation for the uptodate check, can be 0
4433 * @level: level for the eb
4434 *
4435 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
4436 * normal uptodate check of the eb, without checking the generation. If we have
4437 * to read the block we will not block on anything.
4438 */
4439 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4440 u64 bytenr, u64 owner_root, u64 gen, int level)
4441 {
4442 struct btrfs_tree_parent_check check = {
4443 .level = level,
4444 .transid = gen
4445 };
4446 struct extent_buffer *eb;
4447 int ret;
4448
4449 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4450 if (IS_ERR(eb))
4451 return;
4452
4453 if (btrfs_buffer_uptodate(eb, gen, 1)) {
4454 free_extent_buffer(eb);
4455 return;
4456 }
4457
4458 ret = read_extent_buffer_pages_nowait(eb, 0, &check);
4459 if (ret < 0)
4460 free_extent_buffer_stale(eb);
4461 else
4462 free_extent_buffer(eb);
4463 }
4464
4465 /*
4466 * Readahead a node's child block.
4467 *
4468 * @node: parent node we're reading from
4469 * @slot: slot in the parent node for the child we want to read
4470 *
4471 * A helper for btrfs_readahead_tree_block(), we simply read the bytenr pointed
4472 * to by the slot in the node provided.
4473 */
4474 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4475 {
4476 btrfs_readahead_tree_block(node->fs_info,
4477 btrfs_node_blockptr(node, slot),
4478 btrfs_header_owner(node),
4479 btrfs_node_ptr_generation(node, slot),
4480 btrfs_header_level(node) - 1);
4481 }
4482