1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/sched/mm.h>
10 #include <linux/spinlock.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/prefetch.h>
16 #include <linux/fsverity.h>
17 #include "extent_io.h"
18 #include "extent-io-tree.h"
19 #include "extent_map.h"
20 #include "ctree.h"
21 #include "btrfs_inode.h"
22 #include "bio.h"
23 #include "locking.h"
24 #include "backref.h"
25 #include "disk-io.h"
26 #include "subpage.h"
27 #include "zoned.h"
28 #include "block-group.h"
29 #include "compression.h"
30 #include "fs.h"
31 #include "accessors.h"
32 #include "file-item.h"
33 #include "file.h"
34 #include "dev-replace.h"
35 #include "super.h"
36 #include "transaction.h"
37
38 static struct kmem_cache *extent_buffer_cache;
39
40 #ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
42 {
43 struct btrfs_fs_info *fs_info = eb->fs_info;
44 unsigned long flags;
45
46 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
47 list_add(&eb->leak_list, &fs_info->allocated_ebs);
48 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
49 }
50
static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
52 {
53 struct btrfs_fs_info *fs_info = eb->fs_info;
54 unsigned long flags;
55
56 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
57 list_del(&eb->leak_list);
58 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
59 }
60
void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
62 {
63 struct extent_buffer *eb;
64 unsigned long flags;
65
66 /*
67 * If we didn't get into open_ctree our allocated_ebs will not be
68 * initialized, so just skip this.
69 */
70 if (!fs_info->allocated_ebs.next)
71 return;
72
73 WARN_ON(!list_empty(&fs_info->allocated_ebs));
74 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
75 while (!list_empty(&fs_info->allocated_ebs)) {
76 eb = list_first_entry(&fs_info->allocated_ebs,
77 struct extent_buffer, leak_list);
78 pr_err(
79 "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
81 btrfs_header_owner(eb));
82 list_del(&eb->leak_list);
83 WARN_ON_ONCE(1);
84 kmem_cache_free(extent_buffer_cache, eb);
85 }
86 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
87 }
88 #else
89 #define btrfs_leak_debug_add_eb(eb) do {} while (0)
90 #define btrfs_leak_debug_del_eb(eb) do {} while (0)
91 #endif
92
93 /*
94 * Structure to record info about the bio being assembled, and other info like
95 * how many bytes are there before stripe/ordered extent boundary.
96 */
97 struct btrfs_bio_ctrl {
98 struct btrfs_bio *bbio;
99 enum btrfs_compression_type compress_type;
100 u32 len_to_oe_boundary;
101 blk_opf_t opf;
102 btrfs_bio_end_io_t end_io_func;
103 struct writeback_control *wbc;
104
105 /*
106 * The sectors of the page which are going to be submitted by
107 * extent_writepage_io().
108 * This is to avoid touching ranges covered by compression/inline.
109 */
110 unsigned long submit_bitmap;
111 };
112
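/*
 * Submit the bio currently assembled in @bio_ctrl, if any.
 *
 * Compressed reads are routed through the dedicated compressed read path,
 * everything else goes through btrfs_submit_bbio(). Ownership of the bio is
 * transferred to the end_io handler, so bio_ctrl->bbio is cleared afterwards.
 */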
static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
114 {
115 struct btrfs_bio *bbio = bio_ctrl->bbio;
116
117 if (!bbio)
118 return;
119
120 /* Caller should ensure the bio has at least some range added */
121 ASSERT(bbio->bio.bi_iter.bi_size);
122
123 if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
124 bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
125 btrfs_submit_compressed_read(bbio);
126 else
127 btrfs_submit_bbio(bbio, 0);
128
129 /* The bbio is owned by the end_io handler now */
130 bio_ctrl->bbio = NULL;
131 }
132
133 /*
134 * Submit or fail the current bio in the bio_ctrl structure.
135 */
static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
137 {
138 struct btrfs_bio *bbio = bio_ctrl->bbio;
139
140 if (!bbio)
141 return;
142
143 if (ret) {
144 ASSERT(ret < 0);
145 btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
146 /* The bio is owned by the end_io handler now */
147 bio_ctrl->bbio = NULL;
148 } else {
149 submit_one_bio(bio_ctrl);
150 }
151 }
152
int __init extent_buffer_init_cachep(void)
154 {
155 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
156 sizeof(struct extent_buffer), 0, 0,
157 NULL);
158 if (!extent_buffer_cache)
159 return -ENOMEM;
160
161 return 0;
162 }
163
void __cold extent_buffer_free_cachep(void)
165 {
166 /*
167 * Make sure all delayed rcu free are flushed before we
168 * destroy caches.
169 */
170 rcu_barrier();
171 kmem_cache_destroy(extent_buffer_cache);
172 }
173
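/*
 * Apply the requested @page_ops (set ordered, start/end writeback, unlock) to
 * the part of @folio covered by [@start, @end]. The @locked_folio is never
 * unlocked here, as its lock is owned by the caller.
 */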
static void process_one_folio(struct btrfs_fs_info *fs_info,
175 struct folio *folio, const struct folio *locked_folio,
176 unsigned long page_ops, u64 start, u64 end)
177 {
178 u32 len;
179
180 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
181 len = end + 1 - start;
182
183 if (page_ops & PAGE_SET_ORDERED)
184 btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
185 if (page_ops & PAGE_START_WRITEBACK) {
186 btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
187 btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
188 }
189 if (page_ops & PAGE_END_WRITEBACK)
190 btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
191
192 if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
193 btrfs_folio_end_lock(fs_info, folio, start, len);
194 }
195
static void __process_folios_contig(struct address_space *mapping,
197 const struct folio *locked_folio, u64 start,
198 u64 end, unsigned long page_ops)
199 {
200 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
201 pgoff_t start_index = start >> PAGE_SHIFT;
202 pgoff_t end_index = end >> PAGE_SHIFT;
203 pgoff_t index = start_index;
204 struct folio_batch fbatch;
205 int i;
206
207 folio_batch_init(&fbatch);
208 while (index <= end_index) {
209 int found_folios;
210
211 found_folios = filemap_get_folios_contig(mapping, &index,
212 end_index, &fbatch);
213 for (i = 0; i < found_folios; i++) {
214 struct folio *folio = fbatch.folios[i];
215
216 process_one_folio(fs_info, folio, locked_folio,
217 page_ops, start, end);
218 }
219 folio_batch_release(&fbatch);
220 cond_resched();
221 }
222 }
223
static noinline void __unlock_for_delalloc(const struct inode *inode,
225 const struct folio *locked_folio,
226 u64 start, u64 end)
227 {
228 unsigned long index = start >> PAGE_SHIFT;
229 unsigned long end_index = end >> PAGE_SHIFT;
230
231 ASSERT(locked_folio);
232 if (index == locked_folio->index && end_index == index)
233 return;
234
235 __process_folios_contig(inode->i_mapping, locked_folio, start, end,
236 PAGE_UNLOCK);
237 }
238
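/*
 * Lock all folios in the [@start, @end] delalloc range, except @locked_folio.
 *
 * Return 0 on success. If a folio is no longer dirty or has been remapped,
 * unlock everything processed so far and return -EAGAIN so the caller can
 * retry with a shortened range.
 */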
static noinline int lock_delalloc_folios(struct inode *inode,
240 const struct folio *locked_folio,
241 u64 start, u64 end)
242 {
243 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
244 struct address_space *mapping = inode->i_mapping;
245 pgoff_t start_index = start >> PAGE_SHIFT;
246 pgoff_t end_index = end >> PAGE_SHIFT;
247 pgoff_t index = start_index;
248 u64 processed_end = start;
249 struct folio_batch fbatch;
250
251 if (index == locked_folio->index && index == end_index)
252 return 0;
253
254 folio_batch_init(&fbatch);
255 while (index <= end_index) {
256 unsigned int found_folios, i;
257
258 found_folios = filemap_get_folios_contig(mapping, &index,
259 end_index, &fbatch);
260 if (found_folios == 0)
261 goto out;
262
263 for (i = 0; i < found_folios; i++) {
264 struct folio *folio = fbatch.folios[i];
265 u64 range_start;
266 u32 range_len;
267
268 if (folio == locked_folio)
269 continue;
270
271 folio_lock(folio);
272 if (!folio_test_dirty(folio) || folio->mapping != mapping) {
273 folio_unlock(folio);
274 goto out;
275 }
276 range_start = max_t(u64, folio_pos(folio), start);
277 range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
278 end + 1) - range_start;
279 btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
280
281 processed_end = range_start + range_len - 1;
282 }
283 folio_batch_release(&fbatch);
284 cond_resched();
285 }
286
287 return 0;
288 out:
289 folio_batch_release(&fbatch);
290 if (processed_end > start)
291 __unlock_for_delalloc(inode, locked_folio, start,
292 processed_end);
293 return -EAGAIN;
294 }
295
296 /*
297 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
298 * more than @max_bytes.
299 *
300 * @start: The original start bytenr to search.
301 * Will store the extent range start bytenr.
302 * @end: The original end bytenr of the search range
303 * Will store the extent range end bytenr.
304 *
305 * Return true if we find a delalloc range which starts inside the original
306 * range, and @start/@end will store the delalloc range start/end.
307 *
308 * Return false if we can't find any delalloc range which starts inside the
309 * original range, and @start/@end will be the non-delalloc range start/end.
310 */
311 EXPORT_FOR_TESTS
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
313 struct folio *locked_folio,
314 u64 *start, u64 *end)
315 {
316 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
317 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
318 const u64 orig_start = *start;
319 const u64 orig_end = *end;
320 /* The sanity tests may not set a valid fs_info. */
321 u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
322 u64 delalloc_start;
323 u64 delalloc_end;
324 bool found;
325 struct extent_state *cached_state = NULL;
326 int ret;
327 int loops = 0;
328
329 /* Caller should pass a valid @end to indicate the search range end */
330 ASSERT(orig_end > orig_start);
331
332 /* The range should at least cover part of the folio */
333 ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
334 orig_end <= folio_pos(locked_folio)));
335 again:
336 /* step one, find a bunch of delalloc bytes starting at start */
337 delalloc_start = *start;
338 delalloc_end = 0;
339 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
340 max_bytes, &cached_state);
341 if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
342 *start = delalloc_start;
343
344 /* @delalloc_end can be -1, never go beyond @orig_end */
345 *end = min(delalloc_end, orig_end);
346 free_extent_state(cached_state);
347 return false;
348 }
349
350 /*
351 * start comes from the offset of locked_folio. We have to lock
352 * folios in order, so we can't process delalloc bytes before
353 * locked_folio
354 */
355 if (delalloc_start < *start)
356 delalloc_start = *start;
357
358 /*
359 * make sure to limit the number of folios we try to lock down
360 */
361 if (delalloc_end + 1 - delalloc_start > max_bytes)
362 delalloc_end = delalloc_start + max_bytes - 1;
363
	/* step two, lock all the folios after the folio that has start */
365 ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
366 delalloc_end);
367 ASSERT(!ret || ret == -EAGAIN);
368 if (ret == -EAGAIN) {
		/*
		 * Some of the folios are gone, let's avoid looping by
		 * shortening the size of the delalloc range we're searching.
		 */
372 free_extent_state(cached_state);
373 cached_state = NULL;
374 if (!loops) {
375 max_bytes = PAGE_SIZE;
376 loops = 1;
377 goto again;
378 } else {
379 found = false;
380 goto out_failed;
381 }
382 }
383
384 /* step three, lock the state bits for the whole range */
385 lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
386
387 /* then test to make sure it is all still delalloc */
388 ret = test_range_bit(tree, delalloc_start, delalloc_end,
389 EXTENT_DELALLOC, cached_state);
390
391 unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
392 if (!ret) {
393 __unlock_for_delalloc(inode, locked_folio, delalloc_start,
394 delalloc_end);
395 cond_resched();
396 goto again;
397 }
398 *start = delalloc_start;
399 *end = delalloc_end;
400 out_failed:
401 return found;
402 }
403
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
405 const struct folio *locked_folio,
406 struct extent_state **cached,
407 u32 clear_bits, unsigned long page_ops)
408 {
409 clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
410
411 __process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
412 end, page_ops);
413 }
414
static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
416 {
417 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
418
419 if (!fsverity_active(folio->mapping->host) ||
420 btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
421 start >= i_size_read(folio->mapping->host))
422 return true;
423 return fsverity_verify_folio(folio);
424 }
425
static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
427 {
428 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
429
430 ASSERT(folio_pos(folio) <= start &&
431 start + len <= folio_pos(folio) + PAGE_SIZE);
432
433 if (uptodate && btrfs_verify_folio(folio, start, len))
434 btrfs_folio_set_uptodate(fs_info, folio, start, len);
435 else
436 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
437
438 if (!btrfs_is_subpage(fs_info, folio->mapping))
439 folio_unlock(folio);
440 else
441 btrfs_folio_end_lock(fs_info, folio, start, len);
442 }
443
444 /*
445 * After a write IO is done, we need to:
446 *
447 * - clear the uptodate bits on error
448 * - clear the writeback bits in the extent tree for the range
 * - folio_end_writeback() if there is no more pending io for the folio
450 *
451 * Scheduling is not allowed, so the extent state tree is expected
452 * to have one and only one object corresponding to this IO.
453 */
static void end_bbio_data_write(struct btrfs_bio *bbio)
455 {
456 struct btrfs_fs_info *fs_info = bbio->fs_info;
457 struct bio *bio = &bbio->bio;
458 int error = blk_status_to_errno(bio->bi_status);
459 struct folio_iter fi;
460 const u32 sectorsize = fs_info->sectorsize;
461
462 ASSERT(!bio_flagged(bio, BIO_CLONED));
463 bio_for_each_folio_all(fi, bio) {
464 struct folio *folio = fi.folio;
465 u64 start = folio_pos(folio) + fi.offset;
466 u32 len = fi.length;
467
468 /* Only order 0 (single page) folios are allowed for data. */
469 ASSERT(folio_order(folio) == 0);
470
471 /* Our read/write should always be sector aligned. */
472 if (!IS_ALIGNED(fi.offset, sectorsize))
473 btrfs_err(fs_info,
474 "partial page write in btrfs with offset %zu and length %zu",
475 fi.offset, fi.length);
476 else if (!IS_ALIGNED(fi.length, sectorsize))
477 btrfs_info(fs_info,
478 "incomplete page write with offset %zu and length %zu",
479 fi.offset, fi.length);
480
481 btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
482 !error);
483 if (error)
484 mapping_set_error(folio->mapping, error);
485 btrfs_folio_clear_writeback(fs_info, folio, start, len);
486 }
487
488 bio_put(bio);
489 }
490
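/*
 * Mark the folio as locked for reading. This is only needed for subpage
 * cases, where the per-sector lock bitmap has to be set in addition to the
 * folio lock itself.
 */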
static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
492 {
493 ASSERT(folio_test_locked(folio));
494 if (!btrfs_is_subpage(fs_info, folio->mapping))
495 return;
496
497 ASSERT(folio_test_private(folio));
498 btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
499 }
500
501 /*
502 * After a data read IO is done, we need to:
503 *
504 * - clear the uptodate bits on error
505 * - set the uptodate bits if things worked
506 * - set the folio up to date if all extents in the tree are uptodate
507 * - clear the lock bit in the extent tree
508 * - unlock the folio if there are no other extents locked for it
509 *
510 * Scheduling is not allowed, so the extent state tree is expected
511 * to have one and only one object corresponding to this IO.
512 */
static void end_bbio_data_read(struct btrfs_bio *bbio)
514 {
515 struct btrfs_fs_info *fs_info = bbio->fs_info;
516 struct bio *bio = &bbio->bio;
517 struct folio_iter fi;
518 const u32 sectorsize = fs_info->sectorsize;
519
520 ASSERT(!bio_flagged(bio, BIO_CLONED));
521 bio_for_each_folio_all(fi, &bbio->bio) {
522 bool uptodate = !bio->bi_status;
523 struct folio *folio = fi.folio;
524 struct inode *inode = folio->mapping->host;
525 u64 start;
526 u64 end;
527 u32 len;
528
529 /* For now only order 0 folios are supported for data. */
530 ASSERT(folio_order(folio) == 0);
531 btrfs_debug(fs_info,
532 "%s: bi_sector=%llu, err=%d, mirror=%u",
533 __func__, bio->bi_iter.bi_sector, bio->bi_status,
534 bbio->mirror_num);
535
536 /*
537 * We always issue full-sector reads, but if some block in a
538 * folio fails to read, blk_update_request() will advance
539 * bv_offset and adjust bv_len to compensate. Print a warning
540 * for unaligned offsets, and an error if they don't add up to
541 * a full sector.
542 */
543 if (!IS_ALIGNED(fi.offset, sectorsize))
544 btrfs_err(fs_info,
545 "partial page read in btrfs with offset %zu and length %zu",
546 fi.offset, fi.length);
547 else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
548 btrfs_info(fs_info,
549 "incomplete page read with offset %zu and length %zu",
550 fi.offset, fi.length);
551
552 start = folio_pos(folio) + fi.offset;
553 end = start + fi.length - 1;
554 len = fi.length;
555
556 if (likely(uptodate)) {
557 loff_t i_size = i_size_read(inode);
558 pgoff_t end_index = i_size >> folio_shift(folio);
559
560 /*
561 * Zero out the remaining part if this range straddles
562 * i_size.
563 *
564 * Here we should only zero the range inside the folio,
565 * not touch anything else.
566 *
567 * NOTE: i_size is exclusive while end is inclusive.
568 */
569 if (folio_index(folio) == end_index && i_size <= end) {
570 u32 zero_start = max(offset_in_folio(folio, i_size),
571 offset_in_folio(folio, start));
572 u32 zero_len = offset_in_folio(folio, end) + 1 -
573 zero_start;
574
575 folio_zero_range(folio, zero_start, zero_len);
576 }
577 }
578
579 /* Update page status and unlock. */
580 end_folio_read(folio, uptodate, start, len);
581 }
582 bio_put(bio);
583 }
584
585 /*
586 * Populate every free slot in a provided array with folios using GFP_NOFS.
587 *
588 * @nr_folios: number of folios to allocate
589 * @folio_array: the array to fill with folios; any existing non-NULL entries in
590 * the array will be skipped
591 *
592 * Return: 0 if all folios were able to be allocated;
593 * -ENOMEM otherwise, the partially allocated folios would be freed and
594 * the array slots zeroed
595 */
int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
597 {
598 for (int i = 0; i < nr_folios; i++) {
599 if (folio_array[i])
600 continue;
601 folio_array[i] = folio_alloc(GFP_NOFS, 0);
602 if (!folio_array[i])
603 goto error;
604 }
605 return 0;
606 error:
607 for (int i = 0; i < nr_folios; i++) {
608 if (folio_array[i])
609 folio_put(folio_array[i]);
610 }
611 return -ENOMEM;
612 }
613
614 /*
615 * Populate every free slot in a provided array with pages, using GFP_NOFS.
616 *
617 * @nr_pages: number of pages to allocate
618 * @page_array: the array to fill with pages; any existing non-null entries in
619 * the array will be skipped
620 * @nofail: whether using __GFP_NOFAIL flag
621 *
622 * Return: 0 if all pages were able to be allocated;
623 * -ENOMEM otherwise, the partially allocated pages would be freed and
624 * the array slots zeroed
625 */
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
627 bool nofail)
628 {
629 const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
630 unsigned int allocated;
631
632 for (allocated = 0; allocated < nr_pages;) {
633 unsigned int last = allocated;
634
635 allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
636 if (unlikely(allocated == last)) {
637 /* No progress, fail and do cleanup. */
638 for (int i = 0; i < allocated; i++) {
639 __free_page(page_array[i]);
640 page_array[i] = NULL;
641 }
642 return -ENOMEM;
643 }
644 }
645 return 0;
646 }
647
648 /*
649 * Populate needed folios for the extent buffer.
650 *
651 * For now, the folios populated are always in order 0 (aka, single page).
652 */
static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
654 {
655 struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
656 int num_pages = num_extent_pages(eb);
657 int ret;
658
659 ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
660 if (ret < 0)
661 return ret;
662
663 for (int i = 0; i < num_pages; i++)
664 eb->folios[i] = page_folio(page_array[i]);
665 eb->folio_size = PAGE_SIZE;
666 eb->folio_shift = PAGE_SHIFT;
667 return 0;
668 }
669
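/*
 * Check whether a new range at @disk_bytenr / @pg_offset can be appended to
 * the bio currently held in @bio_ctrl without breaking contiguity.
 */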
static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
671 struct folio *folio, u64 disk_bytenr,
672 unsigned int pg_offset)
673 {
674 struct bio *bio = &bio_ctrl->bbio->bio;
675 struct bio_vec *bvec = bio_last_bvec_all(bio);
676 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
677 struct folio *bv_folio = page_folio(bvec->bv_page);
678
679 if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
680 /*
681 * For compression, all IO should have its logical bytenr set
682 * to the starting bytenr of the compressed extent.
683 */
684 return bio->bi_iter.bi_sector == sector;
685 }
686
687 /*
688 * The contig check requires the following conditions to be met:
689 *
	 * 1) The folios belong to the same inode
691 * This is implied by the call chain.
692 *
693 * 2) The range has adjacent logical bytenr
694 *
695 * 3) The range has adjacent file offset
696 * This is required for the usage of btrfs_bio->file_offset.
697 */
698 return bio_end_sector(bio) == sector &&
699 folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len ==
700 folio_pos(folio) + pg_offset;
701 }
702
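/*
 * Allocate a new bbio in @bio_ctrl for the range starting at @disk_bytenr and
 * @file_offset. For data writes (bio_ctrl->wbc is set), cap the length at the
 * ordered extent boundary and hook the bio into cgroup writeback.
 */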
static void alloc_new_bio(struct btrfs_inode *inode,
704 struct btrfs_bio_ctrl *bio_ctrl,
705 u64 disk_bytenr, u64 file_offset)
706 {
707 struct btrfs_fs_info *fs_info = inode->root->fs_info;
708 struct btrfs_bio *bbio;
709
710 bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
711 bio_ctrl->end_io_func, NULL);
712 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
713 bbio->inode = inode;
714 bbio->file_offset = file_offset;
715 bio_ctrl->bbio = bbio;
716 bio_ctrl->len_to_oe_boundary = U32_MAX;
717
718 /* Limit data write bios to the ordered boundary. */
719 if (bio_ctrl->wbc) {
720 struct btrfs_ordered_extent *ordered;
721
722 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
723 if (ordered) {
724 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
725 ordered->file_offset +
726 ordered->disk_num_bytes - file_offset);
727 bbio->ordered = ordered;
728 }
729
730 /*
731 * Pick the last added device to support cgroup writeback. For
732 * multi-device file systems this means blk-cgroup policies have
733 * to always be set on the last added/replaced device.
734 * This is a bit odd but has been like that for a long time.
735 */
736 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
737 wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
738 }
739 }
740
741 /*
742 * @disk_bytenr: logical bytenr where the write will be
 * @folio:	folio to add to the bio
 * @size:	portion of the folio that we want to write to
 * @pg_offset:	offset of the new bio or to check whether we are adding
 *              a contiguous folio to the previous one
 *
 * This will either add the folio into the existing @bio_ctrl->bbio, or allocate
 * a new one in @bio_ctrl->bbio.
 * The mirror number for this IO should already be initialized in
751 * @bio_ctrl->mirror_num.
752 */
static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
754 u64 disk_bytenr, struct folio *folio,
755 size_t size, unsigned long pg_offset)
756 {
757 struct btrfs_inode *inode = folio_to_inode(folio);
758
759 ASSERT(pg_offset + size <= PAGE_SIZE);
760 ASSERT(bio_ctrl->end_io_func);
761
762 if (bio_ctrl->bbio &&
763 !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
764 submit_one_bio(bio_ctrl);
765
766 do {
767 u32 len = size;
768
769 /* Allocate new bio if needed */
770 if (!bio_ctrl->bbio) {
771 alloc_new_bio(inode, bio_ctrl, disk_bytenr,
772 folio_pos(folio) + pg_offset);
773 }
774
775 /* Cap to the current ordered extent boundary if there is one. */
776 if (len > bio_ctrl->len_to_oe_boundary) {
777 ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
778 ASSERT(is_data_inode(inode));
779 len = bio_ctrl->len_to_oe_boundary;
780 }
781
782 if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
783 /* bio full: move on to a new one */
784 submit_one_bio(bio_ctrl);
785 continue;
786 }
787
788 if (bio_ctrl->wbc)
789 wbc_account_cgroup_owner(bio_ctrl->wbc, folio,
790 len);
791
792 size -= len;
793 pg_offset += len;
794 disk_bytenr += len;
795
796 /*
797 * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
798 * sector aligned. alloc_new_bio() then sets it to the end of
799 * our ordered extent for writes into zoned devices.
800 *
801 * When len_to_oe_boundary is tracking an ordered extent, we
802 * trust the ordered extent code to align things properly, and
803 * the check above to cap our write to the ordered extent
804 * boundary is correct.
805 *
806 * When len_to_oe_boundary is U32_MAX, the cap above would
807 * result in a 4095 byte IO for the last folio right before
808 * we hit the bio limit of UINT_MAX. bio_add_folio() has all
809 * the checks required to make sure we don't overflow the bio,
810 * and we should just ignore len_to_oe_boundary completely
811 * unless we're using it to track an ordered extent.
812 *
813 * It's pretty hard to make a bio sized U32_MAX, but it can
814 * happen when the page cache is able to feed us contiguous
815 * folios for large extents.
816 */
817 if (bio_ctrl->len_to_oe_boundary != U32_MAX)
818 bio_ctrl->len_to_oe_boundary -= len;
819
820 /* Ordered extent boundary: move on to a new bio. */
821 if (bio_ctrl->len_to_oe_boundary == 0)
822 submit_one_bio(bio_ctrl);
823 } while (size);
824 }
825
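/*
 * Attach the extent buffer related private data to @folio. For nodesize >=
 * PAGE_SIZE the eb itself is attached, for subpage metadata the (possibly
 * preallocated) subpage structure is used instead.
 */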
static int attach_extent_buffer_folio(struct extent_buffer *eb,
827 struct folio *folio,
828 struct btrfs_subpage *prealloc)
829 {
830 struct btrfs_fs_info *fs_info = eb->fs_info;
831 int ret = 0;
832
833 /*
834 * If the page is mapped to btree inode, we should hold the private
835 * lock to prevent race.
836 * For cloned or dummy extent buffers, their pages are not mapped and
837 * will not race with any other ebs.
838 */
839 if (folio->mapping)
840 lockdep_assert_held(&folio->mapping->i_private_lock);
841
842 if (fs_info->nodesize >= PAGE_SIZE) {
843 if (!folio_test_private(folio))
844 folio_attach_private(folio, eb);
845 else
846 WARN_ON(folio_get_private(folio) != eb);
847 return 0;
848 }
849
850 /* Already mapped, just free prealloc */
851 if (folio_test_private(folio)) {
852 btrfs_free_subpage(prealloc);
853 return 0;
854 }
855
856 if (prealloc)
857 /* Has preallocated memory for subpage */
858 folio_attach_private(folio, prealloc);
859 else
860 /* Do new allocation to attach subpage */
861 ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
862 return ret;
863 }
864
int set_page_extent_mapped(struct page *page)
866 {
867 return set_folio_extent_mapped(page_folio(page));
868 }
869
int set_folio_extent_mapped(struct folio *folio)
871 {
872 struct btrfs_fs_info *fs_info;
873
874 ASSERT(folio->mapping);
875
876 if (folio_test_private(folio))
877 return 0;
878
879 fs_info = folio_to_fs_info(folio);
880
881 if (btrfs_is_subpage(fs_info, folio->mapping))
882 return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
883
884 folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
885 return 0;
886 }
887
void clear_folio_extent_mapped(struct folio *folio)
889 {
890 struct btrfs_fs_info *fs_info;
891
892 ASSERT(folio->mapping);
893
894 if (!folio_test_private(folio))
895 return;
896
897 fs_info = folio_to_fs_info(folio);
898 if (btrfs_is_subpage(fs_info, folio->mapping))
899 return btrfs_detach_subpage(fs_info, folio);
900
901 folio_detach_private(folio);
902 }
903
static struct extent_map *__get_extent_map(struct inode *inode,
905 struct folio *folio, u64 start,
906 u64 len, struct extent_map **em_cached)
907 {
908 struct extent_map *em;
909 struct extent_state *cached_state = NULL;
910
911 ASSERT(em_cached);
912
913 if (*em_cached) {
914 em = *em_cached;
915 if (extent_map_in_tree(em) && start >= em->start &&
916 start < extent_map_end(em)) {
917 refcount_inc(&em->refs);
918 return em;
919 }
920
921 free_extent_map(em);
922 *em_cached = NULL;
923 }
924
925 btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state);
926 em = btrfs_get_extent(BTRFS_I(inode), folio, start, len);
927 if (!IS_ERR(em)) {
928 BUG_ON(*em_cached);
929 refcount_inc(&em->refs);
930 *em_cached = em;
931 }
932 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state);
933
934 return em;
935 }
936 /*
937 * basic readpage implementation. Locked extent state structs are inserted
938 * into the tree that are removed when the IO is done (by the end_io
939 * handlers)
940 * XXX JDM: This needs looking at to ensure proper page locking
941 * return 0 on success, otherwise return error
942 */
static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
944 struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
945 {
946 struct inode *inode = folio->mapping->host;
947 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
948 u64 start = folio_pos(folio);
949 const u64 end = start + PAGE_SIZE - 1;
950 u64 cur = start;
951 u64 extent_offset;
952 u64 last_byte = i_size_read(inode);
953 u64 block_start;
954 struct extent_map *em;
955 int ret = 0;
956 size_t pg_offset = 0;
957 size_t iosize;
958 size_t blocksize = fs_info->sectorsize;
959
960 ret = set_folio_extent_mapped(folio);
961 if (ret < 0) {
962 folio_unlock(folio);
963 return ret;
964 }
965
966 if (folio->index == last_byte >> folio_shift(folio)) {
967 size_t zero_offset = offset_in_folio(folio, last_byte);
968
969 if (zero_offset) {
970 iosize = folio_size(folio) - zero_offset;
971 folio_zero_range(folio, zero_offset, iosize);
972 }
973 }
974 bio_ctrl->end_io_func = end_bbio_data_read;
975 begin_folio_read(fs_info, folio);
976 while (cur <= end) {
977 enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
978 bool force_bio_submit = false;
979 u64 disk_bytenr;
980
981 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
982 if (cur >= last_byte) {
983 iosize = folio_size(folio) - pg_offset;
984 folio_zero_range(folio, pg_offset, iosize);
985 end_folio_read(folio, true, cur, iosize);
986 break;
987 }
988 em = __get_extent_map(inode, folio, cur, end - cur + 1,
989 em_cached);
990 if (IS_ERR(em)) {
991 end_folio_read(folio, false, cur, end + 1 - cur);
992 return PTR_ERR(em);
993 }
994 extent_offset = cur - em->start;
995 BUG_ON(extent_map_end(em) <= cur);
996 BUG_ON(end < cur);
997
998 compress_type = extent_map_compression(em);
999
1000 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1001 iosize = ALIGN(iosize, blocksize);
1002 if (compress_type != BTRFS_COMPRESS_NONE)
1003 disk_bytenr = em->disk_bytenr;
1004 else
1005 disk_bytenr = extent_map_block_start(em) + extent_offset;
1006 block_start = extent_map_block_start(em);
1007 if (em->flags & EXTENT_FLAG_PREALLOC)
1008 block_start = EXTENT_MAP_HOLE;
1009
1010 /*
1011 * If we have a file range that points to a compressed extent
1012 * and it's followed by a consecutive file range that points
1013 * to the same compressed extent (possibly with a different
1014 * offset and/or length, so it either points to the whole extent
1015 * or only part of it), we must make sure we do not submit a
1016 * single bio to populate the folios for the 2 ranges because
1017 * this makes the compressed extent read zero out the folios
1018 * belonging to the 2nd range. Imagine the following scenario:
1019 *
1020 * File layout
1021 * [0 - 8K] [8K - 24K]
1022 * | |
1023 * | |
1024 * points to extent X, points to extent X,
1025 * offset 4K, length of 8K offset 0, length 16K
1026 *
1027 * [extent X, compressed length = 4K uncompressed length = 16K]
1028 *
1029 * If the bio to read the compressed extent covers both ranges,
1030 * it will decompress extent X into the folios belonging to the
1031 * first range and then it will stop, zeroing out the remaining
1032 * folios that belong to the other range that points to extent X.
1033 * So here we make sure we submit 2 bios, one for the first
1034 * range and another one for the third range. Both will target
1035 * the same physical extent from disk, but we can't currently
1036 * make the compressed bio endio callback populate the folios
1037 * for both ranges because each compressed bio is tightly
1038 * coupled with a single extent map, and each range can have
1039 * an extent map with a different offset value relative to the
1040 * uncompressed data of our extent and different lengths. This
1041 * is a corner case so we prioritize correctness over
1042 * non-optimal behavior (submitting 2 bios for the same extent).
1043 */
1044 if (compress_type != BTRFS_COMPRESS_NONE &&
1045 prev_em_start && *prev_em_start != (u64)-1 &&
1046 *prev_em_start != em->start)
1047 force_bio_submit = true;
1048
1049 if (prev_em_start)
1050 *prev_em_start = em->start;
1051
1052 free_extent_map(em);
1053 em = NULL;
1054
1055 /* we've found a hole, just zero and go on */
1056 if (block_start == EXTENT_MAP_HOLE) {
1057 folio_zero_range(folio, pg_offset, iosize);
1058
1059 end_folio_read(folio, true, cur, iosize);
1060 cur = cur + iosize;
1061 pg_offset += iosize;
1062 continue;
1063 }
1064 /* the get_extent function already copied into the folio */
1065 if (block_start == EXTENT_MAP_INLINE) {
1066 end_folio_read(folio, true, cur, iosize);
1067 cur = cur + iosize;
1068 pg_offset += iosize;
1069 continue;
1070 }
1071
1072 if (bio_ctrl->compress_type != compress_type) {
1073 submit_one_bio(bio_ctrl);
1074 bio_ctrl->compress_type = compress_type;
1075 }
1076
1077 if (force_bio_submit)
1078 submit_one_bio(bio_ctrl);
1079 submit_extent_folio(bio_ctrl, disk_bytenr, folio, iosize,
1080 pg_offset);
1081 cur = cur + iosize;
1082 pg_offset += iosize;
1083 }
1084
1085 return 0;
1086 }
1087
int btrfs_read_folio(struct file *file, struct folio *folio)
1089 {
1090 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1091 struct extent_map *em_cached = NULL;
1092 int ret;
1093
1094 ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
1095 free_extent_map(em_cached);
1096
1097 /*
1098 * If btrfs_do_readpage() failed we will want to submit the assembled
1099 * bio to do the cleanup.
1100 */
1101 submit_one_bio(&bio_ctrl);
1102 return ret;
1103 }
1104
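/*
 * Set the bits for the sectors of @folio covered by [@start, @start + @len)
 * in the caller provided @delalloc_bitmap.
 */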
static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
1106 u64 start, u32 len)
1107 {
1108 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1109 const u64 folio_start = folio_pos(folio);
1110 unsigned int start_bit;
1111 unsigned int nbits;
1112
1113 ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE);
1114 start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1115 nbits = len >> fs_info->sectorsize_bits;
1116 ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
1117 bitmap_set(delalloc_bitmap, start_bit, nbits);
1118 }
1119
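/*
 * Find the next delalloc run inside @folio at or after @start, based on the
 * bitmap previously filled by set_delalloc_bitmap().
 *
 * Return true and set @found_start/@found_len if a run is found, false
 * otherwise.
 */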
static bool find_next_delalloc_bitmap(struct folio *folio,
1121 unsigned long *delalloc_bitmap, u64 start,
1122 u64 *found_start, u32 *found_len)
1123 {
1124 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1125 const u64 folio_start = folio_pos(folio);
1126 const unsigned int bitmap_size = fs_info->sectors_per_page;
1127 unsigned int start_bit;
1128 unsigned int first_zero;
1129 unsigned int first_set;
1130
1131 ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE);
1132
1133 start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1134 first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
1135 if (first_set >= bitmap_size)
1136 return false;
1137
1138 *found_start = folio_start + (first_set << fs_info->sectorsize_bits);
1139 first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
1140 *found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
1141 return true;
1142 }
1143
1144 /*
1145 * helper for extent_writepage(), doing all of the delayed allocation setup.
1146 *
 * This returns 1 if the btrfs_run_delalloc_range function did all the work required
1148 * to write the page (copy into inline extent). In this case the IO has
1149 * been started and the page is already unlocked.
1150 *
1151 * This returns 0 if all went well (page still locked)
1152 * This returns < 0 if there were errors (page still locked)
1153 */
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1155 struct folio *folio,
1156 struct btrfs_bio_ctrl *bio_ctrl)
1157 {
1158 struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
1159 struct writeback_control *wbc = bio_ctrl->wbc;
1160 const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
1161 const u64 page_start = folio_pos(folio);
1162 const u64 page_end = page_start + folio_size(folio) - 1;
1163 unsigned long delalloc_bitmap = 0;
1164 /*
1165 * Save the last found delalloc end. As the delalloc end can go beyond
1166 * page boundary, thus we cannot rely on subpage bitmap to locate the
1167 * last delalloc end.
1168 */
1169 u64 last_delalloc_end = 0;
1170 u64 delalloc_start = page_start;
1171 u64 delalloc_end = page_end;
1172 u64 delalloc_to_write = 0;
1173 int ret = 0;
1174 int bit;
1175
1176 /* Save the dirty bitmap as our submission bitmap will be a subset of it. */
1177 if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
1178 ASSERT(fs_info->sectors_per_page > 1);
1179 btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
1180 } else {
1181 bio_ctrl->submit_bitmap = 1;
1182 }
1183
1184 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1185 u64 start = page_start + (bit << fs_info->sectorsize_bits);
1186
1187 btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
1188 }
1189
1190 /* Lock all (subpage) delalloc ranges inside the folio first. */
1191 while (delalloc_start < page_end) {
1192 delalloc_end = page_end;
1193 if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
1194 &delalloc_start, &delalloc_end)) {
1195 delalloc_start = delalloc_end + 1;
1196 continue;
1197 }
1198 set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
1199 min(delalloc_end, page_end) + 1 - delalloc_start);
1200 last_delalloc_end = delalloc_end;
1201 delalloc_start = delalloc_end + 1;
1202 }
1203 delalloc_start = page_start;
1204
1205 if (!last_delalloc_end)
1206 goto out;
1207
1208 /* Run the delalloc ranges for the above locked ranges. */
1209 while (delalloc_start < page_end) {
1210 u64 found_start;
1211 u32 found_len;
1212 bool found;
1213
1214 if (!is_subpage) {
1215 /*
1216 * For non-subpage case, the found delalloc range must
1217 * cover this folio and there must be only one locked
1218 * delalloc range.
1219 */
1220 found_start = page_start;
1221 found_len = last_delalloc_end + 1 - found_start;
1222 found = true;
1223 } else {
1224 found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
1225 delalloc_start, &found_start, &found_len);
1226 }
1227 if (!found)
1228 break;
1229 /*
		 * If the subpage range covers the last sector, the delalloc
		 * range may end beyond the folio boundary; use the saved
		 * delalloc_end instead.
1233 */
1234 if (found_start + found_len >= page_end)
1235 found_len = last_delalloc_end + 1 - found_start;
1236
1237 if (ret >= 0) {
1238 /* No errors hit so far, run the current delalloc range. */
1239 ret = btrfs_run_delalloc_range(inode, folio,
1240 found_start,
1241 found_start + found_len - 1,
1242 wbc);
1243 } else {
1244 /*
1245 * We've hit an error during previous delalloc range,
1246 * have to cleanup the remaining locked ranges.
1247 */
1248 unlock_extent(&inode->io_tree, found_start,
1249 found_start + found_len - 1, NULL);
1250 __unlock_for_delalloc(&inode->vfs_inode, folio,
1251 found_start,
1252 found_start + found_len - 1);
1253 }
1254
1255 /*
1256 * We have some ranges that's going to be submitted asynchronously
1257 * (compression or inline). These range have their own control
1258 * on when to unlock the pages. We should not touch them
1259 * anymore, so clear the range from the submission bitmap.
1260 */
1261 if (ret > 0) {
1262 unsigned int start_bit = (found_start - page_start) >>
1263 fs_info->sectorsize_bits;
1264 unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
1265 page_start) >> fs_info->sectorsize_bits;
1266 bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
1267 }
1268 /*
1269 * Above btrfs_run_delalloc_range() may have unlocked the folio,
1270 * thus for the last range, we cannot touch the folio anymore.
1271 */
1272 if (found_start + found_len >= last_delalloc_end + 1)
1273 break;
1274
1275 delalloc_start = found_start + found_len;
1276 }
1277 if (ret < 0)
1278 return ret;
1279 out:
1280 if (last_delalloc_end)
1281 delalloc_end = last_delalloc_end;
1282 else
1283 delalloc_end = page_end;
1284 /*
1285 * delalloc_end is already one less than the total length, so
1286 * we don't subtract one from PAGE_SIZE
1287 */
1288 delalloc_to_write +=
1289 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1290
1291 /*
1292 * If all ranges are submitted asynchronously, we just need to account
1293 * for them here.
1294 */
1295 if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
1296 wbc->nr_to_write -= delalloc_to_write;
1297 return 1;
1298 }
1299
1300 if (wbc->nr_to_write < delalloc_to_write) {
1301 int thresh = 8192;
1302
1303 if (delalloc_to_write < thresh * 2)
1304 thresh = delalloc_to_write;
1305 wbc->nr_to_write = min_t(u64, delalloc_to_write,
1306 thresh);
1307 }
1308
1309 return 0;
1310 }
1311
1312 /*
1313 * Return 0 if we have submitted or queued the sector for submission.
1314 * Return <0 for critical errors.
1315 *
1316 * Caller should make sure filepos < i_size and handle filepos >= i_size case.
1317 */
static int submit_one_sector(struct btrfs_inode *inode,
1319 struct folio *folio,
1320 u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
1321 loff_t i_size)
1322 {
1323 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1324 struct extent_map *em;
1325 u64 block_start;
1326 u64 disk_bytenr;
1327 u64 extent_offset;
1328 u64 em_end;
1329 const u32 sectorsize = fs_info->sectorsize;
1330
1331 ASSERT(IS_ALIGNED(filepos, sectorsize));
1332
1333 /* @filepos >= i_size case should be handled by the caller. */
1334 ASSERT(filepos < i_size);
1335
1336 em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
1337 if (IS_ERR(em))
1338 return PTR_ERR_OR_ZERO(em);
1339
1340 extent_offset = filepos - em->start;
1341 em_end = extent_map_end(em);
1342 ASSERT(filepos <= em_end);
1343 ASSERT(IS_ALIGNED(em->start, sectorsize));
1344 ASSERT(IS_ALIGNED(em->len, sectorsize));
1345
1346 block_start = extent_map_block_start(em);
1347 disk_bytenr = extent_map_block_start(em) + extent_offset;
1348
1349 ASSERT(!extent_map_is_compressed(em));
1350 ASSERT(block_start != EXTENT_MAP_HOLE);
1351 ASSERT(block_start != EXTENT_MAP_INLINE);
1352
1353 free_extent_map(em);
1354 em = NULL;
1355
1356 /*
1357 * Although the PageDirty bit is cleared before entering this
1358 * function, subpage dirty bit is not cleared.
1359 * So clear subpage dirty bit here so next time we won't submit
1360 * a folio for a range already written to disk.
1361 */
1362 btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
1363 btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
1364 /*
1365 * Above call should set the whole folio with writeback flag, even
1366 * just for a single subpage sector.
1367 * As long as the folio is properly locked and the range is correct,
1368 * we should always get the folio with writeback flag.
1369 */
1370 ASSERT(folio_test_writeback(folio));
1371
1372 submit_extent_folio(bio_ctrl, disk_bytenr, folio,
1373 sectorsize, filepos - folio_pos(folio));
1374 return 0;
1375 }
1376
1377 /*
1378 * Helper for extent_writepage(). This calls the writepage start hooks,
1379 * and does the loop to map the page into extents and bios.
1380 *
1381 * We return 1 if the IO is started and the page is unlocked,
1382 * 0 if all went well (page still locked)
1383 * < 0 if there were errors (page still locked)
1384 */
static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
1386 struct folio *folio,
1387 u64 start, u32 len,
1388 struct btrfs_bio_ctrl *bio_ctrl,
1389 loff_t i_size)
1390 {
1391 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1392 unsigned long range_bitmap = 0;
1393 bool submitted_io = false;
1394 const u64 folio_start = folio_pos(folio);
1395 u64 cur;
1396 int bit;
1397 int ret = 0;
1398
1399 ASSERT(start >= folio_start &&
1400 start + len <= folio_start + folio_size(folio));
1401
1402 ret = btrfs_writepage_cow_fixup(folio);
1403 if (ret) {
1404 /* Fixup worker will requeue */
1405 folio_redirty_for_writepage(bio_ctrl->wbc, folio);
1406 folio_unlock(folio);
1407 return 1;
1408 }
1409
1410 for (cur = start; cur < start + len; cur += fs_info->sectorsize)
1411 set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
1412 bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
1413 fs_info->sectors_per_page);
1414
1415 bio_ctrl->end_io_func = end_bbio_data_write;
1416
1417 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1418 cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
1419
1420 if (cur >= i_size) {
1421 btrfs_mark_ordered_io_finished(inode, folio, cur,
1422 start + len - cur, true);
1423 /*
1424 * This range is beyond i_size, thus we don't need to
1425 * bother writing back.
1426 * But we still need to clear the dirty subpage bit, or
1427 * the next time the folio gets dirtied, we will try to
1428 * writeback the sectors with subpage dirty bits,
1429 * causing writeback without ordered extent.
1430 */
1431 btrfs_folio_clear_dirty(fs_info, folio, cur,
1432 start + len - cur);
1433 break;
1434 }
1435 ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
1436 if (ret < 0)
1437 goto out;
1438 submitted_io = true;
1439 }
1440 out:
1441 /*
	 * If we didn't submit any sector (>= i_size), the folio dirty flag gets
	 * cleared but PAGECACHE_TAG_DIRTY is not cleared (it is only cleared
	 * by folio_start_writeback() if the folio is not dirty).
1445 *
1446 * Here we set writeback and clear for the range. If the full folio
1447 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
1448 */
1449 if (!submitted_io) {
1450 btrfs_folio_set_writeback(fs_info, folio, start, len);
1451 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1452 }
1453 return ret;
1454 }
1455
1456 /*
1457 * the writepage semantics are similar to regular writepage. extent
1458 * records are inserted to lock ranges in the tree, and as dirty areas
1459 * are found, they are marked writeback. Then the lock bits are removed
1460 * and the end_io handler clears the writeback ranges
1461 *
1462 * Return 0 if everything goes well.
1463 * Return <0 for error.
1464 */
static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
1466 {
1467 struct inode *inode = folio->mapping->host;
1468 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1469 const u64 page_start = folio_pos(folio);
1470 int ret;
1471 size_t pg_offset;
1472 loff_t i_size = i_size_read(inode);
1473 unsigned long end_index = i_size >> PAGE_SHIFT;
1474
1475 trace_extent_writepage(folio, inode, bio_ctrl->wbc);
1476
1477 WARN_ON(!folio_test_locked(folio));
1478
1479 pg_offset = offset_in_folio(folio, i_size);
1480 if (folio->index > end_index ||
1481 (folio->index == end_index && !pg_offset)) {
1482 folio_invalidate(folio, 0, folio_size(folio));
1483 folio_unlock(folio);
1484 return 0;
1485 }
1486
1487 if (folio->index == end_index)
1488 folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
1489
1490 /*
1491 * Default to unlock the whole folio.
	 * The proper bitmap won't be initialized until writepage_delalloc().
1493 */
1494 bio_ctrl->submit_bitmap = (unsigned long)-1;
1495 ret = set_folio_extent_mapped(folio);
1496 if (ret < 0)
1497 goto done;
1498
1499 ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl);
1500 if (ret == 1)
1501 return 0;
1502 if (ret)
1503 goto done;
1504
1505 ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
1506 PAGE_SIZE, bio_ctrl, i_size);
1507 if (ret == 1)
1508 return 0;
1509
1510 bio_ctrl->wbc->nr_to_write--;
1511
1512 done:
1513 if (ret) {
1514 btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
1515 page_start, PAGE_SIZE, !ret);
1516 mapping_set_error(folio->mapping, ret);
1517 }
1518
1519 /*
1520 * Only unlock ranges that are submitted. As there can be some async
1521 * submitted ranges inside the folio.
1522 */
1523 btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
1524 ASSERT(ret <= 0);
1525 return ret;
1526 }
1527
void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1529 {
1530 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1531 TASK_UNINTERRUPTIBLE);
1532 }
1533
1534 /*
1535 * Lock extent buffer status and pages for writeback.
1536 *
1537 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1538 * extent buffer is not dirty)
 * Return %true if the extent buffer is submitted to bio.
1540 */
static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1542 struct writeback_control *wbc)
1543 {
1544 struct btrfs_fs_info *fs_info = eb->fs_info;
1545 bool ret = false;
1546
1547 btrfs_tree_lock(eb);
1548 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1549 btrfs_tree_unlock(eb);
1550 if (wbc->sync_mode != WB_SYNC_ALL)
1551 return false;
1552 wait_on_extent_buffer_writeback(eb);
1553 btrfs_tree_lock(eb);
1554 }
1555
1556 /*
1557 * We need to do this to prevent races in people who check if the eb is
1558 * under IO since we can end up having no IO bits set for a short period
1559 * of time.
1560 */
1561 spin_lock(&eb->refs_lock);
1562 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1563 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1564 spin_unlock(&eb->refs_lock);
1565 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1566 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1567 -eb->len,
1568 fs_info->dirty_metadata_batch);
1569 ret = true;
1570 } else {
1571 spin_unlock(&eb->refs_lock);
1572 }
1573 btrfs_tree_unlock(eb);
1574 return ret;
1575 }
1576
static void set_btree_ioerr(struct extent_buffer *eb)
1578 {
1579 struct btrfs_fs_info *fs_info = eb->fs_info;
1580
1581 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1582
1583 /*
1584 * A read may stumble upon this buffer later, make sure that it gets an
1585 * error and knows there was an error.
1586 */
1587 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1588
1589 /*
1590 * We need to set the mapping with the io error as well because a write
1591 * error will flip the file system readonly, and then syncfs() will
1592 * return a 0 because we are readonly if we don't modify the err seq for
1593 * the superblock.
1594 */
1595 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1596
1597 /*
1598 * If writeback for a btree extent that doesn't belong to a log tree
1599 * failed, increment the counter transaction->eb_write_errors.
1600 * We do this because while the transaction is running and before it's
1601 * committing (when we call filemap_fdata[write|wait]_range against
1602 * the btree inode), we might have
1603 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1604 * returns an error or an error happens during writeback, when we're
1605 * committing the transaction we wouldn't know about it, since the pages
1606 * can be no longer dirty nor marked anymore for writeback (if a
1607 * subsequent modification to the extent buffer didn't happen before the
1608 * transaction commit), which makes filemap_fdata[write|wait]_range not
1609 * able to find the pages which contain errors at transaction
1610 * commit time. So if this happens we must abort the transaction,
1611 * otherwise we commit a super block with btree roots that point to
1612 * btree nodes/leafs whose content on disk is invalid - either garbage
1613 * or the content of some node/leaf from a past generation that got
1614 * cowed or deleted and is no longer valid.
1615 *
1616 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1617 * not be enough - we need to distinguish between log tree extents vs
1618 * non-log tree extents, and the next filemap_fdatawait_range() call
1619 * will catch and clear such errors in the mapping - and that call might
1620 * be from a log sync and not from a transaction commit. Also, checking
1621 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1622 * not done and would not be reliable - the eb might have been released
1623 * from memory and reading it back again means that flag would not be
1624 * set (since it's a runtime flag, not persisted on disk).
1625 *
1626 * Using the flags below in the btree inode also covers the case where
1627 * writepages() returns success after having started writeback for all
1628 * dirty pages, but before filemap_fdatawait_range() is called the
1629 * writeback for all those pages has already finished with errors -
1630 * because we were not using AS_EIO/AS_ENOSPC,
1631 * filemap_fdatawait_range() would return success, as it could not know
1632 * that writeback errors happened (the pages were no longer tagged for
1633 * writeback).
1634 */
1635 switch (eb->log_index) {
1636 case -1:
1637 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1638 break;
1639 case 0:
1640 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1641 break;
1642 case 1:
1643 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1644 break;
1645 default:
1646 BUG(); /* unexpected, logic error */
1647 }
1648 }
1649
1650 /*
1651 * The endio specific version which won't touch any unsafe spinlock in endio
1652 * context.
1653 */
1654 static struct extent_buffer *find_extent_buffer_nolock(
1655 const struct btrfs_fs_info *fs_info, u64 start)
1656 {
1657 struct extent_buffer *eb;
1658
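/*
 * The lookup itself is protected only by RCU; take a reference on the
 * eb only if its refcount has not already dropped to zero.
 */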
1659 rcu_read_lock();
1660 eb = radix_tree_lookup(&fs_info->buffer_radix,
1661 start >> fs_info->sectorsize_bits);
1662 if (eb && atomic_inc_not_zero(&eb->refs)) {
1663 rcu_read_unlock();
1664 return eb;
1665 }
1666 rcu_read_unlock();
1667 return NULL;
1668 }
1669
1670 static void end_bbio_meta_write(struct btrfs_bio *bbio)
1671 {
1672 struct extent_buffer *eb = bbio->private;
1673 struct btrfs_fs_info *fs_info = eb->fs_info;
1674 bool uptodate = !bbio->bio.bi_status;
1675 struct folio_iter fi;
1676 u32 bio_offset = 0;
1677
1678 if (!uptodate)
1679 set_btree_ioerr(eb);
1680
1681 bio_for_each_folio_all(fi, &bbio->bio) {
1682 u64 start = eb->start + bio_offset;
1683 struct folio *folio = fi.folio;
1684 u32 len = fi.length;
1685
1686 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1687 bio_offset += len;
1688 }
1689
1690 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
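/*
 * Make sure the clearing of the WRITEBACK bit is visible before we wake
 * up any waiters blocked on that bit.
 */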
1691 smp_mb__after_atomic();
1692 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1693
1694 bio_put(&bbio->bio);
1695 }
1696
1697 static void prepare_eb_write(struct extent_buffer *eb)
1698 {
1699 u32 nritems;
1700 unsigned long start;
1701 unsigned long end;
1702
1703 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1704
1705 /* Set btree blocks beyond nritems with 0 to avoid stale content */
1706 nritems = btrfs_header_nritems(eb);
1707 if (btrfs_header_level(eb) > 0) {
1708 end = btrfs_node_key_ptr_offset(eb, nritems);
1709 memzero_extent_buffer(eb, end, eb->len - end);
1710 } else {
1711 /*
1712 * Leaf:
1713 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1714 */
1715 start = btrfs_item_nr_offset(eb, nritems);
1716 end = btrfs_item_nr_offset(eb, 0);
1717 if (nritems == 0)
1718 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1719 else
1720 end += btrfs_item_offset(eb, nritems - 1);
1721 memzero_extent_buffer(eb, start, end - start);
1722 }
1723 }
1724
1725 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1726 struct writeback_control *wbc)
1727 {
1728 struct btrfs_fs_info *fs_info = eb->fs_info;
1729 struct btrfs_bio *bbio;
1730
1731 prepare_eb_write(eb);
1732
1733 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1734 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1735 eb->fs_info, end_bbio_meta_write, eb);
1736 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1737 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1738 wbc_init_bio(wbc, &bbio->bio);
1739 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1740 bbio->file_offset = eb->start;
1741 if (fs_info->nodesize < PAGE_SIZE) {
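/*
 * Subpage case: the whole eb lives inside the first folio, so track
 * writeback and dirty state at subpage granularity and only clear the
 * folio dirty flag when the last dirty range in it is cleared.
 */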
1742 struct folio *folio = eb->folios[0];
1743 bool ret;
1744
1745 folio_lock(folio);
1746 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1747 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1748 eb->len)) {
1749 folio_clear_dirty_for_io(folio);
1750 wbc->nr_to_write--;
1751 }
1752 ret = bio_add_folio(&bbio->bio, folio, eb->len,
1753 eb->start - folio_pos(folio));
1754 ASSERT(ret);
1755 wbc_account_cgroup_owner(wbc, folio, eb->len);
1756 folio_unlock(folio);
1757 } else {
1758 int num_folios = num_extent_folios(eb);
1759
1760 for (int i = 0; i < num_folios; i++) {
1761 struct folio *folio = eb->folios[i];
1762 bool ret;
1763
1764 folio_lock(folio);
1765 folio_clear_dirty_for_io(folio);
1766 folio_start_writeback(folio);
1767 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1768 ASSERT(ret);
1769 wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
1770 wbc->nr_to_write -= folio_nr_pages(folio);
1771 folio_unlock(folio);
1772 }
1773 }
1774 btrfs_submit_bbio(bbio, 0);
1775 }
1776
1777 /*
1778 * Submit one subpage btree page.
1779 *
1780 * The main difference to submit_eb_page() is:
1781 * - Page locking
1782 * For subpage, we don't rely on page locking at all.
1783 *
1784 * - Flush write bio
1785 * We only flush the bio if we may be unable to fit the current extent
1786 * buffer into the current bio.
1787 *
1788 * Return >=0 for the number of submitted extent buffers.
1789 * Return <0 for fatal error.
1790 */
1791 static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
1792 {
1793 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1794 int submitted = 0;
1795 u64 folio_start = folio_pos(folio);
1796 int bit_start = 0;
1797 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1798
1799 /* Lock and write each dirty extent buffer in the range */
1800 while (bit_start < fs_info->sectors_per_page) {
1801 struct btrfs_subpage *subpage = folio_get_private(folio);
1802 struct extent_buffer *eb;
1803 unsigned long flags;
1804 u64 start;
1805
1806 /*
1807 * Take private lock to ensure the subpage won't be detached
1808 * in the meantime.
1809 */
1810 spin_lock(&folio->mapping->i_private_lock);
1811 if (!folio_test_private(folio)) {
1812 spin_unlock(&folio->mapping->i_private_lock);
1813 break;
1814 }
1815 spin_lock_irqsave(&subpage->lock, flags);
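/* Check whether the sector at @bit_start is dirty in the subpage bitmap. */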
1816 if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
1817 subpage->bitmaps)) {
1818 spin_unlock_irqrestore(&subpage->lock, flags);
1819 spin_unlock(&folio->mapping->i_private_lock);
1820 bit_start++;
1821 continue;
1822 }
1823
1824 start = folio_start + bit_start * fs_info->sectorsize;
1825 bit_start += sectors_per_node;
1826
1827 /*
1828 * Here we just want to grab the eb without touching extra
1829 * spin locks, so call find_extent_buffer_nolock().
1830 */
1831 eb = find_extent_buffer_nolock(fs_info, start);
1832 spin_unlock_irqrestore(&subpage->lock, flags);
1833 spin_unlock(&folio->mapping->i_private_lock);
1834
1835 /*
1836 * The eb has already reached 0 refs thus find_extent_buffer()
1837 * doesn't return it. We don't need to write back such eb
1838 * anyway.
1839 */
1840 if (!eb)
1841 continue;
1842
1843 if (lock_extent_buffer_for_io(eb, wbc)) {
1844 write_one_eb(eb, wbc);
1845 submitted++;
1846 }
1847 free_extent_buffer(eb);
1848 }
1849 return submitted;
1850 }
1851
1852 /*
1853 * Submit all page(s) of one extent buffer.
1854 *
1855 * @folio: the folio of one extent buffer
1856 * @ctx: the eb write context, used to determine if we need to submit this
1857 * folio; if the current folio belongs to ctx->eb, we don't need to submit it
1858 *
1859 * The caller should pass each folio in bytenr order, and here we use
1860 * ctx->eb to determine if we have submitted pages of one extent buffer.
1861 *
1862 * If we have, we just skip until we hit a new folio that doesn't belong to
1863 * the current ctx->eb.
1864 *
1865 * If not, we submit all the page(s) of the extent buffer.
1866 *
1867 * Return >0 if we have submitted the extent buffer successfully.
1868 * Return 0 if we don't need to submit the page, as it's already submitted by
1869 * previous call.
1870 * Return <0 for fatal error.
1871 */
1872 static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx)
1873 {
1874 struct writeback_control *wbc = ctx->wbc;
1875 struct address_space *mapping = folio->mapping;
1876 struct extent_buffer *eb;
1877 int ret;
1878
1879 if (!folio_test_private(folio))
1880 return 0;
1881
1882 if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
1883 return submit_eb_subpage(folio, wbc);
1884
1885 spin_lock(&mapping->i_private_lock);
1886 if (!folio_test_private(folio)) {
1887 spin_unlock(&mapping->i_private_lock);
1888 return 0;
1889 }
1890
1891 eb = folio_get_private(folio);
1892
1893 /*
1894 * Shouldn't happen and normally this would be a BUG_ON but no point
1895 * crashing the machine for something we can survive anyway.
1896 */
1897 if (WARN_ON(!eb)) {
1898 spin_unlock(&mapping->i_private_lock);
1899 return 0;
1900 }
1901
1902 if (eb == ctx->eb) {
1903 spin_unlock(&mapping->i_private_lock);
1904 return 0;
1905 }
1906 ret = atomic_inc_not_zero(&eb->refs);
1907 spin_unlock(&mapping->i_private_lock);
1908 if (!ret)
1909 return 0;
1910
1911 ctx->eb = eb;
1912
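/*
 * A zoned meta write pointer check failing with -EBUSY means this eb
 * can't be submitted right now; skip it without reporting an error.
 */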
1913 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1914 if (ret) {
1915 if (ret == -EBUSY)
1916 ret = 0;
1917 free_extent_buffer(eb);
1918 return ret;
1919 }
1920
1921 if (!lock_extent_buffer_for_io(eb, wbc)) {
1922 free_extent_buffer(eb);
1923 return 0;
1924 }
1925 /* Implies write in zoned mode. */
1926 if (ctx->zoned_bg) {
1927 /* Mark the last eb in the block group. */
1928 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1929 ctx->zoned_bg->meta_write_pointer += eb->len;
1930 }
1931 write_one_eb(eb, wbc);
1932 free_extent_buffer(eb);
1933 return 1;
1934 }
1935
1936 int btree_write_cache_pages(struct address_space *mapping,
1937 struct writeback_control *wbc)
1938 {
1939 struct btrfs_eb_write_context ctx = { .wbc = wbc };
1940 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1941 int ret = 0;
1942 int done = 0;
1943 int nr_to_write_done = 0;
1944 struct folio_batch fbatch;
1945 unsigned int nr_folios;
1946 pgoff_t index;
1947 pgoff_t end; /* Inclusive */
1948 int scanned = 0;
1949 xa_mark_t tag;
1950
1951 folio_batch_init(&fbatch);
1952 if (wbc->range_cyclic) {
1953 index = mapping->writeback_index; /* Start from prev offset */
1954 end = -1;
1955 /*
1956 * When starting from the beginning there is no need to cycle over
1957 * the range, so mark it as scanned.
1958 */
1959 scanned = (index == 0);
1960 } else {
1961 index = wbc->range_start >> PAGE_SHIFT;
1962 end = wbc->range_end >> PAGE_SHIFT;
1963 scanned = 1;
1964 }
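/*
 * For data integrity writeback, tag the pages to be written first so
 * that pages dirtied while we are writing back are not picked up again
 * in this pass.
 */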
1965 if (wbc->sync_mode == WB_SYNC_ALL)
1966 tag = PAGECACHE_TAG_TOWRITE;
1967 else
1968 tag = PAGECACHE_TAG_DIRTY;
1969 btrfs_zoned_meta_io_lock(fs_info);
1970 retry:
1971 if (wbc->sync_mode == WB_SYNC_ALL)
1972 tag_pages_for_writeback(mapping, index, end);
1973 while (!done && !nr_to_write_done && (index <= end) &&
1974 (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1975 tag, &fbatch))) {
1976 unsigned i;
1977
1978 for (i = 0; i < nr_folios; i++) {
1979 struct folio *folio = fbatch.folios[i];
1980
1981 ret = submit_eb_page(folio, &ctx);
1982 if (ret == 0)
1983 continue;
1984 if (ret < 0) {
1985 done = 1;
1986 break;
1987 }
1988
1989 /*
1990 * The filesystem may choose to bump up nr_to_write.
1991 * We have to make sure to honor the new nr_to_write
1992 * at any time.
1993 */
1994 nr_to_write_done = wbc->nr_to_write <= 0;
1995 }
1996 folio_batch_release(&fbatch);
1997 cond_resched();
1998 }
1999 if (!scanned && !done) {
2000 /*
2001 * We hit the last page and there is more work to be done: wrap
2002 * back to the start of the file
2003 */
2004 scanned = 1;
2005 index = 0;
2006 goto retry;
2007 }
2008 /*
2009 * If something went wrong, don't allow any metadata write bio to be
2010 * submitted.
2011 *
2012 * This would prevent use-after-free if we had dirty pages that were not
2013 * cleaned up, which can still happen with fuzzed images.
2014 *
2015 * - Bad extent tree
2016 * Allowing existing tree block to be allocated for other trees.
2017 *
2018 * - Log tree operations
2019 * Existing tree blocks get allocated to the log tree, which bumps
2020 * their generation, then they get cleaned in tree re-balance.
2021 * Such tree blocks will not be written back, since they are clean,
2022 * thus no WRITTEN flag is set.
2023 * And after the log writes back, such tree blocks are not tracked by
2024 * any dirty extent_io_tree.
2025 *
2026 * - Offending tree block gets re-dirtied from its original owner
2027 * Since it has a bumped generation and no WRITTEN flag, it can be
2028 * reused without COWing. This tree block will not be tracked by
2029 * btrfs_transaction::dirty_pages.
2030 *
2031 * Now such dirty tree block will not be cleaned by any dirty
2032 * extent io tree. Thus we don't want to submit such wild eb
2033 * if the fs already has error.
2034 *
2035 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2036 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2037 */
2038 if (ret > 0)
2039 ret = 0;
2040 if (!ret && BTRFS_FS_ERROR(fs_info))
2041 ret = -EROFS;
2042
2043 if (ctx.zoned_bg)
2044 btrfs_put_block_group(ctx.zoned_bg);
2045 btrfs_zoned_meta_io_unlock(fs_info);
2046 return ret;
2047 }
2048
2049 /*
2050 * Walk the list of dirty pages of the given address space and write all of them.
2051 *
2052 * @mapping: address space structure to write
2053 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2054 * @bio_ctrl: holds context for the write, namely the bio
2055 *
2056 * If a page is already under I/O, write_cache_pages() skips it, even
2057 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2058 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2059 * and msync() need to guarantee that all the data which was dirty at the time
2060 * the call was made get new I/O started against them. If wbc->sync_mode is
2061 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2062 * existing IO to complete.
2063 */
2064 static int extent_write_cache_pages(struct address_space *mapping,
2065 struct btrfs_bio_ctrl *bio_ctrl)
2066 {
2067 struct writeback_control *wbc = bio_ctrl->wbc;
2068 struct inode *inode = mapping->host;
2069 int ret = 0;
2070 int done = 0;
2071 int nr_to_write_done = 0;
2072 struct folio_batch fbatch;
2073 unsigned int nr_folios;
2074 pgoff_t index;
2075 pgoff_t end; /* Inclusive */
2076 pgoff_t done_index;
2077 int range_whole = 0;
2078 int scanned = 0;
2079 xa_mark_t tag;
2080
2081 /*
2082 * We have to hold onto the inode so that ordered extents can do their
2083 * work when the IO finishes. The alternative to this is failing to add
2084 * an ordered extent if the igrab() fails there and that is a huge pain
2085 * to deal with, so instead just hold onto the inode throughout the
2086 * writepages operation. If it fails here we are freeing up the inode
2087 * anyway and we'd rather not waste our time writing out stuff that is
2088 * going to be truncated anyway.
2089 */
2090 if (!igrab(inode))
2091 return 0;
2092
2093 folio_batch_init(&fbatch);
2094 if (wbc->range_cyclic) {
2095 index = mapping->writeback_index; /* Start from prev offset */
2096 end = -1;
2097 /*
2098 * When starting from the beginning there is no need to cycle over
2099 * the range, so mark it as scanned.
2100 */
2101 scanned = (index == 0);
2102 } else {
2103 index = wbc->range_start >> PAGE_SHIFT;
2104 end = wbc->range_end >> PAGE_SHIFT;
2105 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2106 range_whole = 1;
2107 scanned = 1;
2108 }
2109
2110 /*
2111 * We do the tagged writepage as long as the snapshot flush bit is set
2112 * and we are the first one to do the filemap_flush() on this inode.
2113 *
2114 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2115 * not race in and drop the bit.
2116 */
2117 if (range_whole && wbc->nr_to_write == LONG_MAX &&
2118 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2119 &BTRFS_I(inode)->runtime_flags))
2120 wbc->tagged_writepages = 1;
2121
2122 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2123 tag = PAGECACHE_TAG_TOWRITE;
2124 else
2125 tag = PAGECACHE_TAG_DIRTY;
2126 retry:
2127 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2128 tag_pages_for_writeback(mapping, index, end);
2129 done_index = index;
2130 while (!done && !nr_to_write_done && (index <= end) &&
2131 (nr_folios = filemap_get_folios_tag(mapping, &index,
2132 end, tag, &fbatch))) {
2133 unsigned i;
2134
2135 for (i = 0; i < nr_folios; i++) {
2136 struct folio *folio = fbatch.folios[i];
2137
2138 done_index = folio_next_index(folio);
2139 /*
2140 * At this point we hold neither the i_pages lock nor
2141 * the page lock: the page may be truncated or
2142 * invalidated (changing page->mapping to NULL),
2143 * or even swizzled back from swapper_space to
2144 * tmpfs file mapping
2145 */
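/*
 * If we can't get the folio lock immediately, flush our pending bio
 * first: the lock holder may be waiting for writeback of a page that
 * sits in that bio.
 */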
2146 if (!folio_trylock(folio)) {
2147 submit_write_bio(bio_ctrl, 0);
2148 folio_lock(folio);
2149 }
2150
2151 if (unlikely(folio->mapping != mapping)) {
2152 folio_unlock(folio);
2153 continue;
2154 }
2155
2156 if (!folio_test_dirty(folio)) {
2157 /* Someone wrote it for us. */
2158 folio_unlock(folio);
2159 continue;
2160 }
2161
2162 /*
2163 * For subpage case, compression can lead to mixed
2164 * writeback and dirty flags, e.g:
2165 * 0 32K 64K 96K 128K
2166 * | |//////||/////| |//|
2167 *
2168 * In the above case, [32K, 96K) is asynchronously submitted
2169 * for compression, and [124K, 128K) needs to be written back.
2170 *
2171 * If we didn't wait for writeback of page 64K, [124K, 128K)
2172 * won't be submitted as the page still has the writeback flag
2173 * and will be skipped in the next check.
2174 *
2175 * This mixed writeback and dirty case is only possible for
2176 * subpage case.
2177 *
2178 * TODO: Remove this check after migrating compression to
2179 * regular submission.
2180 */
2181 if (wbc->sync_mode != WB_SYNC_NONE ||
2182 btrfs_is_subpage(inode_to_fs_info(inode), mapping)) {
2183 if (folio_test_writeback(folio))
2184 submit_write_bio(bio_ctrl, 0);
2185 folio_wait_writeback(folio);
2186 }
2187
2188 if (folio_test_writeback(folio) ||
2189 !folio_clear_dirty_for_io(folio)) {
2190 folio_unlock(folio);
2191 continue;
2192 }
2193
2194 ret = extent_writepage(folio, bio_ctrl);
2195 if (ret < 0) {
2196 done = 1;
2197 break;
2198 }
2199
2200 /*
2201 * The filesystem may choose to bump up nr_to_write.
2202 * We have to make sure to honor the new nr_to_write
2203 * at any time.
2204 */
2205 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2206 wbc->nr_to_write <= 0);
2207 }
2208 folio_batch_release(&fbatch);
2209 cond_resched();
2210 }
2211 if (!scanned && !done) {
2212 /*
2213 * We hit the last page and there is more work to be done: wrap
2214 * back to the start of the file
2215 */
2216 scanned = 1;
2217 index = 0;
2218
2219 /*
2220 * If we're looping we could run into a page that is locked by a
2221 * writer and that writer could be waiting on writeback for a
2222 * page in our current bio, and thus deadlock, so flush the
2223 * write bio here.
2224 */
2225 submit_write_bio(bio_ctrl, 0);
2226 goto retry;
2227 }
2228
2229 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2230 mapping->writeback_index = done_index;
2231
2232 btrfs_add_delayed_iput(BTRFS_I(inode));
2233 return ret;
2234 }
2235
2236 /*
2237 * Submit the pages in the range to bio for call sites whose delalloc range has
2238 * already been run (aka, ordered extent inserted) and all pages are still
2239 * locked.
2240 */
2241 void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
2242 u64 start, u64 end, struct writeback_control *wbc,
2243 bool pages_dirty)
2244 {
2245 bool found_error = false;
2246 int ret = 0;
2247 struct address_space *mapping = inode->i_mapping;
2248 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2249 const u32 sectorsize = fs_info->sectorsize;
2250 loff_t i_size = i_size_read(inode);
2251 u64 cur = start;
2252 struct btrfs_bio_ctrl bio_ctrl = {
2253 .wbc = wbc,
2254 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2255 };
2256
2257 if (wbc->no_cgroup_owner)
2258 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2259
2260 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2261
2262 while (cur <= end) {
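/*
 * Clamp the range to the current page, so each iteration submits at
 * most one page worth of sectors.
 */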
2263 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2264 u32 cur_len = cur_end + 1 - cur;
2265 struct folio *folio;
2266
2267 folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
2268
2269 /*
2270 * This shouldn't happen, the pages are pinned and locked, this
2271 * code is just in case, but shouldn't actually be run.
2272 */
2273 if (IS_ERR(folio)) {
2274 btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
2275 cur, cur_len, false);
2276 mapping_set_error(mapping, PTR_ERR(folio));
2277 cur = cur_end + 1;
2278 continue;
2279 }
2280
2281 ASSERT(folio_test_locked(folio));
2282 if (pages_dirty && folio != locked_folio)
2283 ASSERT(folio_test_dirty(folio));
2284
2285 /*
2286 * Set the submission bitmap to submit all sectors.
2287 * extent_writepage_io() will do the truncation correctly.
2288 */
2289 bio_ctrl.submit_bitmap = (unsigned long)-1;
2290 ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
2291 &bio_ctrl, i_size);
2292 if (ret == 1)
2293 goto next_page;
2294
2295 if (ret) {
2296 btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
2297 cur, cur_len, !ret);
2298 mapping_set_error(mapping, ret);
2299 }
2300 btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
2301 if (ret < 0)
2302 found_error = true;
2303 next_page:
2304 folio_put(folio);
2305 cur = cur_end + 1;
2306 }
2307
2308 submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2309 }
2310
2311 int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2312 {
2313 struct inode *inode = mapping->host;
2314 int ret = 0;
2315 struct btrfs_bio_ctrl bio_ctrl = {
2316 .wbc = wbc,
2317 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2318 };
2319
2320 /*
2321 * Allow only a single thread to do the reloc work in zoned mode to
2322 * protect the write pointer updates.
2323 */
2324 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2325 ret = extent_write_cache_pages(mapping, &bio_ctrl);
2326 submit_write_bio(&bio_ctrl, ret);
2327 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2328 return ret;
2329 }
2330
2331 void btrfs_readahead(struct readahead_control *rac)
2332 {
2333 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2334 struct folio *folio;
2335 struct extent_map *em_cached = NULL;
2336 u64 prev_em_start = (u64)-1;
2337
2338 while ((folio = readahead_folio(rac)) != NULL)
2339 btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
2340
2341 if (em_cached)
2342 free_extent_map(em_cached);
2343 submit_one_bio(&bio_ctrl);
2344 }
2345
2346 /*
2347 * basic invalidate_folio code, this waits on any locked or writeback
2348 * ranges corresponding to the folio, and then deletes any extent state
2349 * records from the tree
2350 */
2351 int extent_invalidate_folio(struct extent_io_tree *tree,
2352 struct folio *folio, size_t offset)
2353 {
2354 struct extent_state *cached_state = NULL;
2355 u64 start = folio_pos(folio);
2356 u64 end = start + folio_size(folio) - 1;
2357 size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2358
2359 /* This function is only called for the btree inode */
2360 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2361
2362 start += ALIGN(offset, blocksize);
2363 if (start > end)
2364 return 0;
2365
2366 lock_extent(tree, start, end, &cached_state);
2367 folio_wait_writeback(folio);
2368
2369 /*
2370 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2371 * so here we only need to unlock the extent range to free any
2372 * existing extent state.
2373 */
2374 unlock_extent(tree, start, end, &cached_state);
2375 return 0;
2376 }
2377
2378 /*
2379 * a helper for release_folio, this tests for areas of the page that
2380 * are locked or under IO and drops the related state bits if it is safe
2381 * to drop the page.
2382 */
2383 static bool try_release_extent_state(struct extent_io_tree *tree,
2384 struct folio *folio)
2385 {
2386 u64 start = folio_pos(folio);
2387 u64 end = start + PAGE_SIZE - 1;
2388 bool ret;
2389
2390 if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2391 ret = false;
2392 } else {
2393 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2394 EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2395 EXTENT_QGROUP_RESERVED);
2396 int ret2;
2397
2398 /*
2399 * At this point we can safely clear everything except the
2400 * locked bit, the nodatasum bit and the delalloc new bit.
2401 * The delalloc new bit will be cleared by ordered extent
2402 * completion.
2403 */
2404 ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2405
2406 /* If __clear_extent_bit() failed due to ENOMEM,
2407 * we can't allow the release to continue.
2408 */
2409 if (ret2 < 0)
2410 ret = false;
2411 else
2412 ret = true;
2413 }
2414 return ret;
2415 }
2416
2417 /*
2418 * a helper for release_folio. As long as there are no locked extents
2419 * in the range corresponding to the page, both state records and extent
2420 * map records are removed
2421 */
2422 bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
2423 {
2424 u64 start = folio_pos(folio);
2425 u64 end = start + PAGE_SIZE - 1;
2426 struct btrfs_inode *inode = folio_to_inode(folio);
2427 struct extent_io_tree *io_tree = &inode->io_tree;
2428
2429 while (start <= end) {
2430 const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2431 const u64 len = end - start + 1;
2432 struct extent_map_tree *extent_tree = &inode->extent_tree;
2433 struct extent_map *em;
2434
2435 write_lock(&extent_tree->lock);
2436 em = lookup_extent_mapping(extent_tree, start, len);
2437 if (!em) {
2438 write_unlock(&extent_tree->lock);
2439 break;
2440 }
2441 if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2442 write_unlock(&extent_tree->lock);
2443 free_extent_map(em);
2444 break;
2445 }
2446 if (test_range_bit_exists(io_tree, em->start,
2447 extent_map_end(em) - 1, EXTENT_LOCKED))
2448 goto next;
2449 /*
2450 * If it's not in the list of modified extents, used by a fast
2451 * fsync, we can remove it. If it's being logged we can safely
2452 * remove it since fsync took an extra reference on the em.
2453 */
2454 if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2455 goto remove_em;
2456 /*
2457 * If it's in the list of modified extents, remove it only if
2458 * its generation is older than the current one, in which case
2459 * we don't need it for a fast fsync. Otherwise don't remove it,
2460 * we could be racing with an ongoing fast fsync that could miss
2461 * the new extent.
2462 */
2463 if (em->generation >= cur_gen)
2464 goto next;
2465 remove_em:
2466 /*
2467 * We only remove extent maps that are not in the list of
2468 * modified extents or that are in the list but with a
2469 * generation lower than the current generation, so there is no
2470 * need to set the full fsync flag on the inode (it hurts the
2471 * fsync performance for workloads with a data size that exceeds
2472 * or is close to the system's memory).
2473 */
2474 remove_extent_mapping(inode, em);
2475 /* Once for the inode's extent map tree. */
2476 free_extent_map(em);
2477 next:
2478 start = extent_map_end(em);
2479 write_unlock(&extent_tree->lock);
2480
2481 /* Once for us, for the lookup_extent_mapping() reference. */
2482 free_extent_map(em);
2483
2484 if (need_resched()) {
2485 /*
2486 * If we need to resched but we can't block just exit
2487 * and leave any remaining extent maps.
2488 */
2489 if (!gfpflags_allow_blocking(mask))
2490 break;
2491
2492 cond_resched();
2493 }
2494 }
2495 return try_release_extent_state(io_tree, folio);
2496 }
2497
2498 static void __free_extent_buffer(struct extent_buffer *eb)
2499 {
2500 kmem_cache_free(extent_buffer_cache, eb);
2501 }
2502
2503 static int extent_buffer_under_io(const struct extent_buffer *eb)
2504 {
2505 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
2506 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2507 }
2508
2509 static bool folio_range_has_eb(struct folio *folio)
2510 {
2511 struct btrfs_subpage *subpage;
2512
2513 lockdep_assert_held(&folio->mapping->i_private_lock);
2514
2515 if (folio_test_private(folio)) {
2516 subpage = folio_get_private(folio);
2517 if (atomic_read(&subpage->eb_refs))
2518 return true;
2519 }
2520 return false;
2521 }
2522
2523 static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
2524 {
2525 struct btrfs_fs_info *fs_info = eb->fs_info;
2526 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2527
2528 /*
2529 * For mapped eb, we're going to change the folio private, which should
2530 * be done under the i_private_lock.
2531 */
2532 if (mapped)
2533 spin_lock(&folio->mapping->i_private_lock);
2534
2535 if (!folio_test_private(folio)) {
2536 if (mapped)
2537 spin_unlock(&folio->mapping->i_private_lock);
2538 return;
2539 }
2540
2541 if (fs_info->nodesize >= PAGE_SIZE) {
2542 /*
2543 * We do this since we'll remove the pages after we've
2544 * removed the eb from the radix tree, so we could race
2545 * and have this page now attached to the new eb. So
2546 * only clear the folio private if it's still attached to
2547 * this eb.
2548 */
2549 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
2550 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2551 BUG_ON(folio_test_dirty(folio));
2552 BUG_ON(folio_test_writeback(folio));
2553 /* We need to make sure we haven't been attached to a new eb. */
2554 folio_detach_private(folio);
2555 }
2556 if (mapped)
2557 spin_unlock(&folio->mapping->i_private_lock);
2558 return;
2559 }
2560
2561 /*
2562 * For subpage, we can have dummy eb with folio private attached. In
2563 * this case, we can directly detach the private as such folio is only
2564 * attached to one dummy eb, no sharing.
2565 */
2566 if (!mapped) {
2567 btrfs_detach_subpage(fs_info, folio);
2568 return;
2569 }
2570
2571 btrfs_folio_dec_eb_refs(fs_info, folio);
2572
2573 /*
2574 * We can only detach the folio private if there are no other ebs in the
2575 * page range and no unfinished IO.
2576 */
2577 if (!folio_range_has_eb(folio))
2578 btrfs_detach_subpage(fs_info, folio);
2579
2580 spin_unlock(&folio->mapping->i_private_lock);
2581 }
2582
2583 /* Release all pages attached to the extent buffer */
2584 static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
2585 {
2586 ASSERT(!extent_buffer_under_io(eb));
2587
2588 for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
2589 struct folio *folio = eb->folios[i];
2590
2591 if (!folio)
2592 continue;
2593
2594 detach_extent_buffer_folio(eb, folio);
2595
2596 /* One for when we allocated the folio. */
2597 folio_put(folio);
2598 }
2599 }
2600
2601 /*
2602 * Helper for releasing the extent buffer.
2603 */
2604 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
2605 {
2606 btrfs_release_extent_buffer_pages(eb);
2607 btrfs_leak_debug_del_eb(eb);
2608 __free_extent_buffer(eb);
2609 }
2610
2611 static struct extent_buffer *
2612 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
2613 unsigned long len)
2614 {
2615 struct extent_buffer *eb = NULL;
2616
2617 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
2618 eb->start = start;
2619 eb->len = len;
2620 eb->fs_info = fs_info;
2621 init_rwsem(&eb->lock);
2622
2623 btrfs_leak_debug_add_eb(eb);
2624
2625 spin_lock_init(&eb->refs_lock);
2626 atomic_set(&eb->refs, 1);
2627
2628 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
2629
2630 return eb;
2631 }
2632
2633 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
2634 {
2635 struct extent_buffer *new;
2636 int num_folios = num_extent_folios(src);
2637 int ret;
2638
2639 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
2640 if (new == NULL)
2641 return NULL;
2642
2643 /*
2644 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
2645 * btrfs_release_extent_buffer() has different behavior for
2646 * UNMAPPED subpage extent buffers.
2647 */
2648 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
2649
2650 ret = alloc_eb_folio_array(new, false);
2651 if (ret) {
2652 btrfs_release_extent_buffer(new);
2653 return NULL;
2654 }
2655
2656 for (int i = 0; i < num_folios; i++) {
2657 struct folio *folio = new->folios[i];
2658
2659 ret = attach_extent_buffer_folio(new, folio, NULL);
2660 if (ret < 0) {
2661 btrfs_release_extent_buffer(new);
2662 return NULL;
2663 }
2664 WARN_ON(folio_test_dirty(folio));
2665 }
2666 copy_extent_buffer_full(new, src);
2667 set_extent_buffer_uptodate(new);
2668
2669 return new;
2670 }
2671
2672 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2673 u64 start, unsigned long len)
2674 {
2675 struct extent_buffer *eb;
2676 int num_folios = 0;
2677 int ret;
2678
2679 eb = __alloc_extent_buffer(fs_info, start, len);
2680 if (!eb)
2681 return NULL;
2682
2683 ret = alloc_eb_folio_array(eb, false);
2684 if (ret)
2685 goto err;
2686
2687 num_folios = num_extent_folios(eb);
2688 for (int i = 0; i < num_folios; i++) {
2689 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
2690 if (ret < 0)
2691 goto err;
2692 }
2693
2694 set_extent_buffer_uptodate(eb);
2695 btrfs_set_header_nritems(eb, 0);
2696 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2697
2698 return eb;
2699 err:
2700 for (int i = 0; i < num_folios; i++) {
2701 if (eb->folios[i]) {
2702 detach_extent_buffer_folio(eb, eb->folios[i]);
2703 folio_put(eb->folios[i]);
2704 }
2705 }
2706 __free_extent_buffer(eb);
2707 return NULL;
2708 }
2709
2710 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2711 u64 start)
2712 {
2713 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
2714 }
2715
2716 static void check_buffer_tree_ref(struct extent_buffer *eb)
2717 {
2718 int refs;
2719 /*
2720 * The TREE_REF bit is first set when the extent_buffer is added
2721 * to the radix tree. It is also reset, if unset, when a new reference
2722 * is created by find_extent_buffer.
2723 *
2724 * It is only cleared in two cases: freeing the last non-tree
2725 * reference to the extent_buffer when its STALE bit is set or
2726 * calling release_folio when the tree reference is the only reference.
2727 *
2728 * In both cases, care is taken to ensure that the extent_buffer's
2729 * pages are not under io. However, release_folio can be concurrently
2730 * called with creating new references, which is prone to race
2731 * conditions between the calls to check_buffer_tree_ref in those
2732 * codepaths and clearing TREE_REF in try_release_extent_buffer.
2733 *
2734 * The actual lifetime of the extent_buffer in the radix tree is
2735 * adequately protected by the refcount, but the TREE_REF bit and
2736 * its corresponding reference are not. To protect against this
2737 * class of races, we call check_buffer_tree_ref from the codepaths
2738 * which trigger io. Note that once io is initiated, TREE_REF can no
2739 * longer be cleared, so that is the moment at which any such race is
2740 * best fixed.
2741 */
2742 refs = atomic_read(&eb->refs);
2743 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2744 return;
2745
2746 spin_lock(&eb->refs_lock);
2747 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2748 atomic_inc(&eb->refs);
2749 spin_unlock(&eb->refs_lock);
2750 }
2751
2752 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
2753 {
2754 int num_folios = num_extent_folios(eb);
2755
2756 check_buffer_tree_ref(eb);
2757
2758 for (int i = 0; i < num_folios; i++)
2759 folio_mark_accessed(eb->folios[i]);
2760 }
2761
2762 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
2763 u64 start)
2764 {
2765 struct extent_buffer *eb;
2766
2767 eb = find_extent_buffer_nolock(fs_info, start);
2768 if (!eb)
2769 return NULL;
2770 /*
2771 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
2772 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
2773 * another task running free_extent_buffer() might have seen that flag
2774 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
2775 * writeback flags not set) and it's still in the tree (flag
2776 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
2777 * decrementing the extent buffer's reference count twice. So here we
2778 * could race and increment the eb's reference count, clear its stale
2779 * flag, mark it as dirty and drop our reference before the other task
2780 * finishes executing free_extent_buffer, which would later result in
2781 * an attempt to free an extent buffer that is dirty.
2782 */
2783 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
2784 spin_lock(&eb->refs_lock);
2785 spin_unlock(&eb->refs_lock);
2786 }
2787 mark_extent_buffer_accessed(eb);
2788 return eb;
2789 }
2790
2791 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
2792 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
2793 u64 start)
2794 {
2795 struct extent_buffer *eb, *exists = NULL;
2796 int ret;
2797
2798 eb = find_extent_buffer(fs_info, start);
2799 if (eb)
2800 return eb;
2801 eb = alloc_dummy_extent_buffer(fs_info, start);
2802 if (!eb)
2803 return ERR_PTR(-ENOMEM);
2804 eb->fs_info = fs_info;
2805 again:
2806 ret = radix_tree_preload(GFP_NOFS);
2807 if (ret) {
2808 exists = ERR_PTR(ret);
2809 goto free_eb;
2810 }
2811 spin_lock(&fs_info->buffer_lock);
2812 ret = radix_tree_insert(&fs_info->buffer_radix,
2813 start >> fs_info->sectorsize_bits, eb);
2814 spin_unlock(&fs_info->buffer_lock);
2815 radix_tree_preload_end();
2816 if (ret == -EEXIST) {
2817 exists = find_extent_buffer(fs_info, start);
2818 if (exists)
2819 goto free_eb;
2820 else
2821 goto again;
2822 }
2823 check_buffer_tree_ref(eb);
2824 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
2825
2826 return eb;
2827 free_eb:
2828 btrfs_release_extent_buffer(eb);
2829 return exists;
2830 }
2831 #endif
2832
2833 static struct extent_buffer *grab_extent_buffer(
2834 struct btrfs_fs_info *fs_info, struct page *page)
2835 {
2836 struct folio *folio = page_folio(page);
2837 struct extent_buffer *exists;
2838
2839 lockdep_assert_held(&page->mapping->i_private_lock);
2840
2841 /*
2842 * For subpage case, we completely rely on radix tree to ensure we
2843 * don't try to insert two ebs for the same bytenr. So here we always
2844 * return NULL and just continue.
2845 */
2846 if (fs_info->nodesize < PAGE_SIZE)
2847 return NULL;
2848
2849 /* Page not yet attached to an extent buffer */
2850 if (!folio_test_private(folio))
2851 return NULL;
2852
2853 /*
2854 * We could have already allocated an eb for this page and attached one,
2855 * so let's see if we can get a ref on the existing eb; if we can, we
2856 * know it's good and we can just return that one, else we know we can
2857 * just overwrite the folio private.
2858 */
2859 exists = folio_get_private(folio);
2860 if (atomic_inc_not_zero(&exists->refs))
2861 return exists;
2862
2863 WARN_ON(PageDirty(page));
2864 folio_detach_private(folio);
2865 return NULL;
2866 }
2867
2868 static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
2869 {
2870 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
2871 btrfs_err(fs_info, "bad tree block start %llu", start);
2872 return -EINVAL;
2873 }
2874
2875 if (fs_info->nodesize < PAGE_SIZE &&
2876 offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
2877 btrfs_err(fs_info,
2878 "tree block crosses page boundary, start %llu nodesize %u",
2879 start, fs_info->nodesize);
2880 return -EINVAL;
2881 }
2882 if (fs_info->nodesize >= PAGE_SIZE &&
2883 !PAGE_ALIGNED(start)) {
2884 btrfs_err(fs_info,
2885 "tree block is not page aligned, start %llu nodesize %u",
2886 start, fs_info->nodesize);
2887 return -EINVAL;
2888 }
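/*
 * Tree blocks that are not aligned to nodesize are tolerated, but warn
 * about them only once per filesystem.
 */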
2889 if (!IS_ALIGNED(start, fs_info->nodesize) &&
2890 !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
2891 btrfs_warn(fs_info,
2892 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
2893 start, fs_info->nodesize);
2894 }
2895 return 0;
2896 }
2897
2898
2899 /*
2900 * Return 0 if eb->folios[i] is attached to btree inode successfully.
2901 * Return >0 if there is already another extent buffer for the range,
2902 * and @found_eb_ret would be updated.
2903 * Return -EAGAIN if the filemap has an existing folio but with different size
2904 * than @eb.
2905 * The caller needs to free the existing folios and retry using the same order.
2906 */
2907 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
2908 struct btrfs_subpage *prealloc,
2909 struct extent_buffer **found_eb_ret)
2910 {
2911
2912 struct btrfs_fs_info *fs_info = eb->fs_info;
2913 struct address_space *mapping = fs_info->btree_inode->i_mapping;
2914 const unsigned long index = eb->start >> PAGE_SHIFT;
2915 struct folio *existing_folio = NULL;
2916 int ret;
2917
2918 ASSERT(found_eb_ret);
2919
2920 /* Caller should ensure the folio exists. */
2921 ASSERT(eb->folios[i]);
2922
2923 retry:
2924 ret = filemap_add_folio(mapping, eb->folios[i], index + i,
2925 GFP_NOFS | __GFP_NOFAIL);
2926 if (!ret)
2927 goto finish;
2928
2929 existing_folio = filemap_lock_folio(mapping, index + i);
2930 /* The page cache only exists for a very short time, just retry. */
2931 if (IS_ERR(existing_folio)) {
2932 existing_folio = NULL;
2933 goto retry;
2934 }
2935
2936 /* For now, we should only have single-page folios for btree inode. */
2937 ASSERT(folio_nr_pages(existing_folio) == 1);
2938
2939 if (folio_size(existing_folio) != eb->folio_size) {
2940 folio_unlock(existing_folio);
2941 folio_put(existing_folio);
2942 return -EAGAIN;
2943 }
2944
2945 finish:
2946 spin_lock(&mapping->i_private_lock);
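/*
 * Subpage case: multiple ebs can share one folio, so if the filemap
 * already has a folio at this index, drop ours and reuse the existing
 * one.
 */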
2947 if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
2948 /* We're going to reuse the existing page, can drop our folio now. */
2949 __free_page(folio_page(eb->folios[i], 0));
2950 eb->folios[i] = existing_folio;
2951 } else if (existing_folio) {
2952 struct extent_buffer *existing_eb;
2953
2954 existing_eb = grab_extent_buffer(fs_info,
2955 folio_page(existing_folio, 0));
2956 if (existing_eb) {
2957 /* The extent buffer still exists, we can use it directly. */
2958 *found_eb_ret = existing_eb;
2959 spin_unlock(&mapping->i_private_lock);
2960 folio_unlock(existing_folio);
2961 folio_put(existing_folio);
2962 return 1;
2963 }
2964 /* The extent buffer no longer exists, we can reuse the folio. */
2965 __free_page(folio_page(eb->folios[i], 0));
2966 eb->folios[i] = existing_folio;
2967 }
2968 eb->folio_size = folio_size(eb->folios[i]);
2969 eb->folio_shift = folio_shift(eb->folios[i]);
2970 /* Should not fail, as we have preallocated the memory. */
2971 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
2972 ASSERT(!ret);
2973 /*
2974 * To inform that we have an extra eb under allocation, so that
2975 * detach_extent_buffer_folio() won't release the folio private when the
2976 * eb hasn't been inserted into the radix tree yet.
2977 *
2978 * The ref will be decreased when the eb releases the page, in
2979 * detach_extent_buffer_folio(). Thus it needs no special handling in
2980 * the error path.
2981 */
2982 btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
2983 spin_unlock(&mapping->i_private_lock);
2984 return 0;
2985 }
2986
2987 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
2988 u64 start, u64 owner_root, int level)
2989 {
2990 unsigned long len = fs_info->nodesize;
2991 int num_folios;
2992 int attached = 0;
2993 struct extent_buffer *eb;
2994 struct extent_buffer *existing_eb = NULL;
2995 struct btrfs_subpage *prealloc = NULL;
2996 u64 lockdep_owner = owner_root;
2997 bool page_contig = true;
2998 int uptodate = 1;
2999 int ret;
3000
3001 if (check_eb_alignment(fs_info, start))
3002 return ERR_PTR(-EINVAL);
3003
3004 #if BITS_PER_LONG == 32
3005 if (start >= MAX_LFS_FILESIZE) {
3006 btrfs_err_rl(fs_info,
3007 "extent buffer %llu is beyond 32bit page cache limit", start);
3008 btrfs_err_32bit_limit(fs_info);
3009 return ERR_PTR(-EOVERFLOW);
3010 }
3011 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3012 btrfs_warn_32bit_limit(fs_info);
3013 #endif
3014
3015 eb = find_extent_buffer(fs_info, start);
3016 if (eb)
3017 return eb;
3018
3019 eb = __alloc_extent_buffer(fs_info, start, len);
3020 if (!eb)
3021 return ERR_PTR(-ENOMEM);
3022
3023 /*
3024 * The reloc trees are just snapshots, so we need them to appear to be
3025 * just like any other fs tree WRT lockdep.
3026 */
3027 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3028 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3029
3030 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3031
3032 /*
3033 * Preallocate folio private for subpage case, so that we won't
3034 * allocate memory with i_private_lock nor page lock hold.
3035 *
3036 * The memory will be freed by attach_extent_buffer_page() or freed
3037 * manually if we exit earlier.
3038 */
3039 if (fs_info->nodesize < PAGE_SIZE) {
3040 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3041 if (IS_ERR(prealloc)) {
3042 ret = PTR_ERR(prealloc);
3043 goto out;
3044 }
3045 }
3046
3047 reallocate:
3048 /* Allocate all pages first. */
3049 ret = alloc_eb_folio_array(eb, true);
3050 if (ret < 0) {
3051 btrfs_free_subpage(prealloc);
3052 goto out;
3053 }
3054
3055 num_folios = num_extent_folios(eb);
3056 /* Attach all pages to the filemap. */
3057 for (int i = 0; i < num_folios; i++) {
3058 struct folio *folio;
3059
3060 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
3061 if (ret > 0) {
3062 ASSERT(existing_eb);
3063 goto out;
3064 }
3065
3066 /*
3067 * TODO: Special handling for a corner case where the order of
3068 * folios mismatch between the new eb and filemap.
3069 *
3070 * This happens when:
3071 *
3072 * - the new eb is using higher order folio
3073 *
3074 * - the filemap is still using 0-order folios for the range
3075 * This can happen at the previous eb allocation, and we don't
3076 * have higher order folio for the call.
3077 *
3078 * - the existing eb has already been freed
3079 *
3080 * In this case, we have to free the existing folios first, and
3081 * re-allocate using the same order.
3082 * Thankfully this is not going to happen yet, as we're still
3083 * using 0-order folios.
3084 */
3085 if (unlikely(ret == -EAGAIN)) {
3086 ASSERT(0);
3087 goto reallocate;
3088 }
3089 attached++;
3090
3091 /*
3092 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3093 * reliable, as we may choose to reuse the existing page cache
3094 * and free the allocated page.
3095 */
3096 folio = eb->folios[i];
3097 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3098
3099 /*
3100 * Check if the current page is physically contiguous with previous eb
3101 * page.
3102 * At this stage, either we allocated a large folio, thus @i
3103 * would only be 0, or we fall back to per-page allocation.
3104 */
3105 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3106 page_contig = false;
3107
3108 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3109 uptodate = 0;
3110
3111 /*
3112 * We can't unlock the pages just yet since the extent buffer
3113 * hasn't been properly inserted in the radix tree, this
3114 * opens a race with btree_release_folio which can free a page
3115 * while we are still filling in all pages for the buffer and
3116 * we could crash.
3117 */
3118 }
3119 if (uptodate)
3120 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3121 /* All pages are physically contiguous, can skip cross page handling. */
3122 if (page_contig)
3123 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
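/*
 * Insert the new eb into the buffer radix tree. On -EEXIST, try to grab
 * the already inserted eb; if that one is going away (its refcount hit
 * zero), retry the insertion.
 */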
3124 again:
3125 ret = radix_tree_preload(GFP_NOFS);
3126 if (ret)
3127 goto out;
3128
3129 spin_lock(&fs_info->buffer_lock);
3130 ret = radix_tree_insert(&fs_info->buffer_radix,
3131 start >> fs_info->sectorsize_bits, eb);
3132 spin_unlock(&fs_info->buffer_lock);
3133 radix_tree_preload_end();
3134 if (ret == -EEXIST) {
3135 ret = 0;
3136 existing_eb = find_extent_buffer(fs_info, start);
3137 if (existing_eb)
3138 goto out;
3139 else
3140 goto again;
3141 }
3142 /* add one reference for the tree */
3143 check_buffer_tree_ref(eb);
3144 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3145
3146 /*
3147 * Now it's safe to unlock the pages because any calls to
3148 * btree_release_folio will correctly detect that a page belongs to a
3149 * live buffer and won't free them prematurely.
3150 */
3151 for (int i = 0; i < num_folios; i++)
3152 unlock_page(folio_page(eb->folios[i], 0));
3153 return eb;
3154
3155 out:
3156 WARN_ON(!atomic_dec_and_test(&eb->refs));
3157
3158 /*
3159 * Any attached folios need to be detached before we unlock them. This
3160 * is because when we insert our new folios into the mapping we also
3161 * attach our eb to each folio. If we fail to insert a folio, we'll look
3162 * up the folio for that index and grab its eb. We do not want that
3163 * lookup to grab this eb, as we're getting ready to free it. So we
3164 * have to detach it first and then unlock it.
3165 *
3166 * We have to drop our reference and NULL it out here because in the
3167 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3168 * Below when we call btrfs_release_extent_buffer() we will call
3169 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3170 * case. If we left eb->folios[i] populated in the subpage case we'd
3171 * double put our reference and be super sad.
3172 */
3173 for (int i = 0; i < attached; i++) {
3174 ASSERT(eb->folios[i]);
3175 detach_extent_buffer_folio(eb, eb->folios[i]);
3176 unlock_page(folio_page(eb->folios[i], 0));
3177 folio_put(eb->folios[i]);
3178 eb->folios[i] = NULL;
3179 }
3180 /*
3181 * Now all pages of that extent buffer are unmapped, set the UNMAPPED flag,
3182 * so it can be cleaned up without utilizing page->mapping.
3183 */
3184 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3185
3186 btrfs_release_extent_buffer(eb);
3187 if (ret < 0)
3188 return ERR_PTR(ret);
3189 ASSERT(existing_eb);
3190 return existing_eb;
3191 }
3192
3193 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3194 {
3195 struct extent_buffer *eb =
3196 container_of(head, struct extent_buffer, rcu_head);
3197
3198 __free_extent_buffer(eb);
3199 }
3200
3201 static int release_extent_buffer(struct extent_buffer *eb)
3202 __releases(&eb->refs_lock)
3203 {
3204 lockdep_assert_held(&eb->refs_lock);
3205
3206 WARN_ON(atomic_read(&eb->refs) == 0);
3207 if (atomic_dec_and_test(&eb->refs)) {
3208 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3209 struct btrfs_fs_info *fs_info = eb->fs_info;
3210
3211 spin_unlock(&eb->refs_lock);
3212
3213 spin_lock(&fs_info->buffer_lock);
3214 radix_tree_delete(&fs_info->buffer_radix,
3215 eb->start >> fs_info->sectorsize_bits);
3216 spin_unlock(&fs_info->buffer_lock);
3217 } else {
3218 spin_unlock(&eb->refs_lock);
3219 }
3220
3221 btrfs_leak_debug_del_eb(eb);
3222 /* Should be safe to release our pages at this point */
3223 btrfs_release_extent_buffer_pages(eb);
3224 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3225 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3226 __free_extent_buffer(eb);
3227 return 1;
3228 }
3229 #endif
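/*
 * Lockless lookups (find_extent_buffer_nolock()) may still be accessing
 * this eb under rcu_read_lock(), so defer the actual free until an RCU
 * grace period has passed.
 */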
3230 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3231 return 1;
3232 }
3233 spin_unlock(&eb->refs_lock);
3234
3235 return 0;
3236 }
3237
3238 void free_extent_buffer(struct extent_buffer *eb)
3239 {
3240 int refs;
3241 if (!eb)
3242 return;
3243
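/*
 * Fast path: while a mapped eb still has more than 3 references (or an
 * unmapped one more than 1), drop ours with a plain atomic decrement.
 * Only the last few references need the refs_lock protected path below.
 */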
3244 refs = atomic_read(&eb->refs);
3245 while (1) {
3246 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3247 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3248 refs == 1))
3249 break;
3250 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3251 return;
3252 }
3253
3254 spin_lock(&eb->refs_lock);
3255 if (atomic_read(&eb->refs) == 2 &&
3256 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3257 !extent_buffer_under_io(eb) &&
3258 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3259 atomic_dec(&eb->refs);
3260
3261 /*
3262 * I know this is terrible, but it's temporary until we stop tracking
3263 * the uptodate bits and such for the extent buffers.
3264 */
3265 release_extent_buffer(eb);
3266 }
3267
3268 void free_extent_buffer_stale(struct extent_buffer *eb)
3269 {
3270 if (!eb)
3271 return;
3272
3273 spin_lock(&eb->refs_lock);
3274 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3275
3276 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3277 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3278 atomic_dec(&eb->refs);
3279 release_extent_buffer(eb);
3280 }
3281
3282 static void btree_clear_folio_dirty(struct folio *folio)
3283 {
3284 ASSERT(folio_test_dirty(folio));
3285 ASSERT(folio_test_locked(folio));
3286 folio_clear_dirty_for_io(folio);
3287 xa_lock_irq(&folio->mapping->i_pages);
3288 if (!folio_test_dirty(folio))
3289 __xa_clear_mark(&folio->mapping->i_pages,
3290 folio_index(folio), PAGECACHE_TAG_DIRTY);
3291 xa_unlock_irq(&folio->mapping->i_pages);
3292 }
3293
3294 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3295 {
3296 struct btrfs_fs_info *fs_info = eb->fs_info;
3297 struct folio *folio = eb->folios[0];
3298 bool last;
3299
3300 /* btree_clear_folio_dirty() needs page locked. */
3301 folio_lock(folio);
3302 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
3303 if (last)
3304 btree_clear_folio_dirty(folio);
3305 folio_unlock(folio);
3306 WARN_ON(atomic_read(&eb->refs) == 0);
3307 }
3308
3309 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3310 struct extent_buffer *eb)
3311 {
3312 struct btrfs_fs_info *fs_info = eb->fs_info;
3313 int num_folios;
3314
3315 btrfs_assert_tree_write_locked(eb);
3316
3317 if (trans && btrfs_header_generation(eb) != trans->transid)
3318 return;
3319
3320 /*
3321 * Instead of clearing the dirty flag off of the buffer, mark it as
3322 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3323 * write-ordering in zoned mode, without the need to later re-dirty
3324 * the extent_buffer.
3325 *
3326 * The actual zeroout of the buffer will happen later in
3327 * btree_csum_one_bio.
3328 */
3329 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3330 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3331 return;
3332 }
3333
3334 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3335 return;
3336
3337 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3338 fs_info->dirty_metadata_batch);
3339
3340 if (eb->fs_info->nodesize < PAGE_SIZE)
3341 return clear_subpage_extent_buffer_dirty(eb);
3342
3343 num_folios = num_extent_folios(eb);
3344 for (int i = 0; i < num_folios; i++) {
3345 struct folio *folio = eb->folios[i];
3346
3347 if (!folio_test_dirty(folio))
3348 continue;
3349 folio_lock(folio);
3350 btree_clear_folio_dirty(folio);
3351 folio_unlock(folio);
3352 }
3353 WARN_ON(atomic_read(&eb->refs) == 0);
3354 }
3355
3356 void set_extent_buffer_dirty(struct extent_buffer *eb)
3357 {
3358 int num_folios;
3359 bool was_dirty;
3360
3361 check_buffer_tree_ref(eb);
3362
3363 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3364
3365 num_folios = num_extent_folios(eb);
3366 WARN_ON(atomic_read(&eb->refs) == 0);
3367 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3368 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
3369
3370 if (!was_dirty) {
3371 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3372
3373 /*
3374 * For the subpage case, we can have other extent buffers in the
3375 * same page, and in clear_subpage_extent_buffer_dirty() we
3376 * have to clear the page dirty flag without the subpage lock held.
3377 * This can cause a race where our page's dirty bit is cleared
3378 * right after we set it.
3379 *
3380 * Thankfully, clear_subpage_extent_buffer_dirty() already locks
3381 * its page for other reasons, so we can use the page lock to
3382 * prevent this race.
3383 */
3384 if (subpage)
3385 lock_page(folio_page(eb->folios[0], 0));
3386 for (int i = 0; i < num_folios; i++)
3387 btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
3388 eb->start, eb->len);
3389 if (subpage)
3390 unlock_page(folio_page(eb->folios[0], 0));
3391 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3392 eb->len,
3393 eb->fs_info->dirty_metadata_batch);
3394 }
3395 #ifdef CONFIG_BTRFS_DEBUG
3396 for (int i = 0; i < num_folios; i++)
3397 ASSERT(folio_test_dirty(eb->folios[i]));
3398 #endif
3399 }
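/*
 * An illustrative (hypothetical timing) interleaving of the subpage race the
 * page lock above prevents, with two ebs A and B sharing one page:
 *
 *   set_extent_buffer_dirty(A)        clear_subpage_extent_buffer_dirty(B)
 *                                     folio_lock()
 *                                     clear B's bits, bitmap empty -> last
 *   set A's bits, folio marked dirty
 *                                     btree_clear_folio_dirty() cleans the
 *                                     folio again
 *                                     folio_unlock()
 *
 * A would end up dirty in the subpage bitmap while the folio itself is clean,
 * so it would never be picked up for writeback.  Taking the page lock while
 * setting the bits serializes against the clearing side and closes that
 * window.
 */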
3400
3401 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3402 {
3403 struct btrfs_fs_info *fs_info = eb->fs_info;
3404 int num_folios = num_extent_folios(eb);
3405
3406 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3407 for (int i = 0; i < num_folios; i++) {
3408 struct folio *folio = eb->folios[i];
3409
3410 if (!folio)
3411 continue;
3412
3413 /*
3414 * This is special handling for subpage metadata, as the regular
3415 * btrfs_is_subpage() check cannot handle cloned/dummy metadata.
3416 */
3417 if (fs_info->nodesize >= PAGE_SIZE)
3418 folio_clear_uptodate(folio);
3419 else
3420 btrfs_subpage_clear_uptodate(fs_info, folio,
3421 eb->start, eb->len);
3422 }
3423 }
3424
3425 void set_extent_buffer_uptodate(struct extent_buffer *eb)
3426 {
3427 struct btrfs_fs_info *fs_info = eb->fs_info;
3428 int num_folios = num_extent_folios(eb);
3429
3430 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3431 for (int i = 0; i < num_folios; i++) {
3432 struct folio *folio = eb->folios[i];
3433
3434 /*
3435 * This is special handling for subpage metadata, as the regular
3436 * btrfs_is_subpage() check cannot handle cloned/dummy metadata.
3437 */
3438 if (fs_info->nodesize >= PAGE_SIZE)
3439 folio_mark_uptodate(folio);
3440 else
3441 btrfs_subpage_set_uptodate(fs_info, folio,
3442 eb->start, eb->len);
3443 }
3444 }
3445
3446 static void clear_extent_buffer_reading(struct extent_buffer *eb)
3447 {
3448 clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
3449 smp_mb__after_atomic();
3450 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
3451 }
3452
3453 static void end_bbio_meta_read(struct btrfs_bio *bbio)
3454 {
3455 struct extent_buffer *eb = bbio->private;
3456 struct btrfs_fs_info *fs_info = eb->fs_info;
3457 bool uptodate = !bbio->bio.bi_status;
3458 struct folio_iter fi;
3459 u32 bio_offset = 0;
3460
3461 /*
3462 * If the extent buffer is marked UPTODATE before the read operation
3463 * completes, other calls to read_extent_buffer_pages() will return
3464 * early without waiting for the read to finish, causing data races.
3465 */
3466 WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
3467
3468 eb->read_mirror = bbio->mirror_num;
3469
3470 if (uptodate &&
3471 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3472 uptodate = false;
3473
3474 if (uptodate) {
3475 set_extent_buffer_uptodate(eb);
3476 } else {
3477 clear_extent_buffer_uptodate(eb);
3478 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3479 }
3480
3481 bio_for_each_folio_all(fi, &bbio->bio) {
3482 struct folio *folio = fi.folio;
3483 u64 start = eb->start + bio_offset;
3484 u32 len = fi.length;
3485
3486 if (uptodate)
3487 btrfs_folio_set_uptodate(fs_info, folio, start, len);
3488 else
3489 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
3490
3491 bio_offset += len;
3492 }
3493
3494 clear_extent_buffer_reading(eb);
3495 free_extent_buffer(eb);
3496
3497 bio_put(&bbio->bio);
3498 }
3499
3500 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
3501 const struct btrfs_tree_parent_check *check)
3502 {
3503 struct btrfs_bio *bbio;
3504 bool ret;
3505
3506 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3507 return 0;
3508
3509 /*
3510 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3511 * operation, which could potentially still be in flight. In this case
3512 * we simply want to return an error.
3513 */
3514 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3515 return -EIO;
3516
3517 /* Someone else is already reading the buffer; just wait for it. */
3518 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3519 goto done;
3520
3521 /*
3522 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
3523 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
3524 * started and finished reading the same eb. In this case, UPTODATE
3525 * will now be set, and we shouldn't read it in again.
3526 */
3527 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
3528 clear_extent_buffer_reading(eb);
3529 return 0;
3530 }
3531
3532 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3533 eb->read_mirror = 0;
3534 check_buffer_tree_ref(eb);
3535 atomic_inc(&eb->refs);
3536
3537 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3538 REQ_OP_READ | REQ_META, eb->fs_info,
3539 end_bbio_meta_read, eb);
3540 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3541 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3542 bbio->file_offset = eb->start;
3543 memcpy(&bbio->parent_check, check, sizeof(*check));
3544 if (eb->fs_info->nodesize < PAGE_SIZE) {
3545 ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
3546 eb->start - folio_pos(eb->folios[0]));
3547 ASSERT(ret);
3548 } else {
3549 int num_folios = num_extent_folios(eb);
3550
3551 for (int i = 0; i < num_folios; i++) {
3552 struct folio *folio = eb->folios[i];
3553
3554 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
3555 ASSERT(ret);
3556 }
3557 }
3558 btrfs_submit_bbio(bbio, mirror_num);
3559
3560 done:
3561 if (wait == WAIT_COMPLETE) {
3562 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3563 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3564 return -EIO;
3565 }
3566
3567 return 0;
3568 }
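/*
 * A minimal usage sketch (not taken from this file): a caller that wants a
 * fully validated buffer typically fills in a btrfs_tree_parent_check with
 * the expected owner/transid/level and waits for completion ("parent" and
 * "slot" are assumed locals):
 *
 *	struct btrfs_tree_parent_check check = {
 *		.owner_root = btrfs_header_owner(parent),
 *		.transid = btrfs_node_ptr_generation(parent, slot),
 *		.level = btrfs_header_level(parent) - 1,
 *	};
 *
 *	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, 0, &check);
 *	if (ret < 0)
 *		return ret;	(-EIO: either the read or the validation failed)
 *
 * With WAIT_NONE the read is merely submitted (see
 * btrfs_readahead_tree_block() below) and the caller must not assume
 * EXTENT_BUFFER_UPTODATE afterwards.
 */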
3569
3570 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3571 unsigned long len)
3572 {
3573 btrfs_warn(eb->fs_info,
3574 "access to eb bytenr %llu len %u out of range start %lu len %lu",
3575 eb->start, eb->len, start, len);
3576 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3577
3578 return true;
3579 }
3580
3581 /*
3582 * Check if the [start, start + len) range is valid before reading/writing
3583 * the eb.
3584 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3585 *
3586 * Caller should not touch the dst/src memory if this function returns an error.
3587 */
3588 static inline int check_eb_range(const struct extent_buffer *eb,
3589 unsigned long start, unsigned long len)
3590 {
3591 unsigned long offset;
3592
3593 /* start, start + len should not go beyond eb->len nor overflow */
3594 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3595 return report_eb_range(eb, start, len);
3596
3597 return false;
3598 }
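/*
 * Illustrative examples of what check_eb_range() rejects, assuming a 16K
 * nodesize eb (eb->len == 16384):
 *
 *   start = 16380, len = 8     -> offset = 16388 > eb->len, rejected
 *   start = ULONG_MAX, len = 2 -> check_add_overflow() trips, rejected
 *   start = 16376, len = 8     -> offset = 16384 == eb->len, accepted
 */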
3599
3600 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3601 unsigned long start, unsigned long len)
3602 {
3603 const int unit_size = eb->folio_size;
3604 size_t cur;
3605 size_t offset;
3606 char *dst = (char *)dstv;
3607 unsigned long i = get_eb_folio_index(eb, start);
3608
3609 if (check_eb_range(eb, start, len)) {
3610 /*
3611 * Invalid range hit, zero the destination so callers won't get
3612 * random garbage in their uninitialized memory.
3613 */
3614 memset(dstv, 0, len);
3615 return;
3616 }
3617
3618 if (eb->addr) {
3619 memcpy(dstv, eb->addr + start, len);
3620 return;
3621 }
3622
3623 offset = get_eb_offset_in_folio(eb, start);
3624
3625 while (len > 0) {
3626 char *kaddr;
3627
3628 cur = min(len, unit_size - offset);
3629 kaddr = folio_address(eb->folios[i]);
3630 memcpy(dst, kaddr + offset, cur);
3631
3632 dst += cur;
3633 len -= cur;
3634 offset = 0;
3635 i++;
3636 }
3637 }
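/*
 * A small usage sketch (illustrative, not from this file): item data is
 * normally copied out of a leaf with read_extent_buffer(), e.g. pulling a
 * root item out of the slot that a search landed on ("leaf" and "slot" are
 * assumed locals pointing at a ROOT_ITEM):
 *
 *	struct btrfs_root_item root_item;
 *	unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
 *
 *	read_extent_buffer(leaf, &root_item, ptr, sizeof(root_item));
 *
 * On an out-of-range (start, len) the destination is zeroed rather than left
 * untouched, so callers still see deterministic contents.
 */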
3638
3639 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
3640 void __user *dstv,
3641 unsigned long start, unsigned long len)
3642 {
3643 const int unit_size = eb->folio_size;
3644 size_t cur;
3645 size_t offset;
3646 char __user *dst = (char __user *)dstv;
3647 unsigned long i = get_eb_folio_index(eb, start);
3648 int ret = 0;
3649
3650 WARN_ON(start > eb->len);
3651 WARN_ON(start + len > eb->start + eb->len);
3652
3653 if (eb->addr) {
3654 if (copy_to_user_nofault(dstv, eb->addr + start, len))
3655 ret = -EFAULT;
3656 return ret;
3657 }
3658
3659 offset = get_eb_offset_in_folio(eb, start);
3660
3661 while (len > 0) {
3662 char *kaddr;
3663
3664 cur = min(len, unit_size - offset);
3665 kaddr = folio_address(eb->folios[i]);
3666 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
3667 ret = -EFAULT;
3668 break;
3669 }
3670
3671 dst += cur;
3672 len -= cur;
3673 offset = 0;
3674 i++;
3675 }
3676
3677 return ret;
3678 }
3679
3680 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
3681 unsigned long start, unsigned long len)
3682 {
3683 const int unit_size = eb->folio_size;
3684 size_t cur;
3685 size_t offset;
3686 char *kaddr;
3687 char *ptr = (char *)ptrv;
3688 unsigned long i = get_eb_folio_index(eb, start);
3689 int ret = 0;
3690
3691 if (check_eb_range(eb, start, len))
3692 return -EINVAL;
3693
3694 if (eb->addr)
3695 return memcmp(ptrv, eb->addr + start, len);
3696
3697 offset = get_eb_offset_in_folio(eb, start);
3698
3699 while (len > 0) {
3700 cur = min(len, unit_size - offset);
3701 kaddr = folio_address(eb->folios[i]);
3702 ret = memcmp(ptr, kaddr + offset, cur);
3703 if (ret)
3704 break;
3705
3706 ptr += cur;
3707 len -= cur;
3708 offset = 0;
3709 i++;
3710 }
3711 return ret;
3712 }
3713
3714 /*
3715 * Check that the extent buffer is uptodate.
3716 *
3717 * For the regular case (nodesize >= PAGE_SIZE), check if the folio is uptodate.
3718 * For the subpage case, check if the range covered by the eb has the subpage uptodate bit set.
3719 */
3720 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
3721 {
3722 struct btrfs_fs_info *fs_info = eb->fs_info;
3723 struct folio *folio = eb->folios[i];
3724
3725 ASSERT(folio);
3726
3727 /*
3728 * If we are using the commit root we could potentially clear a page
3729 * Uptodate while we're using the extent buffer that we've previously
3730 * looked up. We don't want to complain in this case, as the page was
3731 * valid before; we just didn't write it out. Instead we want to catch
3732 * the case where we didn't actually read the block properly, which
3733 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
3734 */
3735 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3736 return;
3737
3738 if (fs_info->nodesize < PAGE_SIZE) {
3739 folio = eb->folios[0];
3740 ASSERT(i == 0);
3741 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
3742 eb->start, eb->len)))
3743 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
3744 } else {
3745 WARN_ON(!folio_test_uptodate(folio));
3746 }
3747 }
3748
3749 static void __write_extent_buffer(const struct extent_buffer *eb,
3750 const void *srcv, unsigned long start,
3751 unsigned long len, bool use_memmove)
3752 {
3753 const int unit_size = eb->folio_size;
3754 size_t cur;
3755 size_t offset;
3756 char *kaddr;
3757 const char *src = (const char *)srcv;
3758 unsigned long i = get_eb_folio_index(eb, start);
3759 /* For unmapped (dummy) ebs, no need to check their uptodate status. */
3760 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3761
3762 if (check_eb_range(eb, start, len))
3763 return;
3764
3765 if (eb->addr) {
3766 if (use_memmove)
3767 memmove(eb->addr + start, srcv, len);
3768 else
3769 memcpy(eb->addr + start, srcv, len);
3770 return;
3771 }
3772
3773 offset = get_eb_offset_in_folio(eb, start);
3774
3775 while (len > 0) {
3776 if (check_uptodate)
3777 assert_eb_folio_uptodate(eb, i);
3778
3779 cur = min(len, unit_size - offset);
3780 kaddr = folio_address(eb->folios[i]);
3781 if (use_memmove)
3782 memmove(kaddr + offset, src, cur);
3783 else
3784 memcpy(kaddr + offset, src, cur);
3785
3786 src += cur;
3787 len -= cur;
3788 offset = 0;
3789 i++;
3790 }
3791 }
3792
3793 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
3794 unsigned long start, unsigned long len)
3795 {
3796 return __write_extent_buffer(eb, srcv, start, len, false);
3797 }
3798
3799 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
3800 unsigned long start, unsigned long len)
3801 {
3802 const int unit_size = eb->folio_size;
3803 unsigned long cur = start;
3804
3805 if (eb->addr) {
3806 memset(eb->addr + start, c, len);
3807 return;
3808 }
3809
3810 while (cur < start + len) {
3811 unsigned long index = get_eb_folio_index(eb, cur);
3812 unsigned int offset = get_eb_offset_in_folio(eb, cur);
3813 unsigned int cur_len = min(start + len - cur, unit_size - offset);
3814
3815 assert_eb_folio_uptodate(eb, index);
3816 memset(folio_address(eb->folios[index]) + offset, c, cur_len);
3817
3818 cur += cur_len;
3819 }
3820 }
3821
3822 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
3823 unsigned long len)
3824 {
3825 if (check_eb_range(eb, start, len))
3826 return;
3827 return memset_extent_buffer(eb, 0, start, len);
3828 }
3829
3830 void copy_extent_buffer_full(const struct extent_buffer *dst,
3831 const struct extent_buffer *src)
3832 {
3833 const int unit_size = src->folio_size;
3834 unsigned long cur = 0;
3835
3836 ASSERT(dst->len == src->len);
3837
3838 while (cur < src->len) {
3839 unsigned long index = get_eb_folio_index(src, cur);
3840 unsigned long offset = get_eb_offset_in_folio(src, cur);
3841 unsigned long cur_len = min(src->len, unit_size - offset);
3842 void *addr = folio_address(src->folios[index]) + offset;
3843
3844 write_extent_buffer(dst, addr, cur, cur_len);
3845
3846 cur += cur_len;
3847 }
3848 }
3849
3850 void copy_extent_buffer(const struct extent_buffer *dst,
3851 const struct extent_buffer *src,
3852 unsigned long dst_offset, unsigned long src_offset,
3853 unsigned long len)
3854 {
3855 const int unit_size = dst->folio_size;
3856 u64 dst_len = dst->len;
3857 size_t cur;
3858 size_t offset;
3859 char *kaddr;
3860 unsigned long i = get_eb_folio_index(dst, dst_offset);
3861
3862 if (check_eb_range(dst, dst_offset, len) ||
3863 check_eb_range(src, src_offset, len))
3864 return;
3865
3866 WARN_ON(src->len != dst_len);
3867
3868 offset = get_eb_offset_in_folio(dst, dst_offset);
3869
3870 while (len > 0) {
3871 assert_eb_folio_uptodate(dst, i);
3872
3873 cur = min(len, (unsigned long)(unit_size - offset));
3874
3875 kaddr = folio_address(dst->folios[i]);
3876 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3877
3878 src_offset += cur;
3879 len -= cur;
3880 offset = 0;
3881 i++;
3882 }
3883 }
3884
3885 /*
3886 * Calculate the folio and offset of the byte containing the given bit number.
3887 *
3888 * @eb: the extent buffer
3889 * @start: offset of the bitmap item in the extent buffer
3890 * @nr: bit number
3891 * @folio_index: return index of the folio in the extent buffer that contains
3892 * the given bit number
3893 * @folio_offset: return offset into the folio given by folio_index
3894 *
3895 * This helper hides the ugliness of finding the byte in an extent buffer which
3896 * contains a given bit.
3897 */
3898 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
3899 unsigned long start, unsigned long nr,
3900 unsigned long *folio_index,
3901 size_t *folio_offset)
3902 {
3903 size_t byte_offset = BIT_BYTE(nr);
3904 size_t offset;
3905
3906 /*
3907 * The byte we want is the offset of the extent buffer + the offset of
3908 * the bitmap item in the extent buffer + the offset of the byte in the
3909 * bitmap item.
3910 */
3911 offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
3912
3913 *folio_index = offset >> eb->folio_shift;
3914 *folio_offset = offset_in_eb_folio(eb, offset);
3915 }
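/*
 * A worked example of the arithmetic above (values chosen for illustration;
 * subpage case with a 4K nodesize on a 64K folio, eb->start 16K into it):
 *
 *   start (bitmap item offset in the eb) = 100, nr = 53
 *   byte_offset   = BIT_BYTE(53)                   = 6
 *   offset        = 100 + 16384 + 6                = 16490
 *   *folio_index  = 16490 >> folio_shift (16)      = 0
 *   *folio_offset = offset_in_eb_folio(eb, 16490)  = 16490
 *
 * In the common nodesize >= PAGE_SIZE case offset_in_eb_folio(eb, eb->start)
 * is 0 and the byte simply lives at (start + nr / 8) within the eb.
 */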
3916
3917 /*
3918 * Determine whether a bit in a bitmap item is set.
3919 *
3920 * @eb: the extent buffer
3921 * @start: offset of the bitmap item in the extent buffer
3922 * @nr: bit number to test
3923 */
3924 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
3925 unsigned long nr)
3926 {
3927 unsigned long i;
3928 size_t offset;
3929 u8 *kaddr;
3930
3931 eb_bitmap_offset(eb, start, nr, &i, &offset);
3932 assert_eb_folio_uptodate(eb, i);
3933 kaddr = folio_address(eb->folios[i]);
3934 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
3935 }
3936
3937 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
3938 {
3939 unsigned long index = get_eb_folio_index(eb, bytenr);
3940
3941 if (check_eb_range(eb, bytenr, 1))
3942 return NULL;
3943 return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
3944 }
3945
3946 /*
3947 * Set an area of a bitmap to 1.
3948 *
3949 * @eb: the extent buffer
3950 * @start: offset of the bitmap item in the extent buffer
3951 * @pos: bit number of the first bit
3952 * @len: number of bits to set
3953 */
3954 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
3955 unsigned long pos, unsigned long len)
3956 {
3957 unsigned int first_byte = start + BIT_BYTE(pos);
3958 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
3959 const bool same_byte = (first_byte == last_byte);
3960 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
3961 u8 *kaddr;
3962
3963 if (same_byte)
3964 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
3965
3966 /* Handle the first byte. */
3967 kaddr = extent_buffer_get_byte(eb, first_byte);
3968 *kaddr |= mask;
3969 if (same_byte)
3970 return;
3971
3972 /* Handle the byte aligned part. */
3973 ASSERT(first_byte + 1 <= last_byte);
3974 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
3975
3976 /* Handle the last byte. */
3977 kaddr = extent_buffer_get_byte(eb, last_byte);
3978 *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
3979 }
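/*
 * The mask arithmetic, illustrated with arbitrary values: pos = 3, len = 2,
 * both bits in the same byte:
 *
 *   BITMAP_FIRST_BYTE_MASK(3) = 0xff << 3        = 0xf8
 *   BITMAP_LAST_BYTE_MASK(5)  = 0xff >> (-5 & 7) = 0x1f
 *   mask                      = 0xf8 & 0x1f      = 0x18  (bits 3 and 4)
 *
 * When the range spans multiple bytes only the partial first and last bytes
 * need the masks; everything in between is bulk-set via
 * memset_extent_buffer(eb, 0xff, ...).
 */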
3980
3981
3982 /*
3983 * Clear an area of a bitmap.
3984 *
3985 * @eb: the extent buffer
3986 * @start: offset of the bitmap item in the extent buffer
3987 * @pos: bit number of the first bit
3988 * @len: number of bits to clear
3989 */
3990 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
3991 unsigned long start, unsigned long pos,
3992 unsigned long len)
3993 {
3994 unsigned int first_byte = start + BIT_BYTE(pos);
3995 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
3996 const bool same_byte = (first_byte == last_byte);
3997 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
3998 u8 *kaddr;
3999
4000 if (same_byte)
4001 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4002
4003 /* Handle the first byte. */
4004 kaddr = extent_buffer_get_byte(eb, first_byte);
4005 *kaddr &= ~mask;
4006 if (same_byte)
4007 return;
4008
4009 /* Handle the byte aligned part. */
4010 ASSERT(first_byte + 1 <= last_byte);
4011 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4012
4013 /* Handle the last byte. */
4014 kaddr = extent_buffer_get_byte(eb, last_byte);
4015 *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4016 }
4017
4018 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4019 {
4020 unsigned long distance = (src > dst) ? src - dst : dst - src;
4021 return distance < len;
4022 }
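/*
 * A quick illustration (arbitrary numbers): for src = 100, dst = 104 and
 * len = 8 the distance is 4 < 8, so [100, 108) and [104, 112) overlap and
 * memmove() semantics are required; with len = 4 the ranges merely touch and
 * a plain memcpy() is safe.
 */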
4023
4024 void memcpy_extent_buffer(const struct extent_buffer *dst,
4025 unsigned long dst_offset, unsigned long src_offset,
4026 unsigned long len)
4027 {
4028 const int unit_size = dst->folio_size;
4029 unsigned long cur_off = 0;
4030
4031 if (check_eb_range(dst, dst_offset, len) ||
4032 check_eb_range(dst, src_offset, len))
4033 return;
4034
4035 if (dst->addr) {
4036 const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4037
4038 if (use_memmove)
4039 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4040 else
4041 memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4042 return;
4043 }
4044
4045 while (cur_off < len) {
4046 unsigned long cur_src = cur_off + src_offset;
4047 unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4048 unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4049 unsigned long cur_len = min(src_offset + len - cur_src,
4050 unit_size - folio_off);
4051 void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4052 const bool use_memmove = areas_overlap(src_offset + cur_off,
4053 dst_offset + cur_off, cur_len);
4054
4055 __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4056 use_memmove);
4057 cur_off += cur_len;
4058 }
4059 }
4060
4061 void memmove_extent_buffer(const struct extent_buffer *dst,
4062 unsigned long dst_offset, unsigned long src_offset,
4063 unsigned long len)
4064 {
4065 unsigned long dst_end = dst_offset + len - 1;
4066 unsigned long src_end = src_offset + len - 1;
4067
4068 if (check_eb_range(dst, dst_offset, len) ||
4069 check_eb_range(dst, src_offset, len))
4070 return;
4071
4072 if (dst_offset < src_offset) {
4073 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4074 return;
4075 }
4076
4077 if (dst->addr) {
4078 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4079 return;
4080 }
4081
4082 while (len > 0) {
4083 unsigned long src_i;
4084 size_t cur;
4085 size_t dst_off_in_folio;
4086 size_t src_off_in_folio;
4087 void *src_addr;
4088 bool use_memmove;
4089
4090 src_i = get_eb_folio_index(dst, src_end);
4091
4092 dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4093 src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4094
4095 cur = min_t(unsigned long, len, src_off_in_folio + 1);
4096 cur = min(cur, dst_off_in_folio + 1);
4097
4098 src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4099 cur + 1;
4100 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4101 cur);
4102
4103 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4104 use_memmove);
4105
4106 dst_end -= cur;
4107 src_end -= cur;
4108 len -= cur;
4109 }
4110 }
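/*
 * An illustration of the backward copy above (made-up offsets): with
 * dst_offset = 8, src_offset = 0 and len = 16 the ranges overlap and dst sits
 * after src, so the copy starts from the tails (dst_end = 23, src_end = 15)
 * and walks toward the front in folio-bounded chunks, the same trick a plain
 * memmove() uses so that an overlapping copy never clobbers bytes it has not
 * read yet.
 */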
4111
4112 #define GANG_LOOKUP_SIZE 16
4113 static struct extent_buffer *get_next_extent_buffer(
4114 const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr)
4115 {
4116 struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4117 struct extent_buffer *found = NULL;
4118 u64 folio_start = folio_pos(folio);
4119 u64 cur = folio_start;
4120
4121 ASSERT(in_range(bytenr, folio_start, PAGE_SIZE));
4122 lockdep_assert_held(&fs_info->buffer_lock);
4123
4124 while (cur < folio_start + PAGE_SIZE) {
4125 int ret;
4126 int i;
4127
4128 ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4129 (void **)gang, cur >> fs_info->sectorsize_bits,
4130 min_t(unsigned int, GANG_LOOKUP_SIZE,
4131 PAGE_SIZE / fs_info->nodesize));
4132 if (ret == 0)
4133 goto out;
4134 for (i = 0; i < ret; i++) {
4135 /* Already beyond page end */
4136 if (gang[i]->start >= folio_start + PAGE_SIZE)
4137 goto out;
4138 /* Found one */
4139 if (gang[i]->start >= bytenr) {
4140 found = gang[i];
4141 goto out;
4142 }
4143 }
4144 cur = gang[ret - 1]->start + gang[ret - 1]->len;
4145 }
4146 out:
4147 return found;
4148 }
4149
4150 static int try_release_subpage_extent_buffer(struct folio *folio)
4151 {
4152 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
4153 u64 cur = folio_pos(folio);
4154 const u64 end = cur + PAGE_SIZE;
4155 int ret;
4156
4157 while (cur < end) {
4158 struct extent_buffer *eb = NULL;
4159
4160 /*
4161 * Unlike try_release_extent_buffer(), which uses folio private
4162 * to grab the buffer, for the subpage case we rely on the radix
4163 * tree, thus we need to ensure radix tree consistency.
4164 *
4165 * We also want an atomic snapshot of the radix tree, thus go
4166 * with the spinlock rather than RCU.
4167 */
4168 spin_lock(&fs_info->buffer_lock);
4169 eb = get_next_extent_buffer(fs_info, folio, cur);
4170 if (!eb) {
4171 /* No more ebs in the page range at or after cur */
4172 spin_unlock(&fs_info->buffer_lock);
4173 break;
4174 }
4175 cur = eb->start + eb->len;
4176
4177 /*
4178 * The same as try_release_extent_buffer(), to ensure the eb
4179 * won't disappear out from under us.
4180 */
4181 spin_lock(&eb->refs_lock);
4182 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4183 spin_unlock(&eb->refs_lock);
4184 spin_unlock(&fs_info->buffer_lock);
4185 break;
4186 }
4187 spin_unlock(&fs_info->buffer_lock);
4188
4189 /*
4190 * If tree ref isn't set then we know the ref on this eb is a
4191 * real ref, so just return; this eb will likely be freed soon
4192 * anyway.
4193 */
4194 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4195 spin_unlock(&eb->refs_lock);
4196 break;
4197 }
4198
4199 /*
4200 * Here we don't care about the return value; we will always
4201 * check the folio private at the end, and
4202 * release_extent_buffer() will release the refs_lock.
4203 */
4204 release_extent_buffer(eb);
4205 }
4206 /*
4207 * Finally, check if we have cleared the folio private: if we have
4208 * released all ebs in the page, the folio private should be cleared by now.
4209 */
4210 spin_lock(&folio->mapping->i_private_lock);
4211 if (!folio_test_private(folio))
4212 ret = 1;
4213 else
4214 ret = 0;
4215 spin_unlock(&folio->mapping->i_private_lock);
4216 return ret;
4217
4218 }
4219
4220 int try_release_extent_buffer(struct folio *folio)
4221 {
4222 struct extent_buffer *eb;
4223
4224 if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
4225 return try_release_subpage_extent_buffer(folio);
4226
4227 /*
4228 * We need to make sure nobody is changing folio private, as we rely on
4229 * folio private as the pointer to the extent buffer.
4230 */
4231 spin_lock(&folio->mapping->i_private_lock);
4232 if (!folio_test_private(folio)) {
4233 spin_unlock(&folio->mapping->i_private_lock);
4234 return 1;
4235 }
4236
4237 eb = folio_get_private(folio);
4238 BUG_ON(!eb);
4239
4240 /*
4241 * This is a little awful but should be ok; we need to make sure that
4242 * the eb doesn't disappear out from under us while we're looking at
4243 * this page.
4244 */
4245 spin_lock(&eb->refs_lock);
4246 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4247 spin_unlock(&eb->refs_lock);
4248 spin_unlock(&folio->mapping->i_private_lock);
4249 return 0;
4250 }
4251 spin_unlock(&folio->mapping->i_private_lock);
4252
4253 /*
4254 * If tree ref isn't set then we know the ref on this eb is a real ref,
4255 * so just return; this page will likely be freed soon anyway.
4256 */
4257 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4258 spin_unlock(&eb->refs_lock);
4259 return 0;
4260 }
4261
4262 return release_extent_buffer(eb);
4263 }
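/*
 * A minimal sketch of how this is typically wired up (loosely based on the
 * btree inode's release_folio callback in disk-io.c, shown only for context):
 *
 *	static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
 *	{
 *		if (folio_test_writeback(folio) || folio_test_dirty(folio))
 *			return false;
 *		return try_release_extent_buffer(folio);
 *	}
 *
 * I.e. the page cache may only drop a metadata folio once no eb on it is
 * dirty, under writeback, or still holding extra references.
 */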
4264
4265 /*
4266 * Attempt to readahead a child block.
4267 *
4268 * @fs_info: the fs_info
4269 * @bytenr: bytenr to read
4270 * @owner_root: objectid of the root that owns this eb
4271 * @gen: generation for the uptodate check, can be 0
4272 * @level: level for the eb
4273 *
4274 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
4275 * normal uptodate check of the eb, without checking the generation. If we have
4276 * to read the block we will not block on anything.
4277 */
4278 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4279 u64 bytenr, u64 owner_root, u64 gen, int level)
4280 {
4281 struct btrfs_tree_parent_check check = {
4282 .level = level,
4283 .transid = gen
4284 };
4285 struct extent_buffer *eb;
4286 int ret;
4287
4288 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4289 if (IS_ERR(eb))
4290 return;
4291
4292 if (btrfs_buffer_uptodate(eb, gen, 1)) {
4293 free_extent_buffer(eb);
4294 return;
4295 }
4296
4297 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4298 if (ret < 0)
4299 free_extent_buffer_stale(eb);
4300 else
4301 free_extent_buffer(eb);
4302 }
4303
4304 /*
4305 * Readahead a node's child block.
4306 *
4307 * @node: parent node we're reading from
4308 * @slot: slot in the parent node for the child we want to read
4309 *
4310 * A helper for btrfs_readahead_tree_block(); we simply read the bytenr pointed
4311 * to by the slot in the node provided.
4312 */
4313 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4314 {
4315 btrfs_readahead_tree_block(node->fs_info,
4316 btrfs_node_blockptr(node, slot),
4317 btrfs_header_owner(node),
4318 btrfs_node_ptr_generation(node, slot),
4319 btrfs_header_level(node) - 1);
4320 }
4321