1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/sched/mm.h>
10 #include <linux/spinlock.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/prefetch.h>
16 #include <linux/fsverity.h>
17 #include "extent_io.h"
18 #include "extent-io-tree.h"
19 #include "extent_map.h"
20 #include "ctree.h"
21 #include "btrfs_inode.h"
22 #include "bio.h"
23 #include "locking.h"
24 #include "backref.h"
25 #include "disk-io.h"
26 #include "subpage.h"
27 #include "zoned.h"
28 #include "block-group.h"
29 #include "compression.h"
30 #include "fs.h"
31 #include "accessors.h"
32 #include "file-item.h"
33 #include "file.h"
34 #include "dev-replace.h"
35 #include "super.h"
36 #include "transaction.h"
37
38 static struct kmem_cache *extent_buffer_cache;
39
40 #ifdef CONFIG_BTRFS_DEBUG
41 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
42 {
43 struct btrfs_fs_info *fs_info = eb->fs_info;
44 unsigned long flags;
45
46 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
47 list_add(&eb->leak_list, &fs_info->allocated_ebs);
48 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
49 }
50
51 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
52 {
53 struct btrfs_fs_info *fs_info = eb->fs_info;
54 unsigned long flags;
55
56 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
57 list_del(&eb->leak_list);
58 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
59 }
60
61 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
62 {
63 struct extent_buffer *eb;
64 unsigned long flags;
65
66 /*
67 * If we didn't get into open_ctree our allocated_ebs will not be
68 * initialized, so just skip this.
69 */
70 if (!fs_info->allocated_ebs.next)
71 return;
72
73 WARN_ON(!list_empty(&fs_info->allocated_ebs));
74 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
75 while (!list_empty(&fs_info->allocated_ebs)) {
76 eb = list_first_entry(&fs_info->allocated_ebs,
77 struct extent_buffer, leak_list);
78 pr_err(
79 "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
81 btrfs_header_owner(eb));
82 list_del(&eb->leak_list);
83 WARN_ON_ONCE(1);
84 kmem_cache_free(extent_buffer_cache, eb);
85 }
86 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
87 }
88 #else
89 #define btrfs_leak_debug_add_eb(eb) do {} while (0)
90 #define btrfs_leak_debug_del_eb(eb) do {} while (0)
91 #endif
92
93 /*
94  * Structure to record info about the bio being assembled, and other info such
95  * as how many bytes are left before the stripe/ordered extent boundary.
96 */
97 struct btrfs_bio_ctrl {
98 struct btrfs_bio *bbio;
99 enum btrfs_compression_type compress_type;
100 u32 len_to_oe_boundary;
101 blk_opf_t opf;
102 btrfs_bio_end_io_t end_io_func;
103 struct writeback_control *wbc;
104
105 /*
106 * The sectors of the page which are going to be submitted by
107 * extent_writepage_io().
108 * This is to avoid touching ranges covered by compression/inline.
109 */
110 unsigned long submit_bitmap;
111 };
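
/*
 * Illustrative only: a bio_ctrl normally lives on the caller's stack and is
 * zero initialized apart from the opf (and, for writes, the wbc).  For
 * example, the read path below does roughly:
 *
 *	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
 *
 *	btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
 *	submit_one_bio(&bio_ctrl);
 *
 * The final submit_one_bio() flushes whatever partially built bio is still
 * attached to the control structure.
 */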
112
113 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
114 {
115 struct btrfs_bio *bbio = bio_ctrl->bbio;
116
117 if (!bbio)
118 return;
119
120 /* Caller should ensure the bio has at least some range added */
121 ASSERT(bbio->bio.bi_iter.bi_size);
122
123 if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
124 bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
125 btrfs_submit_compressed_read(bbio);
126 else
127 btrfs_submit_bbio(bbio, 0);
128
129 /* The bbio is owned by the end_io handler now */
130 bio_ctrl->bbio = NULL;
131 }
132
133 /*
134 * Submit or fail the current bio in the bio_ctrl structure.
135 */
136 static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
137 {
138 struct btrfs_bio *bbio = bio_ctrl->bbio;
139
140 if (!bbio)
141 return;
142
143 if (ret) {
144 ASSERT(ret < 0);
145 btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
146 /* The bio is owned by the end_io handler now */
147 bio_ctrl->bbio = NULL;
148 } else {
149 submit_one_bio(bio_ctrl);
150 }
151 }
152
153 int __init extent_buffer_init_cachep(void)
154 {
155 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
156 sizeof(struct extent_buffer), 0, 0,
157 NULL);
158 if (!extent_buffer_cache)
159 return -ENOMEM;
160
161 return 0;
162 }
163
164 void __cold extent_buffer_free_cachep(void)
165 {
166 /*
167 * Make sure all delayed rcu free are flushed before we
168 * destroy caches.
169 */
170 rcu_barrier();
171 kmem_cache_destroy(extent_buffer_cache);
172 }
173
174 static void process_one_folio(struct btrfs_fs_info *fs_info,
175 struct folio *folio, const struct folio *locked_folio,
176 unsigned long page_ops, u64 start, u64 end)
177 {
178 u32 len;
179
180 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
181 len = end + 1 - start;
182
183 if (page_ops & PAGE_SET_ORDERED)
184 btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
185 if (page_ops & PAGE_START_WRITEBACK) {
186 btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
187 btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
188 }
189 if (page_ops & PAGE_END_WRITEBACK)
190 btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
191
192 if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
193 btrfs_folio_end_lock(fs_info, folio, start, len);
194 }
195
196 static void __process_folios_contig(struct address_space *mapping,
197 const struct folio *locked_folio, u64 start,
198 u64 end, unsigned long page_ops)
199 {
200 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
201 pgoff_t index = start >> PAGE_SHIFT;
202 pgoff_t end_index = end >> PAGE_SHIFT;
203 struct folio_batch fbatch;
204 int i;
205
206 folio_batch_init(&fbatch);
207 while (index <= end_index) {
208 int found_folios;
209
210 found_folios = filemap_get_folios_contig(mapping, &index,
211 end_index, &fbatch);
212 for (i = 0; i < found_folios; i++) {
213 struct folio *folio = fbatch.folios[i];
214
215 process_one_folio(fs_info, folio, locked_folio,
216 page_ops, start, end);
217 }
218 folio_batch_release(&fbatch);
219 cond_resched();
220 }
221 }
222
223 static noinline void unlock_delalloc_folio(const struct inode *inode,
224 const struct folio *locked_folio,
225 u64 start, u64 end)
226 {
227 unsigned long index = start >> PAGE_SHIFT;
228 unsigned long end_index = end >> PAGE_SHIFT;
229
230 ASSERT(locked_folio);
231 if (index == locked_folio->index && end_index == index)
232 return;
233
234 __process_folios_contig(inode->i_mapping, locked_folio, start, end,
235 PAGE_UNLOCK);
236 }
237
238 static noinline int lock_delalloc_folios(struct inode *inode,
239 const struct folio *locked_folio,
240 u64 start, u64 end)
241 {
242 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
243 struct address_space *mapping = inode->i_mapping;
244 pgoff_t index = start >> PAGE_SHIFT;
245 pgoff_t end_index = end >> PAGE_SHIFT;
246 u64 processed_end = start;
247 struct folio_batch fbatch;
248
249 if (index == locked_folio->index && index == end_index)
250 return 0;
251
252 folio_batch_init(&fbatch);
253 while (index <= end_index) {
254 unsigned int found_folios, i;
255
256 found_folios = filemap_get_folios_contig(mapping, &index,
257 end_index, &fbatch);
258 if (found_folios == 0)
259 goto out;
260
261 for (i = 0; i < found_folios; i++) {
262 struct folio *folio = fbatch.folios[i];
263 u64 range_start;
264 u32 range_len;
265
266 if (folio == locked_folio)
267 continue;
268
269 folio_lock(folio);
270 if (!folio_test_dirty(folio) || folio->mapping != mapping) {
271 folio_unlock(folio);
272 goto out;
273 }
274 range_start = max_t(u64, folio_pos(folio), start);
275 range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
276 end + 1) - range_start;
277 btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
278
279 processed_end = range_start + range_len - 1;
280 }
281 folio_batch_release(&fbatch);
282 cond_resched();
283 }
284
285 return 0;
286 out:
287 folio_batch_release(&fbatch);
288 if (processed_end > start)
289 unlock_delalloc_folio(inode, locked_folio, start, processed_end);
290 return -EAGAIN;
291 }
292
293 /*
294 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
295 * more than @max_bytes.
296 *
297 * @start: The original start bytenr to search.
298 * Will store the extent range start bytenr.
299 * @end: The original end bytenr of the search range
300 * Will store the extent range end bytenr.
301 *
302 * Return true if we find a delalloc range which starts inside the original
303 * range, and @start/@end will store the delalloc range start/end.
304 *
305 * Return false if we can't find any delalloc range which starts inside the
306 * original range, and @start/@end will be the non-delalloc range start/end.
307 */
308 EXPORT_FOR_TESTS
309 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
310 struct folio *locked_folio,
311 u64 *start, u64 *end)
312 {
313 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
314 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
315 const u64 orig_start = *start;
316 const u64 orig_end = *end;
317 /* The sanity tests may not set a valid fs_info. */
318 u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
319 u64 delalloc_start;
320 u64 delalloc_end;
321 bool found;
322 struct extent_state *cached_state = NULL;
323 int ret;
324 int loops = 0;
325
326 /* Caller should pass a valid @end to indicate the search range end */
327 ASSERT(orig_end > orig_start);
328
329 /* The range should at least cover part of the folio */
330 ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
331 orig_end <= folio_pos(locked_folio)));
332 again:
333 /* step one, find a bunch of delalloc bytes starting at start */
334 delalloc_start = *start;
335 delalloc_end = 0;
336 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
337 max_bytes, &cached_state);
338 if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
339 *start = delalloc_start;
340
341 /* @delalloc_end can be -1, never go beyond @orig_end */
342 *end = min(delalloc_end, orig_end);
343 free_extent_state(cached_state);
344 return false;
345 }
346
347 /*
348 * start comes from the offset of locked_folio. We have to lock
349 * folios in order, so we can't process delalloc bytes before
350 * locked_folio
351 */
352 if (delalloc_start < *start)
353 delalloc_start = *start;
354
355 /*
356 * make sure to limit the number of folios we try to lock down
357 */
358 if (delalloc_end + 1 - delalloc_start > max_bytes)
359 delalloc_end = delalloc_start + max_bytes - 1;
360
361 /* step two, lock all the folios after the folio that has start */
362 ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
363 delalloc_end);
364 ASSERT(!ret || ret == -EAGAIN);
365 if (ret == -EAGAIN) {
366 /* Some of the folios are gone, let's avoid looping by
367  * shortening the size of the delalloc range we're searching.
368  */
369 free_extent_state(cached_state);
370 cached_state = NULL;
371 if (!loops) {
372 max_bytes = PAGE_SIZE;
373 loops = 1;
374 goto again;
375 } else {
376 found = false;
377 goto out_failed;
378 }
379 }
380
381 /* step three, lock the state bits for the whole range */
382 lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
383
384 /* then test to make sure it is all still delalloc */
385 ret = test_range_bit(tree, delalloc_start, delalloc_end,
386 EXTENT_DELALLOC, cached_state);
387
388 unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
389 if (!ret) {
390 unlock_delalloc_folio(inode, locked_folio, delalloc_start,
391 delalloc_end);
392 cond_resched();
393 goto again;
394 }
395 *start = delalloc_start;
396 *end = delalloc_end;
397 out_failed:
398 return found;
399 }
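
/*
 * Illustrative caller pattern (a sketch only, see writepage_delalloc() below
 * for the real loop): writeback probes the folio range repeatedly until no
 * more delalloc is found:
 *
 *	delalloc_start = page_start;
 *	while (delalloc_start < page_end) {
 *		delalloc_end = page_end;
 *		if (!find_lock_delalloc_range(inode, folio, &delalloc_start,
 *					      &delalloc_end)) {
 *			delalloc_start = delalloc_end + 1;
 *			continue;
 *		}
 *		... run or record the locked range ...
 *		delalloc_start = delalloc_end + 1;
 *	}
 */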
400
401 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
402 const struct folio *locked_folio,
403 struct extent_state **cached,
404 u32 clear_bits, unsigned long page_ops)
405 {
406 clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
407
408 __process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
409 end, page_ops);
410 }
411
412 static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
413 {
414 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
415
416 if (!fsverity_active(folio->mapping->host) ||
417 btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
418 start >= i_size_read(folio->mapping->host))
419 return true;
420 return fsverity_verify_folio(folio);
421 }
422
423 static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
424 {
425 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
426
427 ASSERT(folio_pos(folio) <= start &&
428 start + len <= folio_pos(folio) + PAGE_SIZE);
429
430 if (uptodate && btrfs_verify_folio(folio, start, len))
431 btrfs_folio_set_uptodate(fs_info, folio, start, len);
432 else
433 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
434
435 if (!btrfs_is_subpage(fs_info, folio->mapping))
436 folio_unlock(folio);
437 else
438 btrfs_folio_end_lock(fs_info, folio, start, len);
439 }
440
441 /*
442 * After a write IO is done, we need to:
443 *
444 * - clear the uptodate bits on error
445 * - clear the writeback bits in the extent tree for the range
446  * - folio_end_writeback() if there is no more pending IO for the folio
447 *
448 * Scheduling is not allowed, so the extent state tree is expected
449 * to have one and only one object corresponding to this IO.
450 */
451 static void end_bbio_data_write(struct btrfs_bio *bbio)
452 {
453 struct btrfs_fs_info *fs_info = bbio->fs_info;
454 struct bio *bio = &bbio->bio;
455 int error = blk_status_to_errno(bio->bi_status);
456 struct folio_iter fi;
457 const u32 sectorsize = fs_info->sectorsize;
458
459 ASSERT(!bio_flagged(bio, BIO_CLONED));
460 bio_for_each_folio_all(fi, bio) {
461 struct folio *folio = fi.folio;
462 u64 start = folio_pos(folio) + fi.offset;
463 u32 len = fi.length;
464
465 /* Only order 0 (single page) folios are allowed for data. */
466 ASSERT(folio_order(folio) == 0);
467
468 /* Our read/write should always be sector aligned. */
469 if (!IS_ALIGNED(fi.offset, sectorsize))
470 btrfs_err(fs_info,
471 "partial page write in btrfs with offset %zu and length %zu",
472 fi.offset, fi.length);
473 else if (!IS_ALIGNED(fi.length, sectorsize))
474 btrfs_info(fs_info,
475 "incomplete page write with offset %zu and length %zu",
476 fi.offset, fi.length);
477
478 btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
479 !error);
480 if (error)
481 mapping_set_error(folio->mapping, error);
482 btrfs_folio_clear_writeback(fs_info, folio, start, len);
483 }
484
485 bio_put(bio);
486 }
487
488 static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
489 {
490 ASSERT(folio_test_locked(folio));
491 if (!btrfs_is_subpage(fs_info, folio->mapping))
492 return;
493
494 ASSERT(folio_test_private(folio));
495 btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
496 }
497
498 /*
499 * After a data read IO is done, we need to:
500 *
501 * - clear the uptodate bits on error
502 * - set the uptodate bits if things worked
503 * - set the folio up to date if all extents in the tree are uptodate
504 * - clear the lock bit in the extent tree
505 * - unlock the folio if there are no other extents locked for it
506 *
507 * Scheduling is not allowed, so the extent state tree is expected
508 * to have one and only one object corresponding to this IO.
509 */
510 static void end_bbio_data_read(struct btrfs_bio *bbio)
511 {
512 struct btrfs_fs_info *fs_info = bbio->fs_info;
513 struct bio *bio = &bbio->bio;
514 struct folio_iter fi;
515 const u32 sectorsize = fs_info->sectorsize;
516
517 ASSERT(!bio_flagged(bio, BIO_CLONED));
518 bio_for_each_folio_all(fi, &bbio->bio) {
519 bool uptodate = !bio->bi_status;
520 struct folio *folio = fi.folio;
521 struct inode *inode = folio->mapping->host;
522 u64 start;
523 u64 end;
524 u32 len;
525
526 btrfs_debug(fs_info,
527 "%s: bi_sector=%llu, err=%d, mirror=%u",
528 __func__, bio->bi_iter.bi_sector, bio->bi_status,
529 bbio->mirror_num);
530
531 /*
532 * We always issue full-sector reads, but if some block in a
533 * folio fails to read, blk_update_request() will advance
534 * bv_offset and adjust bv_len to compensate. Print a warning
535 * for unaligned offsets, and an error if they don't add up to
536 * a full sector.
537 */
538 if (!IS_ALIGNED(fi.offset, sectorsize))
539 btrfs_err(fs_info,
540 "partial page read in btrfs with offset %zu and length %zu",
541 fi.offset, fi.length);
542 else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
543 btrfs_info(fs_info,
544 "incomplete page read with offset %zu and length %zu",
545 fi.offset, fi.length);
546
547 start = folio_pos(folio) + fi.offset;
548 end = start + fi.length - 1;
549 len = fi.length;
550
551 if (likely(uptodate)) {
552 loff_t i_size = i_size_read(inode);
553
554 /*
555 * Zero out the remaining part if this range straddles
556 * i_size.
557 *
558 * Here we should only zero the range inside the folio,
559 * not touch anything else.
560 *
561 * NOTE: i_size is exclusive while end is inclusive and
562 * folio_contains() takes PAGE_SIZE units.
563 */
564 if (folio_contains(folio, i_size >> PAGE_SHIFT) &&
565 i_size <= end) {
566 u32 zero_start = max(offset_in_folio(folio, i_size),
567 offset_in_folio(folio, start));
568 u32 zero_len = offset_in_folio(folio, end) + 1 -
569 zero_start;
570
571 folio_zero_range(folio, zero_start, zero_len);
572 }
573 }
574
575 /* Update page status and unlock. */
576 end_folio_read(folio, uptodate, start, len);
577 }
578 bio_put(bio);
579 }
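
/*
 * Worked example for the i_size zeroing above (hypothetical numbers, assuming
 * a 4K page and 4K sectorsize): a folio covering file range [0, 4K) with
 * i_size == 3K finishes a successful read with start == 0 and end == 4095, so
 * end_bbio_data_read() computes zero_start == 3072 and zero_len == 1024,
 * zeroing only the tail beyond i_size while the first 3K keep the data read
 * from disk.
 */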
580
581 /*
582 * Populate every free slot in a provided array with folios using GFP_NOFS.
583 *
584 * @nr_folios: number of folios to allocate
585 * @folio_array: the array to fill with folios; any existing non-NULL entries in
586 * the array will be skipped
587 *
588 * Return: 0 if all folios were able to be allocated;
589 * -ENOMEM otherwise, the partially allocated folios would be freed and
590 * the array slots zeroed
591 */
592 int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
593 {
594 for (int i = 0; i < nr_folios; i++) {
595 if (folio_array[i])
596 continue;
597 folio_array[i] = folio_alloc(GFP_NOFS, 0);
598 if (!folio_array[i])
599 goto error;
600 }
601 return 0;
602 error:
603 for (int i = 0; i < nr_folios; i++) {
604 if (folio_array[i])
605 folio_put(folio_array[i]);
606 }
607 return -ENOMEM;
608 }
609
610 /*
611 * Populate every free slot in a provided array with pages, using GFP_NOFS.
612 *
613 * @nr_pages: number of pages to allocate
614 * @page_array: the array to fill with pages; any existing non-null entries in
615 * the array will be skipped
616 * @nofail: whether using __GFP_NOFAIL flag
617 *
618 * Return: 0 if all pages were able to be allocated;
619 * -ENOMEM otherwise, the partially allocated pages would be freed and
620 * the array slots zeroed
621 */
622 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
623 bool nofail)
624 {
625 const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
626 unsigned int allocated;
627
628 for (allocated = 0; allocated < nr_pages;) {
629 unsigned int last = allocated;
630
631 allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
632 if (unlikely(allocated == last)) {
633 /* No progress, fail and do cleanup. */
634 for (int i = 0; i < allocated; i++) {
635 __free_page(page_array[i]);
636 page_array[i] = NULL;
637 }
638 return -ENOMEM;
639 }
640 }
641 return 0;
642 }
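
/*
 * Usage sketch (illustrative only): callers pass in a zero-initialized array
 * and the helper retries the bulk allocator until it stops making progress:
 *
 *	struct page *pages[16] = { 0 };
 *
 *	if (btrfs_alloc_page_array(16, pages, false))
 *		return -ENOMEM;
 *
 * alloc_pages_bulk() only fills NULL slots and returns the total number of
 * populated entries, which is why the loop above treats an unchanged count as
 * no progress and gives up with -ENOMEM.
 */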
643
644 /*
645 * Populate needed folios for the extent buffer.
646 *
647 * For now, the folios populated are always in order 0 (aka, single page).
648 */
649 static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
650 {
651 struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
652 int num_pages = num_extent_pages(eb);
653 int ret;
654
655 ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
656 if (ret < 0)
657 return ret;
658
659 for (int i = 0; i < num_pages; i++)
660 eb->folios[i] = page_folio(page_array[i]);
661 eb->folio_size = PAGE_SIZE;
662 eb->folio_shift = PAGE_SHIFT;
663 return 0;
664 }
665
666 static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
667 struct folio *folio, u64 disk_bytenr,
668 unsigned int pg_offset)
669 {
670 struct bio *bio = &bio_ctrl->bbio->bio;
671 struct bio_vec *bvec = bio_last_bvec_all(bio);
672 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
673 struct folio *bv_folio = page_folio(bvec->bv_page);
674
675 if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
676 /*
677 * For compression, all IO should have its logical bytenr set
678 * to the starting bytenr of the compressed extent.
679 */
680 return bio->bi_iter.bi_sector == sector;
681 }
682
683 /*
684 * The contig check requires the following conditions to be met:
685 *
686  * 1) The folios belong to the same inode
687 * This is implied by the call chain.
688 *
689 * 2) The range has adjacent logical bytenr
690 *
691 * 3) The range has adjacent file offset
692 * This is required for the usage of btrfs_bio->file_offset.
693 */
694 return bio_end_sector(bio) == sector &&
695 folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len ==
696 folio_pos(folio) + pg_offset;
697 }
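
/*
 * Worked example (hypothetical numbers): if the current bio ends at disk
 * bytenr 1M and its last bvec covers file offset [8K, 12K), a new sector at
 * disk bytenr 1M with file offset 12K passes both checks (adjacent logical
 * bytenr and adjacent file offset) and can be appended.  The same sector at
 * file offset 64K would fail the second check and force a new bio, keeping
 * btrfs_bio->file_offset meaningful for the whole bio.
 */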
698
699 static void alloc_new_bio(struct btrfs_inode *inode,
700 struct btrfs_bio_ctrl *bio_ctrl,
701 u64 disk_bytenr, u64 file_offset)
702 {
703 struct btrfs_fs_info *fs_info = inode->root->fs_info;
704 struct btrfs_bio *bbio;
705
706 bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
707 bio_ctrl->end_io_func, NULL);
708 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
709 bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint;
710 bbio->inode = inode;
711 bbio->file_offset = file_offset;
712 bio_ctrl->bbio = bbio;
713 bio_ctrl->len_to_oe_boundary = U32_MAX;
714
715 /* Limit data write bios to the ordered boundary. */
716 if (bio_ctrl->wbc) {
717 struct btrfs_ordered_extent *ordered;
718
719 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
720 if (ordered) {
721 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
722 ordered->file_offset +
723 ordered->disk_num_bytes - file_offset);
724 bbio->ordered = ordered;
725 }
726
727 /*
728 * Pick the last added device to support cgroup writeback. For
729 * multi-device file systems this means blk-cgroup policies have
730 * to always be set on the last added/replaced device.
731 * This is a bit odd but has been like that for a long time.
732 */
733 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
734 wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
735 }
736 }
737
738 /*
739  * @disk_bytenr: logical bytenr where the write will be
740  * @folio: folio to add to the bio
741  * @size: portion of the folio that we want to write to
742  * @pg_offset: offset of the new bio or to check whether we are adding
743  * a contiguous folio to the previous one
744  *
745  * This will either add the folio into the existing @bio_ctrl->bbio, or allocate
746  * a new one in @bio_ctrl->bbio.
747  * The mirror number for this IO should already be initialized in
748  * @bio_ctrl->mirror_num.
749 */
750 static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
751 u64 disk_bytenr, struct folio *folio,
752 size_t size, unsigned long pg_offset)
753 {
754 struct btrfs_inode *inode = folio_to_inode(folio);
755
756 ASSERT(pg_offset + size <= PAGE_SIZE);
757 ASSERT(bio_ctrl->end_io_func);
758
759 if (bio_ctrl->bbio &&
760 !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
761 submit_one_bio(bio_ctrl);
762
763 do {
764 u32 len = size;
765
766 /* Allocate new bio if needed */
767 if (!bio_ctrl->bbio) {
768 alloc_new_bio(inode, bio_ctrl, disk_bytenr,
769 folio_pos(folio) + pg_offset);
770 }
771
772 /* Cap to the current ordered extent boundary if there is one. */
773 if (len > bio_ctrl->len_to_oe_boundary) {
774 ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
775 ASSERT(is_data_inode(inode));
776 len = bio_ctrl->len_to_oe_boundary;
777 }
778
779 if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
780 /* bio full: move on to a new one */
781 submit_one_bio(bio_ctrl);
782 continue;
783 }
784
785 if (bio_ctrl->wbc)
786 wbc_account_cgroup_owner(bio_ctrl->wbc, folio,
787 len);
788
789 size -= len;
790 pg_offset += len;
791 disk_bytenr += len;
792
793 /*
794 * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
795 * sector aligned. alloc_new_bio() then sets it to the end of
796 * our ordered extent for writes into zoned devices.
797 *
798 * When len_to_oe_boundary is tracking an ordered extent, we
799 * trust the ordered extent code to align things properly, and
800 * the check above to cap our write to the ordered extent
801 * boundary is correct.
802 *
803 * When len_to_oe_boundary is U32_MAX, the cap above would
804 * result in a 4095 byte IO for the last folio right before
805 * we hit the bio limit of UINT_MAX. bio_add_folio() has all
806 * the checks required to make sure we don't overflow the bio,
807 * and we should just ignore len_to_oe_boundary completely
808 * unless we're using it to track an ordered extent.
809 *
810 * It's pretty hard to make a bio sized U32_MAX, but it can
811 * happen when the page cache is able to feed us contiguous
812 * folios for large extents.
813 */
814 if (bio_ctrl->len_to_oe_boundary != U32_MAX)
815 bio_ctrl->len_to_oe_boundary -= len;
816
817 /* Ordered extent boundary: move on to a new bio. */
818 if (bio_ctrl->len_to_oe_boundary == 0)
819 submit_one_bio(bio_ctrl);
820 } while (size);
821 }
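
/*
 * Worked example for the ordered extent boundary handling above (hypothetical
 * numbers): with len_to_oe_boundary == 16K and sector-sized writes added one
 * at a time (as submit_one_sector() does), the boundary counts down 4K per
 * call; after the fourth sector it reaches zero, the bio is submitted, and
 * the next sector triggers alloc_new_bio(), which looks up the next ordered
 * extent and resets the boundary.
 */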
822
823 static int attach_extent_buffer_folio(struct extent_buffer *eb,
824 struct folio *folio,
825 struct btrfs_subpage *prealloc)
826 {
827 struct btrfs_fs_info *fs_info = eb->fs_info;
828 int ret = 0;
829
830 /*
831  * If the page is mapped to the btree inode, we should hold the private
832  * lock to prevent races.
833 * For cloned or dummy extent buffers, their pages are not mapped and
834 * will not race with any other ebs.
835 */
836 if (folio->mapping)
837 lockdep_assert_held(&folio->mapping->i_private_lock);
838
839 if (fs_info->nodesize >= PAGE_SIZE) {
840 if (!folio_test_private(folio))
841 folio_attach_private(folio, eb);
842 else
843 WARN_ON(folio_get_private(folio) != eb);
844 return 0;
845 }
846
847 /* Already mapped, just free prealloc */
848 if (folio_test_private(folio)) {
849 btrfs_free_subpage(prealloc);
850 return 0;
851 }
852
853 if (prealloc)
854 /* Has preallocated memory for subpage */
855 folio_attach_private(folio, prealloc);
856 else
857 /* Do new allocation to attach subpage */
858 ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
859 return ret;
860 }
861
862 int set_folio_extent_mapped(struct folio *folio)
863 {
864 struct btrfs_fs_info *fs_info;
865
866 ASSERT(folio->mapping);
867
868 if (folio_test_private(folio))
869 return 0;
870
871 fs_info = folio_to_fs_info(folio);
872
873 if (btrfs_is_subpage(fs_info, folio->mapping))
874 return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
875
876 folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
877 return 0;
878 }
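
/*
 * Both the data read and write paths call set_folio_extent_mapped() before
 * doing any per-sector work on a folio (see btrfs_do_readpage() and
 * extent_writepage() below), so by the time a data folio reaches the bio
 * building code it carries either the EXTENT_FOLIO_PRIVATE marker or an
 * attached subpage structure.
 */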
879
880 void clear_folio_extent_mapped(struct folio *folio)
881 {
882 struct btrfs_fs_info *fs_info;
883
884 ASSERT(folio->mapping);
885
886 if (!folio_test_private(folio))
887 return;
888
889 fs_info = folio_to_fs_info(folio);
890 if (btrfs_is_subpage(fs_info, folio->mapping))
891 return btrfs_detach_subpage(fs_info, folio);
892
893 folio_detach_private(folio);
894 }
895
896 static struct extent_map *get_extent_map(struct btrfs_inode *inode,
897 struct folio *folio, u64 start,
898 u64 len, struct extent_map **em_cached)
899 {
900 struct extent_map *em;
901
902 ASSERT(em_cached);
903
904 if (*em_cached) {
905 em = *em_cached;
906 if (extent_map_in_tree(em) && start >= em->start &&
907 start < extent_map_end(em)) {
908 refcount_inc(&em->refs);
909 return em;
910 }
911
912 free_extent_map(em);
913 *em_cached = NULL;
914 }
915
916 em = btrfs_get_extent(inode, folio, start, len);
917 if (!IS_ERR(em)) {
918 BUG_ON(*em_cached);
919 refcount_inc(&em->refs);
920 *em_cached = em;
921 }
922
923 return em;
924 }
925 /*
926  * Basic readpage implementation. Locked extent state structs are inserted
927  * into the tree and removed when the IO is done (by the end_io handlers).
928  *
929  * XXX JDM: This needs looking at to ensure proper page locking.
930  * Return 0 on success, otherwise return an error.
931 */
932 static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
933 struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
934 {
935 struct inode *inode = folio->mapping->host;
936 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
937 u64 start = folio_pos(folio);
938 const u64 end = start + PAGE_SIZE - 1;
939 u64 cur = start;
940 u64 extent_offset;
941 u64 last_byte = i_size_read(inode);
942 u64 block_start;
943 struct extent_map *em;
944 int ret = 0;
945 size_t pg_offset = 0;
946 size_t iosize;
947 size_t blocksize = fs_info->sectorsize;
948
949 ret = set_folio_extent_mapped(folio);
950 if (ret < 0) {
951 folio_unlock(folio);
952 return ret;
953 }
954
955 if (folio_contains(folio, last_byte >> PAGE_SHIFT)) {
956 size_t zero_offset = offset_in_folio(folio, last_byte);
957
958 if (zero_offset) {
959 iosize = folio_size(folio) - zero_offset;
960 folio_zero_range(folio, zero_offset, iosize);
961 }
962 }
963 bio_ctrl->end_io_func = end_bbio_data_read;
964 begin_folio_read(fs_info, folio);
965 while (cur <= end) {
966 enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
967 bool force_bio_submit = false;
968 u64 disk_bytenr;
969
970 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
971 if (cur >= last_byte) {
972 iosize = folio_size(folio) - pg_offset;
973 folio_zero_range(folio, pg_offset, iosize);
974 end_folio_read(folio, true, cur, iosize);
975 break;
976 }
977 em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
978 if (IS_ERR(em)) {
979 end_folio_read(folio, false, cur, end + 1 - cur);
980 return PTR_ERR(em);
981 }
982 extent_offset = cur - em->start;
983 BUG_ON(extent_map_end(em) <= cur);
984 BUG_ON(end < cur);
985
986 compress_type = extent_map_compression(em);
987
988 iosize = min(extent_map_end(em) - cur, end - cur + 1);
989 iosize = ALIGN(iosize, blocksize);
990 if (compress_type != BTRFS_COMPRESS_NONE)
991 disk_bytenr = em->disk_bytenr;
992 else
993 disk_bytenr = extent_map_block_start(em) + extent_offset;
994 block_start = extent_map_block_start(em);
995 if (em->flags & EXTENT_FLAG_PREALLOC)
996 block_start = EXTENT_MAP_HOLE;
997
998 /*
999 * If we have a file range that points to a compressed extent
1000 * and it's followed by a consecutive file range that points
1001 * to the same compressed extent (possibly with a different
1002 * offset and/or length, so it either points to the whole extent
1003 * or only part of it), we must make sure we do not submit a
1004 * single bio to populate the folios for the 2 ranges because
1005 * this makes the compressed extent read zero out the folios
1006 * belonging to the 2nd range. Imagine the following scenario:
1007 *
1008 * File layout
1009 * [0 - 8K] [8K - 24K]
1010 * | |
1011 * | |
1012 * points to extent X, points to extent X,
1013 * offset 4K, length of 8K offset 0, length 16K
1014 *
1015 * [extent X, compressed length = 4K uncompressed length = 16K]
1016 *
1017 * If the bio to read the compressed extent covers both ranges,
1018 * it will decompress extent X into the folios belonging to the
1019 * first range and then it will stop, zeroing out the remaining
1020 * folios that belong to the other range that points to extent X.
1021 * So here we make sure we submit 2 bios, one for the first
1022  * range and another one for the second range. Both will target
1023 * the same physical extent from disk, but we can't currently
1024 * make the compressed bio endio callback populate the folios
1025 * for both ranges because each compressed bio is tightly
1026 * coupled with a single extent map, and each range can have
1027 * an extent map with a different offset value relative to the
1028 * uncompressed data of our extent and different lengths. This
1029 * is a corner case so we prioritize correctness over
1030 * non-optimal behavior (submitting 2 bios for the same extent).
1031 */
1032 if (compress_type != BTRFS_COMPRESS_NONE &&
1033 prev_em_start && *prev_em_start != (u64)-1 &&
1034 *prev_em_start != em->start)
1035 force_bio_submit = true;
1036
1037 if (prev_em_start)
1038 *prev_em_start = em->start;
1039
1040 free_extent_map(em);
1041 em = NULL;
1042
1043 /* we've found a hole, just zero and go on */
1044 if (block_start == EXTENT_MAP_HOLE) {
1045 folio_zero_range(folio, pg_offset, iosize);
1046
1047 end_folio_read(folio, true, cur, iosize);
1048 cur = cur + iosize;
1049 pg_offset += iosize;
1050 continue;
1051 }
1052 /* the get_extent function already copied into the folio */
1053 if (block_start == EXTENT_MAP_INLINE) {
1054 end_folio_read(folio, true, cur, iosize);
1055 cur = cur + iosize;
1056 pg_offset += iosize;
1057 continue;
1058 }
1059
1060 if (bio_ctrl->compress_type != compress_type) {
1061 submit_one_bio(bio_ctrl);
1062 bio_ctrl->compress_type = compress_type;
1063 }
1064
1065 if (force_bio_submit)
1066 submit_one_bio(bio_ctrl);
1067 submit_extent_folio(bio_ctrl, disk_bytenr, folio, iosize,
1068 pg_offset);
1069 cur = cur + iosize;
1070 pg_offset += iosize;
1071 }
1072
1073 return 0;
1074 }
1075
1076 int btrfs_read_folio(struct file *file, struct folio *folio)
1077 {
1078 struct btrfs_inode *inode = folio_to_inode(folio);
1079 const u64 start = folio_pos(folio);
1080 const u64 end = start + folio_size(folio) - 1;
1081 struct extent_state *cached_state = NULL;
1082 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1083 struct extent_map *em_cached = NULL;
1084 int ret;
1085
1086 btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
1087 ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
1088 unlock_extent(&inode->io_tree, start, end, &cached_state);
1089
1090 free_extent_map(em_cached);
1091
1092 /*
1093 * If btrfs_do_readpage() failed we will want to submit the assembled
1094 * bio to do the cleanup.
1095 */
1096 submit_one_bio(&bio_ctrl);
1097 return ret;
1098 }
1099
1100 static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
1101 u64 start, u32 len)
1102 {
1103 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1104 const u64 folio_start = folio_pos(folio);
1105 unsigned int start_bit;
1106 unsigned int nbits;
1107
1108 ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE);
1109 start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1110 nbits = len >> fs_info->sectorsize_bits;
1111 ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
1112 bitmap_set(delalloc_bitmap, start_bit, nbits);
1113 }
1114
1115 static bool find_next_delalloc_bitmap(struct folio *folio,
1116 unsigned long *delalloc_bitmap, u64 start,
1117 u64 *found_start, u32 *found_len)
1118 {
1119 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1120 const u64 folio_start = folio_pos(folio);
1121 const unsigned int bitmap_size = fs_info->sectors_per_page;
1122 unsigned int start_bit;
1123 unsigned int first_zero;
1124 unsigned int first_set;
1125
1126 ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE);
1127
1128 start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1129 first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
1130 if (first_set >= bitmap_size)
1131 return false;
1132
1133 *found_start = folio_start + (first_set << fs_info->sectorsize_bits);
1134 first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
1135 *found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
1136 return true;
1137 }
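
/*
 * Worked example (assuming a 16K page and 4K sectorsize, i.e. 4 bits per
 * folio): a delalloc range [8K, 16K) inside a folio starting at file offset 0
 * sets bits 2 and 3 in set_delalloc_bitmap().  A later call to
 * find_next_delalloc_bitmap(folio, &bitmap, 0, ...) then reports
 * found_start == 8K and found_len == 8K.
 */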
1138
1139 /*
1140 * Do all of the delayed allocation setup.
1141 *
1142 * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
1143 * The @folio should no longer be touched (treat it as already unlocked).
1144 *
1145  * Return 0 if there are still dirty blocks that need to be submitted through
1146 * extent_writepage_io().
1147 * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
1148 * submitted, and @folio is still kept locked.
1149 *
1150 * Return <0 if there is any error hit.
1151 * Any allocated ordered extent range covering this folio will be marked
1152 * finished (IOERR), and @folio is still kept locked.
1153 */
1154 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1155 struct folio *folio,
1156 struct btrfs_bio_ctrl *bio_ctrl)
1157 {
1158 struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
1159 struct writeback_control *wbc = bio_ctrl->wbc;
1160 const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
1161 const u64 page_start = folio_pos(folio);
1162 const u64 page_end = page_start + folio_size(folio) - 1;
1163 unsigned long delalloc_bitmap = 0;
1164 /*
1165  * Save the last found delalloc end. As the delalloc end can go beyond
1166  * the page boundary, we cannot rely on the subpage bitmap to locate the
1167 * last delalloc end.
1168 */
1169 u64 last_delalloc_end = 0;
1170 /*
1171 * The range end (exclusive) of the last successfully finished delalloc
1172 * range.
1173 * Any range covered by ordered extent must either be manually marked
1174 * finished (error handling), or has IO submitted (and finish the
1175 * ordered extent normally).
1176 *
1177 * This records the end of ordered extent cleanup if we hit an error.
1178 */
1179 u64 last_finished_delalloc_end = page_start;
1180 u64 delalloc_start = page_start;
1181 u64 delalloc_end = page_end;
1182 u64 delalloc_to_write = 0;
1183 int ret = 0;
1184 int bit;
1185
1186 /* Save the dirty bitmap as our submission bitmap will be a subset of it. */
1187 if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
1188 ASSERT(fs_info->sectors_per_page > 1);
1189 btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
1190 } else {
1191 bio_ctrl->submit_bitmap = 1;
1192 }
1193
1194 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1195 u64 start = page_start + (bit << fs_info->sectorsize_bits);
1196
1197 btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
1198 }
1199
1200 /* Lock all (subpage) delalloc ranges inside the folio first. */
1201 while (delalloc_start < page_end) {
1202 delalloc_end = page_end;
1203 if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
1204 &delalloc_start, &delalloc_end)) {
1205 delalloc_start = delalloc_end + 1;
1206 continue;
1207 }
1208 set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
1209 min(delalloc_end, page_end) + 1 - delalloc_start);
1210 last_delalloc_end = delalloc_end;
1211 delalloc_start = delalloc_end + 1;
1212 }
1213 delalloc_start = page_start;
1214
1215 if (!last_delalloc_end)
1216 goto out;
1217
1218 /* Run the delalloc ranges for the above locked ranges. */
1219 while (delalloc_start < page_end) {
1220 u64 found_start;
1221 u32 found_len;
1222 bool found;
1223
1224 if (!is_subpage) {
1225 /*
1226 * For non-subpage case, the found delalloc range must
1227 * cover this folio and there must be only one locked
1228 * delalloc range.
1229 */
1230 found_start = page_start;
1231 found_len = last_delalloc_end + 1 - found_start;
1232 found = true;
1233 } else {
1234 found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
1235 delalloc_start, &found_start, &found_len);
1236 }
1237 if (!found)
1238 break;
1239 /*
1240  * If the subpage range covers the last sector, the delalloc range may
1241  * end beyond the folio boundary, so use the saved last_delalloc_end
1242 * instead.
1243 */
1244 if (found_start + found_len >= page_end)
1245 found_len = last_delalloc_end + 1 - found_start;
1246
1247 if (ret >= 0) {
1248 /*
1249  * Some delalloc ranges may have been created by previous folios.
1250 * Thus we still need to clean up this range during error
1251 * handling.
1252 */
1253 last_finished_delalloc_end = found_start;
1254 /* No errors hit so far, run the current delalloc range. */
1255 ret = btrfs_run_delalloc_range(inode, folio,
1256 found_start,
1257 found_start + found_len - 1,
1258 wbc);
1259 if (ret >= 0)
1260 last_finished_delalloc_end = found_start + found_len;
1261 if (unlikely(ret < 0))
1262 btrfs_err_rl(fs_info,
1263 "failed to run delalloc range, root=%lld ino=%llu folio=%llu submit_bitmap=%*pbl start=%llu len=%u: %d",
1264 btrfs_root_id(inode->root),
1265 btrfs_ino(inode),
1266 folio_pos(folio),
1267 fs_info->sectors_per_page,
1268 &bio_ctrl->submit_bitmap,
1269 found_start, found_len, ret);
1270 } else {
1271 /*
1272 * We've hit an error during previous delalloc range,
1273 * have to cleanup the remaining locked ranges.
1274 */
1275 unlock_extent(&inode->io_tree, found_start,
1276 found_start + found_len - 1, NULL);
1277 unlock_delalloc_folio(&inode->vfs_inode, folio,
1278 found_start,
1279 found_start + found_len - 1);
1280 }
1281
1282 /*
1283  * We have some ranges that are going to be submitted asynchronously
1284  * (compression or inline). These ranges have their own control
1285  * over when to unlock the folios. We should not touch them
1286 * anymore, so clear the range from the submission bitmap.
1287 */
1288 if (ret > 0) {
1289 unsigned int start_bit = (found_start - page_start) >>
1290 fs_info->sectorsize_bits;
1291 unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
1292 page_start) >> fs_info->sectorsize_bits;
1293 bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
1294 }
1295 /*
1296 * Above btrfs_run_delalloc_range() may have unlocked the folio,
1297 * thus for the last range, we cannot touch the folio anymore.
1298 */
1299 if (found_start + found_len >= last_delalloc_end + 1)
1300 break;
1301
1302 delalloc_start = found_start + found_len;
1303 }
1304 /*
1305 * It's possible we had some ordered extents created before we hit
1306  * an error; clean up the non-async, successfully created delalloc ranges.
1307 */
1308 if (unlikely(ret < 0)) {
1309 unsigned int bitmap_size = min(
1310 (last_finished_delalloc_end - page_start) >>
1311 fs_info->sectorsize_bits,
1312 fs_info->sectors_per_page);
1313
1314 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
1315 btrfs_mark_ordered_io_finished(inode, folio,
1316 page_start + (bit << fs_info->sectorsize_bits),
1317 fs_info->sectorsize, false);
1318 return ret;
1319 }
1320 out:
1321 if (last_delalloc_end)
1322 delalloc_end = last_delalloc_end;
1323 else
1324 delalloc_end = page_end;
1325 /*
1326 * delalloc_end is already one less than the total length, so
1327 * we don't subtract one from PAGE_SIZE
1328 */
1329 delalloc_to_write +=
1330 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1331
1332 /*
1333 * If all ranges are submitted asynchronously, we just need to account
1334 * for them here.
1335 */
1336 if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
1337 wbc->nr_to_write -= delalloc_to_write;
1338 return 1;
1339 }
1340
1341 if (wbc->nr_to_write < delalloc_to_write) {
1342 int thresh = 8192;
1343
1344 if (delalloc_to_write < thresh * 2)
1345 thresh = delalloc_to_write;
1346 wbc->nr_to_write = min_t(u64, delalloc_to_write,
1347 thresh);
1348 }
1349
1350 return 0;
1351 }
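
/*
 * A sketch of how the return value is consumed (see extent_writepage()
 * below): ret == 1 means everything was handed off to async submission and
 * the caller returns immediately, ret == 0 means bio_ctrl->submit_bitmap
 * still holds blocks for extent_writepage_io() to submit, and ret < 0 aborts
 * the writepage with the error.
 */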
1352
1353 /*
1354 * Return 0 if we have submitted or queued the sector for submission.
1355 * Return <0 for critical errors.
1356 *
1357 * Caller should make sure filepos < i_size and handle filepos >= i_size case.
1358 */
1359 static int submit_one_sector(struct btrfs_inode *inode,
1360 struct folio *folio,
1361 u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
1362 loff_t i_size)
1363 {
1364 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1365 struct extent_map *em;
1366 u64 block_start;
1367 u64 disk_bytenr;
1368 u64 extent_offset;
1369 u64 em_end;
1370 const u32 sectorsize = fs_info->sectorsize;
1371
1372 ASSERT(IS_ALIGNED(filepos, sectorsize));
1373
1374 /* @filepos >= i_size case should be handled by the caller. */
1375 ASSERT(filepos < i_size);
1376
1377 em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
1378 if (IS_ERR(em))
1379 return PTR_ERR(em);
1380
1381 extent_offset = filepos - em->start;
1382 em_end = extent_map_end(em);
1383 ASSERT(filepos <= em_end);
1384 ASSERT(IS_ALIGNED(em->start, sectorsize));
1385 ASSERT(IS_ALIGNED(em->len, sectorsize));
1386
1387 block_start = extent_map_block_start(em);
1388 disk_bytenr = extent_map_block_start(em) + extent_offset;
1389
1390 ASSERT(!extent_map_is_compressed(em));
1391 ASSERT(block_start != EXTENT_MAP_HOLE);
1392 ASSERT(block_start != EXTENT_MAP_INLINE);
1393
1394 free_extent_map(em);
1395 em = NULL;
1396
1397 /*
1398 * Although the PageDirty bit is cleared before entering this
1399  * function, the subpage dirty bit is not cleared.
1400  * So clear the subpage dirty bit here so that next time we won't
1401  * submit the folio for a range already written to disk.
1402 */
1403 btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
1404 btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
1405 /*
1406  * The above call should set the writeback flag on the whole folio,
1407  * even just for a single subpage sector.
1408  * As long as the folio is properly locked and the range is correct,
1409  * we should always get the folio with the writeback flag set.
1410 */
1411 ASSERT(folio_test_writeback(folio));
1412
1413 submit_extent_folio(bio_ctrl, disk_bytenr, folio,
1414 sectorsize, filepos - folio_pos(folio));
1415 return 0;
1416 }
1417
1418 /*
1419 * Helper for extent_writepage(). This calls the writepage start hooks,
1420 * and does the loop to map the page into extents and bios.
1421 *
1422  * We return 1 if the IO is started and the page is unlocked,
1423  * 0 if all went well (page still locked), and
1424  * < 0 if there were errors (page still locked).
1425 */
1426 static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
1427 struct folio *folio,
1428 u64 start, u32 len,
1429 struct btrfs_bio_ctrl *bio_ctrl,
1430 loff_t i_size)
1431 {
1432 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1433 unsigned long range_bitmap = 0;
1434 bool submitted_io = false;
1435 bool error = false;
1436 const u64 folio_start = folio_pos(folio);
1437 u64 cur;
1438 int bit;
1439 int ret = 0;
1440
1441 ASSERT(start >= folio_start &&
1442 start + len <= folio_start + folio_size(folio));
1443
1444 ret = btrfs_writepage_cow_fixup(folio);
1445 if (ret) {
1446 /* Fixup worker will requeue */
1447 folio_redirty_for_writepage(bio_ctrl->wbc, folio);
1448 folio_unlock(folio);
1449 return 1;
1450 }
1451
1452 for (cur = start; cur < start + len; cur += fs_info->sectorsize)
1453 set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
1454 bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
1455 fs_info->sectors_per_page);
1456
1457 bio_ctrl->end_io_func = end_bbio_data_write;
1458
1459 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1460 cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
1461
1462 if (cur >= i_size) {
1463 btrfs_mark_ordered_io_finished(inode, folio, cur,
1464 start + len - cur, true);
1465 /*
1466 * This range is beyond i_size, thus we don't need to
1467 * bother writing back.
1468 * But we still need to clear the dirty subpage bit, or
1469 * the next time the folio gets dirtied, we will try to
1470  * write back the sectors with subpage dirty bits set,
1471  * causing writeback without an ordered extent.
1472 */
1473 btrfs_folio_clear_dirty(fs_info, folio, cur,
1474 start + len - cur);
1475 break;
1476 }
1477 ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
1478 if (unlikely(ret < 0)) {
1479 /*
1480 * bio_ctrl may contain a bio crossing several folios.
1481 * Submit it immediately so that the bio has a chance
1482 * to finish normally, other than marked as error.
1483 */
1484 submit_one_bio(bio_ctrl);
1485 /*
1486 * Failed to grab the extent map which should be very rare.
1487 * Since there is no bio submitted to finish the ordered
1488 * extent, we have to manually finish this sector.
1489 */
1490 btrfs_mark_ordered_io_finished(inode, folio, cur,
1491 fs_info->sectorsize, false);
1492 error = true;
1493 continue;
1494 }
1495 submitted_io = true;
1496 }
1497
1498 /*
1499  * If we didn't submit any sector (>= i_size), the folio dirty flag gets
1500 * cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared
1501 * by folio_start_writeback() if the folio is not dirty).
1502 *
1503 * Here we set writeback and clear for the range. If the full folio
1504 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
1505 *
1506 * If we hit any error, the corresponding sector will still be dirty
1507 * thus no need to clear PAGECACHE_TAG_DIRTY.
1508 */
1509 if (!submitted_io && !error) {
1510 btrfs_folio_set_writeback(fs_info, folio, start, len);
1511 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1512 }
1513 return ret;
1514 }
1515
1516 /*
1517  * The writepage semantics are similar to regular writepage. Extent
1518  * records are inserted to lock ranges in the tree, and as dirty areas
1519  * are found, they are marked for writeback. Then the lock bits are removed
1520  * and the end_io handler clears the writeback ranges.
1521 *
1522 * Return 0 if everything goes well.
1523 * Return <0 for error.
1524 */
1525 static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
1526 {
1527 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
1528 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1529 int ret;
1530 size_t pg_offset;
1531 loff_t i_size = i_size_read(&inode->vfs_inode);
1532 unsigned long end_index = i_size >> PAGE_SHIFT;
1533
1534 trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
1535
1536 WARN_ON(!folio_test_locked(folio));
1537
1538 pg_offset = offset_in_folio(folio, i_size);
1539 if (folio->index > end_index ||
1540 (folio->index == end_index && !pg_offset)) {
1541 folio_invalidate(folio, 0, folio_size(folio));
1542 folio_unlock(folio);
1543 return 0;
1544 }
1545
1546 if (folio->index == end_index)
1547 folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
1548
1549 /*
1550  * Default to unlocking the whole folio.
1551  * The proper bitmap won't be initialized until writepage_delalloc().
1552 */
1553 bio_ctrl->submit_bitmap = (unsigned long)-1;
1554 ret = set_folio_extent_mapped(folio);
1555 if (ret < 0)
1556 goto done;
1557
1558 ret = writepage_delalloc(inode, folio, bio_ctrl);
1559 if (ret == 1)
1560 return 0;
1561 if (ret)
1562 goto done;
1563
1564 ret = extent_writepage_io(inode, folio, folio_pos(folio),
1565 PAGE_SIZE, bio_ctrl, i_size);
1566 if (ret == 1)
1567 return 0;
1568 if (ret < 0)
1569 btrfs_err_rl(fs_info,
1570 "failed to submit blocks, root=%lld inode=%llu folio=%llu submit_bitmap=%*pbl: %d",
1571 btrfs_root_id(inode->root), btrfs_ino(inode),
1572 folio_pos(folio), fs_info->sectors_per_page,
1573 &bio_ctrl->submit_bitmap, ret);
1574
1575 bio_ctrl->wbc->nr_to_write--;
1576
1577 done:
1578 if (ret < 0)
1579 mapping_set_error(folio->mapping, ret);
1580 /*
1581 * Only unlock ranges that are submitted. As there can be some async
1582 * submitted ranges inside the folio.
1583 */
1584 btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
1585 ASSERT(ret <= 0);
1586 return ret;
1587 }
1588
1589 /*
1590 * Lock extent buffer status and pages for writeback.
1591 *
1592 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1593  * extent buffer is not dirty).
1594  * Return %true if the extent buffer is submitted to a bio.
1595 */
1596 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1597 struct writeback_control *wbc)
1598 {
1599 struct btrfs_fs_info *fs_info = eb->fs_info;
1600 bool ret = false;
1601
1602 btrfs_tree_lock(eb);
1603 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1604 btrfs_tree_unlock(eb);
1605 if (wbc->sync_mode != WB_SYNC_ALL)
1606 return false;
1607 wait_on_extent_buffer_writeback(eb);
1608 btrfs_tree_lock(eb);
1609 }
1610
1611 /*
1612  * We need to do this to prevent races with anyone who checks whether the eb
1613  * is under IO, since we can end up having no IO bits set for a short period
1614 * of time.
1615 */
1616 spin_lock(&eb->refs_lock);
1617 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1618 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1619 spin_unlock(&eb->refs_lock);
1620 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1621 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1622 -eb->len,
1623 fs_info->dirty_metadata_batch);
1624 ret = true;
1625 } else {
1626 spin_unlock(&eb->refs_lock);
1627 }
1628 btrfs_tree_unlock(eb);
1629 return ret;
1630 }
1631
1632 static void set_btree_ioerr(struct extent_buffer *eb)
1633 {
1634 struct btrfs_fs_info *fs_info = eb->fs_info;
1635
1636 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1637
1638 /*
1639  * A read may stumble upon this buffer later; make sure that it gets an
1640 * error and knows there was an error.
1641 */
1642 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1643
1644 /*
1645  * We need to set the IO error on the mapping as well, because a write
1646  * error will flip the file system read-only, and then syncfs() would
1647  * return 0 (since we are read-only) if we don't bump the error sequence
1648  * for the superblock.
1649 */
1650 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1651
1652 /*
1653 * If writeback for a btree extent that doesn't belong to a log tree
1654 * failed, increment the counter transaction->eb_write_errors.
1655 * We do this because while the transaction is running and before it's
1656 * committing (when we call filemap_fdata[write|wait]_range against
1657 * the btree inode), we might have
1658 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1659 * returns an error or an error happens during writeback, when we're
1660 * committing the transaction we wouldn't know about it, since the pages
1661 * may no longer be dirty nor marked for writeback (if a
1662 * subsequent modification to the extent buffer didn't happen before the
1663 * transaction commit), which makes filemap_fdata[write|wait]_range not
1664 * able to find the pages which contain errors at transaction
1665 * commit time. So if this happens we must abort the transaction,
1666 * otherwise we commit a super block with btree roots that point to
1667 * btree nodes/leafs whose content on disk is invalid - either garbage
1668 * or the content of some node/leaf from a past generation that got
1669 * cowed or deleted and is no longer valid.
1670 *
1671 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1672 * not be enough - we need to distinguish between log tree extents vs
1673 * non-log tree extents, and the next filemap_fdatawait_range() call
1674 * will catch and clear such errors in the mapping - and that call might
1675 * be from a log sync and not from a transaction commit. Also, checking
1676 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1677 * not done and would not be reliable - the eb might have been released
1678 * from memory and reading it back again means that flag would not be
1679 * set (since it's a runtime flag, not persisted on disk).
1680 *
1681 * Using the flags below in the btree inode also covers the case where
1682 * writepages() returned success, having started writeback for all dirty
1683 * pages, but before filemap_fdatawait_range() is called the writeback
1684 * for all those dirty pages had already finished with errors - because
1685 * we were not using AS_EIO/AS_ENOSPC,
1686 * filemap_fdatawait_range() would return success, as it could not know
1687 * that writeback errors happened (the pages were no longer tagged for
1688 * writeback).
1689 */
1690 switch (eb->log_index) {
1691 case -1:
1692 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1693 break;
1694 case 0:
1695 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1696 break;
1697 case 1:
1698 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1699 break;
1700 default:
1701 BUG(); /* unexpected, logic error */
1702 }
1703 }
1704
1705 /*
1706 * The endio specific version which won't touch any unsafe spinlock in endio
1707 * context.
1708 */
1709 static struct extent_buffer *find_extent_buffer_nolock(
1710 const struct btrfs_fs_info *fs_info, u64 start)
1711 {
1712 struct extent_buffer *eb;
1713
1714 rcu_read_lock();
1715 eb = radix_tree_lookup(&fs_info->buffer_radix,
1716 start >> fs_info->sectorsize_bits);
1717 if (eb && atomic_inc_not_zero(&eb->refs)) {
1718 rcu_read_unlock();
1719 return eb;
1720 }
1721 rcu_read_unlock();
1722 return NULL;
1723 }
1724
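/*
* Write end I/O handler for extent buffer (metadata) bios.
*
* Record any write error on the extent buffer, clear the per-range writeback
* flags on each folio covered by the bio, then clear EXTENT_BUFFER_WRITEBACK
* and wake up anyone waiting for the writeback to finish.
*/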
1725 static void end_bbio_meta_write(struct btrfs_bio *bbio)
1726 {
1727 struct extent_buffer *eb = bbio->private;
1728 struct btrfs_fs_info *fs_info = eb->fs_info;
1729 struct folio_iter fi;
1730 u32 bio_offset = 0;
1731
1732 if (bbio->bio.bi_status != BLK_STS_OK)
1733 set_btree_ioerr(eb);
1734
1735 bio_for_each_folio_all(fi, &bbio->bio) {
1736 u64 start = eb->start + bio_offset;
1737 struct folio *folio = fi.folio;
1738 u32 len = fi.length;
1739
1740 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1741 bio_offset += len;
1742 }
1743
1744 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1745 smp_mb__after_atomic();
1746 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1747
1748 bio_put(&bbio->bio);
1749 }
1750
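/*
* Prepare an extent buffer for writeback: clear any previous write error flag
* and zero the unused areas beyond nritems so that no stale in-memory content
* ever reaches disk.
*/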
1751 static void prepare_eb_write(struct extent_buffer *eb)
1752 {
1753 u32 nritems;
1754 unsigned long start;
1755 unsigned long end;
1756
1757 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1758
1759 /* Set btree blocks beyond nritems with 0 to avoid stale content */
1760 nritems = btrfs_header_nritems(eb);
1761 if (btrfs_header_level(eb) > 0) {
1762 end = btrfs_node_key_ptr_offset(eb, nritems);
1763 memzero_extent_buffer(eb, end, eb->len - end);
1764 } else {
1765 /*
1766 * Leaf:
1767 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1768 */
1769 start = btrfs_item_nr_offset(eb, nritems);
1770 end = btrfs_item_nr_offset(eb, 0);
1771 if (nritems == 0)
1772 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1773 else
1774 end += btrfs_item_offset(eb, nritems - 1);
1775 memzero_extent_buffer(eb, start, end - start);
1776 }
1777 }
1778
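/*
* Submit a single extent buffer for writeback.
*
* Allocate a metadata write bio, mark the covered range (subpage case) or the
* full folios as under writeback, add them to the bio and submit it.
* Completion is handled by end_bbio_meta_write().
*/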
1779 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1780 struct writeback_control *wbc)
1781 {
1782 struct btrfs_fs_info *fs_info = eb->fs_info;
1783 struct btrfs_bio *bbio;
1784
1785 prepare_eb_write(eb);
1786
1787 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1788 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1789 eb->fs_info, end_bbio_meta_write, eb);
1790 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1791 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1792 wbc_init_bio(wbc, &bbio->bio);
1793 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1794 bbio->file_offset = eb->start;
1795 if (fs_info->nodesize < PAGE_SIZE) {
1796 struct folio *folio = eb->folios[0];
1797 bool ret;
1798
1799 folio_lock(folio);
1800 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1801 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1802 eb->len)) {
1803 folio_clear_dirty_for_io(folio);
1804 wbc->nr_to_write--;
1805 }
1806 ret = bio_add_folio(&bbio->bio, folio, eb->len,
1807 eb->start - folio_pos(folio));
1808 ASSERT(ret);
1809 wbc_account_cgroup_owner(wbc, folio, eb->len);
1810 folio_unlock(folio);
1811 } else {
1812 int num_folios = num_extent_folios(eb);
1813
1814 for (int i = 0; i < num_folios; i++) {
1815 struct folio *folio = eb->folios[i];
1816 bool ret;
1817
1818 folio_lock(folio);
1819 folio_clear_dirty_for_io(folio);
1820 folio_start_writeback(folio);
1821 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1822 ASSERT(ret);
1823 wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
1824 wbc->nr_to_write -= folio_nr_pages(folio);
1825 folio_unlock(folio);
1826 }
1827 }
1828 btrfs_submit_bbio(bbio, 0);
1829 }
1830
1831 /*
1832 * Submit one subpage btree page.
1833 *
1834 * The main difference to submit_eb_page() is:
1835 * - Page locking
1836 * For subpage, we don't rely on page locking at all.
1837 *
1838 * - Flush write bio
1839 * We only flush the bio if we may be unable to fit the current extent
1840 * buffer into the current bio.
1841 *
1842 * Return >=0 for the number of submitted extent buffers.
1843 * Return <0 for fatal error.
1844 */
1845 static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
1846 {
1847 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1848 int submitted = 0;
1849 u64 folio_start = folio_pos(folio);
1850 int bit_start = 0;
1851 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1852
1853 /* Lock and write each dirty extent buffer in the range */
1854 while (bit_start < fs_info->sectors_per_page) {
1855 struct btrfs_subpage *subpage = folio_get_private(folio);
1856 struct extent_buffer *eb;
1857 unsigned long flags;
1858 u64 start;
1859
1860 /*
1861 * Take private lock to ensure the subpage won't be detached
1862 * in the meantime.
1863 */
1864 spin_lock(&folio->mapping->i_private_lock);
1865 if (!folio_test_private(folio)) {
1866 spin_unlock(&folio->mapping->i_private_lock);
1867 break;
1868 }
1869 spin_lock_irqsave(&subpage->lock, flags);
1870 if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
1871 subpage->bitmaps)) {
1872 spin_unlock_irqrestore(&subpage->lock, flags);
1873 spin_unlock(&folio->mapping->i_private_lock);
1874 bit_start++;
1875 continue;
1876 }
1877
1878 start = folio_start + bit_start * fs_info->sectorsize;
1879 bit_start += sectors_per_node;
1880
1881 /*
1882 * Here we just want to grab the eb without touching extra
1883 * spin locks, so call find_extent_buffer_nolock().
1884 */
1885 eb = find_extent_buffer_nolock(fs_info, start);
1886 spin_unlock_irqrestore(&subpage->lock, flags);
1887 spin_unlock(&folio->mapping->i_private_lock);
1888
1889 /*
1890 * The eb has already reached 0 refs thus find_extent_buffer()
1891 * doesn't return it. We don't need to write back such eb
1892 * anyway.
1893 */
1894 if (!eb)
1895 continue;
1896
1897 if (lock_extent_buffer_for_io(eb, wbc)) {
1898 write_one_eb(eb, wbc);
1899 submitted++;
1900 }
1901 free_extent_buffer(eb);
1902 }
1903 return submitted;
1904 }
1905
1906 /*
1907 * Submit all page(s) of one extent buffer.
1908 *
1909 * @folio: the folio of one extent buffer
1910 * @ctx: context used to determine if we need to submit this folio; if the
1911 * current folio belongs to ctx->eb, we don't need to submit it
1912 *
1913 * The caller should pass each folio in their bytenr order, and here we use
1914 * ctx->eb to determine if we have submitted the folios of one extent buffer.
1915 *
1916 * If we have, we just skip until we hit a new folio that doesn't belong to
1917 * the current ctx->eb.
1918 *
1919 * If not, we submit all the page(s) of the extent buffer.
1920 *
1921 * Return >0 if we have submitted the extent buffer successfully.
1922 * Return 0 if we don't need to submit the page, as it's already submitted by
1923 * previous call.
1924 * Return <0 for fatal error.
1925 */
1926 static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx)
1927 {
1928 struct writeback_control *wbc = ctx->wbc;
1929 struct address_space *mapping = folio->mapping;
1930 struct extent_buffer *eb;
1931 int ret;
1932
1933 if (!folio_test_private(folio))
1934 return 0;
1935
1936 if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
1937 return submit_eb_subpage(folio, wbc);
1938
1939 spin_lock(&mapping->i_private_lock);
1940 if (!folio_test_private(folio)) {
1941 spin_unlock(&mapping->i_private_lock);
1942 return 0;
1943 }
1944
1945 eb = folio_get_private(folio);
1946
1947 /*
1948 * Shouldn't happen and normally this would be a BUG_ON but no point
1949 * crashing the machine for something we can survive anyway.
1950 */
1951 if (WARN_ON(!eb)) {
1952 spin_unlock(&mapping->i_private_lock);
1953 return 0;
1954 }
1955
1956 if (eb == ctx->eb) {
1957 spin_unlock(&mapping->i_private_lock);
1958 return 0;
1959 }
1960 ret = atomic_inc_not_zero(&eb->refs);
1961 spin_unlock(&mapping->i_private_lock);
1962 if (!ret)
1963 return 0;
1964
1965 ctx->eb = eb;
1966
1967 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1968 if (ret) {
1969 if (ret == -EBUSY)
1970 ret = 0;
1971 free_extent_buffer(eb);
1972 return ret;
1973 }
1974
1975 if (!lock_extent_buffer_for_io(eb, wbc)) {
1976 free_extent_buffer(eb);
1977 return 0;
1978 }
1979 /* Implies write in zoned mode. */
1980 if (ctx->zoned_bg) {
1981 /* Mark the last eb in the block group. */
1982 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1983 ctx->zoned_bg->meta_write_pointer += eb->len;
1984 }
1985 write_one_eb(eb, wbc);
1986 free_extent_buffer(eb);
1987 return 1;
1988 }
1989
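/*
* Write back the dirty extent buffers of the btree inode.
*
* Walk the dirty (or towrite-tagged) folios of the btree inode's mapping and
* submit the extent buffers they contain via submit_eb_page(), honoring
* wbc->nr_to_write and the zoned metadata I/O lock.
*/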
1990 int btree_write_cache_pages(struct address_space *mapping,
1991 struct writeback_control *wbc)
1992 {
1993 struct btrfs_eb_write_context ctx = { .wbc = wbc };
1994 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1995 int ret = 0;
1996 int done = 0;
1997 int nr_to_write_done = 0;
1998 struct folio_batch fbatch;
1999 unsigned int nr_folios;
2000 pgoff_t index;
2001 pgoff_t end; /* Inclusive */
2002 int scanned = 0;
2003 xa_mark_t tag;
2004
2005 folio_batch_init(&fbatch);
2006 if (wbc->range_cyclic) {
2007 index = mapping->writeback_index; /* Start from prev offset */
2008 end = -1;
2009 /*
2010 * Starting from the beginning does not need a second pass over the
2011 * range, so mark it as scanned.
2012 */
2013 scanned = (index == 0);
2014 } else {
2015 index = wbc->range_start >> PAGE_SHIFT;
2016 end = wbc->range_end >> PAGE_SHIFT;
2017 scanned = 1;
2018 }
2019 if (wbc->sync_mode == WB_SYNC_ALL)
2020 tag = PAGECACHE_TAG_TOWRITE;
2021 else
2022 tag = PAGECACHE_TAG_DIRTY;
2023 btrfs_zoned_meta_io_lock(fs_info);
2024 retry:
2025 if (wbc->sync_mode == WB_SYNC_ALL)
2026 tag_pages_for_writeback(mapping, index, end);
2027 while (!done && !nr_to_write_done && (index <= end) &&
2028 (nr_folios = filemap_get_folios_tag(mapping, &index, end,
2029 tag, &fbatch))) {
2030 unsigned i;
2031
2032 for (i = 0; i < nr_folios; i++) {
2033 struct folio *folio = fbatch.folios[i];
2034
2035 ret = submit_eb_page(folio, &ctx);
2036 if (ret == 0)
2037 continue;
2038 if (ret < 0) {
2039 done = 1;
2040 break;
2041 }
2042
2043 /*
2044 * The filesystem may choose to bump up nr_to_write.
2045 * We have to make sure to honor the new nr_to_write
2046 * at any time.
2047 */
2048 nr_to_write_done = wbc->nr_to_write <= 0;
2049 }
2050 folio_batch_release(&fbatch);
2051 cond_resched();
2052 }
2053 if (!scanned && !done) {
2054 /*
2055 * We hit the last page and there is more work to be done: wrap
2056 * back to the start of the file
2057 */
2058 scanned = 1;
2059 index = 0;
2060 goto retry;
2061 }
2062 /*
2063 * If something went wrong, don't allow any metadata write bio to be
2064 * submitted.
2065 *
2066 * This would prevent use-after-free if we had dirty pages not
2067 * cleaned up, which can still happen with fuzzed images.
2068 *
2069 * - Bad extent tree
2070 * Allowing existing tree block to be allocated for other trees.
2071 *
2072 * - Log tree operations
2073 * Existing tree blocks get allocated to the log tree, which bumps
2074 * their generation, and then get cleaned in tree re-balance.
2075 * Such tree block will not be written back, since it's clean,
2076 * thus no WRITTEN flag set.
2077 * And after log writes back, this tree block is not traced by
2078 * any dirty extent_io_tree.
2079 *
2080 * - Offending tree block gets re-dirtied from its original owner
2081 * Since it has bumped generation, no WRITTEN flag, it can be
2082 * reused without COWing. This tree block will not be traced
2083 * by btrfs_transaction::dirty_pages.
2084 *
2085 * Now such dirty tree block will not be cleaned by any dirty
2086 * extent io tree. Thus we don't want to submit such wild eb
2087 * if the fs already has error.
2088 *
2089 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2090 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2091 */
2092 if (ret > 0)
2093 ret = 0;
2094 if (!ret && BTRFS_FS_ERROR(fs_info))
2095 ret = -EROFS;
2096
2097 if (ctx.zoned_bg)
2098 btrfs_put_block_group(ctx.zoned_bg);
2099 btrfs_zoned_meta_io_unlock(fs_info);
2100 return ret;
2101 }
2102
2103 /*
2104 * Walk the list of dirty pages of the given address space and write all of them.
2105 *
2106 * @mapping: address space structure to write
2107 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2108 * @bio_ctrl: holds context for the write, namely the bio
2109 *
2110 * If a page is already under I/O, write_cache_pages() skips it, even
2111 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2112 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2113 * and msync() need to guarantee that all the data which was dirty at the time
2114 * the call was made get new I/O started against them. If wbc->sync_mode is
2115 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2116 * existing IO to complete.
2117 */
2118 static int extent_write_cache_pages(struct address_space *mapping,
2119 struct btrfs_bio_ctrl *bio_ctrl)
2120 {
2121 struct writeback_control *wbc = bio_ctrl->wbc;
2122 struct inode *inode = mapping->host;
2123 int ret = 0;
2124 int done = 0;
2125 int nr_to_write_done = 0;
2126 struct folio_batch fbatch;
2127 unsigned int nr_folios;
2128 pgoff_t index;
2129 pgoff_t end; /* Inclusive */
2130 pgoff_t done_index;
2131 int range_whole = 0;
2132 int scanned = 0;
2133 xa_mark_t tag;
2134
2135 /*
2136 * We have to hold onto the inode so that ordered extents can do their
2137 * work when the IO finishes. The alternative to this is failing to add
2138 * an ordered extent if the igrab() fails there and that is a huge pain
2139 * to deal with, so instead just hold onto the inode throughout the
2140 * writepages operation. If it fails here we are freeing up the inode
2141 * anyway and we'd rather not waste our time writing out stuff that is
2142 * going to be truncated anyway.
2143 */
2144 if (!igrab(inode))
2145 return 0;
2146
2147 folio_batch_init(&fbatch);
2148 if (wbc->range_cyclic) {
2149 index = mapping->writeback_index; /* Start from prev offset */
2150 end = -1;
2151 /*
2152 * Starting from the beginning does not need a second pass over the
2153 * range, so mark it as scanned.
2154 */
2155 scanned = (index == 0);
2156 } else {
2157 index = wbc->range_start >> PAGE_SHIFT;
2158 end = wbc->range_end >> PAGE_SHIFT;
2159 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2160 range_whole = 1;
2161 scanned = 1;
2162 }
2163
2164 /*
2165 * We do the tagged writepage as long as the snapshot flush bit is set
2166 * and we are the first one to do the filemap_flush() on this inode.
2167 *
2168 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2169 * not race in and drop the bit.
2170 */
2171 if (range_whole && wbc->nr_to_write == LONG_MAX &&
2172 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2173 &BTRFS_I(inode)->runtime_flags))
2174 wbc->tagged_writepages = 1;
2175
2176 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2177 tag = PAGECACHE_TAG_TOWRITE;
2178 else
2179 tag = PAGECACHE_TAG_DIRTY;
2180 retry:
2181 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2182 tag_pages_for_writeback(mapping, index, end);
2183 done_index = index;
2184 while (!done && !nr_to_write_done && (index <= end) &&
2185 (nr_folios = filemap_get_folios_tag(mapping, &index,
2186 end, tag, &fbatch))) {
2187 unsigned i;
2188
2189 for (i = 0; i < nr_folios; i++) {
2190 struct folio *folio = fbatch.folios[i];
2191
2192 done_index = folio_next_index(folio);
2193 /*
2194 * At this point we hold neither the i_pages lock nor
2195 * the page lock: the page may be truncated or
2196 * invalidated (changing page->mapping to NULL),
2197 * or even swizzled back from swapper_space to
2198 * tmpfs file mapping
2199 */
2200 if (!folio_trylock(folio)) {
2201 submit_write_bio(bio_ctrl, 0);
2202 folio_lock(folio);
2203 }
2204
2205 if (unlikely(folio->mapping != mapping)) {
2206 folio_unlock(folio);
2207 continue;
2208 }
2209
2210 if (!folio_test_dirty(folio)) {
2211 /* Someone wrote it for us. */
2212 folio_unlock(folio);
2213 continue;
2214 }
2215
2216 /*
2217 * For subpage case, compression can lead to mixed
2218 * writeback and dirty flags, e.g:
2219 * 0 32K 64K 96K 128K
2220 * | |//////||/////| |//|
2221 *
2222 * In above case, [32K, 96K) is asynchronously submitted
2223 * for compression, and [124K, 128K) needs to be written back.
2224 *
2225 * If we didn't wait for writeback of page 64K, [124K, 128K)
2226 * won't be submitted as the page still has the writeback flag
2227 * and will be skipped in the next check.
2228 *
2229 * This mixed writeback and dirty case is only possible for
2230 * subpage case.
2231 *
2232 * TODO: Remove this check after migrating compression to
2233 * regular submission.
2234 */
2235 if (wbc->sync_mode != WB_SYNC_NONE ||
2236 btrfs_is_subpage(inode_to_fs_info(inode), mapping)) {
2237 if (folio_test_writeback(folio))
2238 submit_write_bio(bio_ctrl, 0);
2239 folio_wait_writeback(folio);
2240 }
2241
2242 if (folio_test_writeback(folio) ||
2243 !folio_clear_dirty_for_io(folio)) {
2244 folio_unlock(folio);
2245 continue;
2246 }
2247
2248 ret = extent_writepage(folio, bio_ctrl);
2249 if (ret < 0) {
2250 done = 1;
2251 break;
2252 }
2253
2254 /*
2255 * The filesystem may choose to bump up nr_to_write.
2256 * We have to make sure to honor the new nr_to_write
2257 * at any time.
2258 */
2259 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2260 wbc->nr_to_write <= 0);
2261 }
2262 folio_batch_release(&fbatch);
2263 cond_resched();
2264 }
2265 if (!scanned && !done) {
2266 /*
2267 * We hit the last page and there is more work to be done: wrap
2268 * back to the start of the file
2269 */
2270 scanned = 1;
2271 index = 0;
2272
2273 /*
2274 * If we're looping we could run into a page that is locked by a
2275 * writer and that writer could be waiting on writeback for a
2276 * page in our current bio, and thus deadlock, so flush the
2277 * write bio here.
2278 */
2279 submit_write_bio(bio_ctrl, 0);
2280 goto retry;
2281 }
2282
2283 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2284 mapping->writeback_index = done_index;
2285
2286 btrfs_add_delayed_iput(BTRFS_I(inode));
2287 return ret;
2288 }
2289
2290 /*
2291 * Submit the pages in the range to bio for call sites whose delalloc range
2292 * has already been run (i.e. the ordered extent was inserted) and all pages are still
2293 * locked.
2294 */
2295 void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
2296 u64 start, u64 end, struct writeback_control *wbc,
2297 bool pages_dirty)
2298 {
2299 bool found_error = false;
2300 int ret = 0;
2301 struct address_space *mapping = inode->i_mapping;
2302 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2303 const u32 sectorsize = fs_info->sectorsize;
2304 loff_t i_size = i_size_read(inode);
2305 u64 cur = start;
2306 struct btrfs_bio_ctrl bio_ctrl = {
2307 .wbc = wbc,
2308 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2309 };
2310
2311 if (wbc->no_cgroup_owner)
2312 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2313
2314 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2315
2316 while (cur <= end) {
2317 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2318 u32 cur_len = cur_end + 1 - cur;
2319 struct folio *folio;
2320
2321 folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
2322
2323 /*
2324 * This shouldn't happen: the pages are pinned and locked, so this
2325 * code is just in case, but shouldn't actually be run.
2326 */
2327 if (IS_ERR(folio)) {
2328 btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
2329 cur, cur_len, false);
2330 mapping_set_error(mapping, PTR_ERR(folio));
2331 cur = cur_end + 1;
2332 continue;
2333 }
2334
2335 ASSERT(folio_test_locked(folio));
2336 if (pages_dirty && folio != locked_folio)
2337 ASSERT(folio_test_dirty(folio));
2338
2339 /*
2340 * Set the submission bitmap to submit all sectors.
2341 * extent_writepage_io() will do the truncation correctly.
2342 */
2343 bio_ctrl.submit_bitmap = (unsigned long)-1;
2344 ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
2345 &bio_ctrl, i_size);
2346 if (ret == 1)
2347 goto next_page;
2348
2349 if (ret)
2350 mapping_set_error(mapping, ret);
2351 btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
2352 if (ret < 0)
2353 found_error = true;
2354 next_page:
2355 folio_put(folio);
2356 cur = cur_end + 1;
2357 }
2358
2359 submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2360 }
2361
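/*
* The writepages address space operation for data: write back dirty pages
* through extent_write_cache_pages() and submit any bio that is still held
* in the bio_ctrl at the end.
*/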
2362 int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2363 {
2364 struct inode *inode = mapping->host;
2365 int ret = 0;
2366 struct btrfs_bio_ctrl bio_ctrl = {
2367 .wbc = wbc,
2368 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2369 };
2370
2371 /*
2372 * Allow only a single thread to do the reloc work in zoned mode to
2373 * protect the write pointer updates.
2374 */
2375 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2376 ret = extent_write_cache_pages(mapping, &bio_ctrl);
2377 submit_write_bio(&bio_ctrl, ret);
2378 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2379 return ret;
2380 }
2381
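/*
* The readahead address space operation.
*
* Lock and flush any ordered extents covering the readahead range, read each
* folio through btrfs_do_readpage() while reusing the cached extent map, and
* finally submit the assembled bio.
*/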
2382 void btrfs_readahead(struct readahead_control *rac)
2383 {
2384 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2385 struct folio *folio;
2386 struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
2387 const u64 start = readahead_pos(rac);
2388 const u64 end = start + readahead_length(rac) - 1;
2389 struct extent_state *cached_state = NULL;
2390 struct extent_map *em_cached = NULL;
2391 u64 prev_em_start = (u64)-1;
2392
2393 btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
2394
2395 while ((folio = readahead_folio(rac)) != NULL)
2396 btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
2397
2398 unlock_extent(&inode->io_tree, start, end, &cached_state);
2399
2400 if (em_cached)
2401 free_extent_map(em_cached);
2402 submit_one_bio(&bio_ctrl);
2403 }
2404
2405 /*
2406 * basic invalidate_folio code, this waits on any locked or writeback
2407 * ranges corresponding to the folio, and then deletes any extent state
2408 * records from the tree
2409 */
2410 int extent_invalidate_folio(struct extent_io_tree *tree,
2411 struct folio *folio, size_t offset)
2412 {
2413 struct extent_state *cached_state = NULL;
2414 u64 start = folio_pos(folio);
2415 u64 end = start + folio_size(folio) - 1;
2416 size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2417
2418 /* This function is only called for the btree inode */
2419 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2420
2421 start += ALIGN(offset, blocksize);
2422 if (start > end)
2423 return 0;
2424
2425 lock_extent(tree, start, end, &cached_state);
2426 folio_wait_writeback(folio);
2427
2428 /*
2429 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2430 * so here we only need to unlock the extent range to free any
2431 * existing extent state.
2432 */
2433 unlock_extent(tree, start, end, &cached_state);
2434 return 0;
2435 }
2436
2437 /*
2438 * a helper for release_folio, this tests for areas of the page that
2439 * are locked or under IO and drops the related state bits if it is safe
2440 * to drop the page.
2441 */
2442 static bool try_release_extent_state(struct extent_io_tree *tree,
2443 struct folio *folio)
2444 {
2445 u64 start = folio_pos(folio);
2446 u64 end = start + PAGE_SIZE - 1;
2447 bool ret;
2448
2449 if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2450 ret = false;
2451 } else {
2452 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2453 EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2454 EXTENT_QGROUP_RESERVED);
2455 int ret2;
2456
2457 /*
2458 * At this point we can safely clear everything except the
2459 * locked bit, the nodatasum bit and the delalloc new bit.
2460 * The delalloc new bit will be cleared by ordered extent
2461 * completion.
2462 */
2463 ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2464
2465 /* If __clear_extent_bit() failed due to -ENOMEM,
2466 * we can't allow the release to continue.
2467 */
2468 if (ret2 < 0)
2469 ret = false;
2470 else
2471 ret = true;
2472 }
2473 return ret;
2474 }
2475
2476 /*
2477 * a helper for release_folio. As long as there are no locked extents
2478 * in the range corresponding to the page, both state records and extent
2479 * map records are removed
2480 */
2481 bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
2482 {
2483 u64 start = folio_pos(folio);
2484 u64 end = start + PAGE_SIZE - 1;
2485 struct btrfs_inode *inode = folio_to_inode(folio);
2486 struct extent_io_tree *io_tree = &inode->io_tree;
2487
2488 while (start <= end) {
2489 const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2490 const u64 len = end - start + 1;
2491 struct extent_map_tree *extent_tree = &inode->extent_tree;
2492 struct extent_map *em;
2493
2494 write_lock(&extent_tree->lock);
2495 em = lookup_extent_mapping(extent_tree, start, len);
2496 if (!em) {
2497 write_unlock(&extent_tree->lock);
2498 break;
2499 }
2500 if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2501 write_unlock(&extent_tree->lock);
2502 free_extent_map(em);
2503 break;
2504 }
2505 if (test_range_bit_exists(io_tree, em->start,
2506 extent_map_end(em) - 1, EXTENT_LOCKED))
2507 goto next;
2508 /*
2509 * If it's not in the list of modified extents, used by a fast
2510 * fsync, we can remove it. If it's being logged we can safely
2511 * remove it since fsync took an extra reference on the em.
2512 */
2513 if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2514 goto remove_em;
2515 /*
2516 * If it's in the list of modified extents, remove it only if
2517 * its generation is older than the current one, in which case
2518 * we don't need it for a fast fsync. Otherwise don't remove it,
2519 * we could be racing with an ongoing fast fsync that could miss
2520 * the new extent.
2521 */
2522 if (em->generation >= cur_gen)
2523 goto next;
2524 remove_em:
2525 /*
2526 * We only remove extent maps that are not in the list of
2527 * modified extents or that are in the list but with a
2528 * generation lower than the current generation, so there is no
2529 * need to set the full fsync flag on the inode (it hurts the
2530 * fsync performance for workloads with a data size that exceeds
2531 * or is close to the system's memory).
2532 */
2533 remove_extent_mapping(inode, em);
2534 /* Once for the inode's extent map tree. */
2535 free_extent_map(em);
2536 next:
2537 start = extent_map_end(em);
2538 write_unlock(&extent_tree->lock);
2539
2540 /* Once for us, for the lookup_extent_mapping() reference. */
2541 free_extent_map(em);
2542
2543 if (need_resched()) {
2544 /*
2545 * If we need to resched but we can't block just exit
2546 * and leave any remaining extent maps.
2547 */
2548 if (!gfpflags_allow_blocking(mask))
2549 break;
2550
2551 cond_resched();
2552 }
2553 }
2554 return try_release_extent_state(io_tree, folio);
2555 }
2556
2557 static int extent_buffer_under_io(const struct extent_buffer *eb)
2558 {
2559 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
2560 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2561 }
2562
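/*
* Return true if the (subpage) folio still has extent buffers attached, i.e.
* the eb_refs count of the attached btrfs_subpage structure is non-zero.
*/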
2563 static bool folio_range_has_eb(struct folio *folio)
2564 {
2565 struct btrfs_subpage *subpage;
2566
2567 lockdep_assert_held(&folio->mapping->i_private_lock);
2568
2569 if (folio_test_private(folio)) {
2570 subpage = folio_get_private(folio);
2571 if (atomic_read(&subpage->eb_refs))
2572 return true;
2573 }
2574 return false;
2575 }
2576
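/*
* Detach an extent buffer from one of its folios.
*
* For regular (nodesize >= PAGE_SIZE) extent buffers the folio private is
* cleared if it still points to this eb. For subpage extent buffers the
* eb_refs count is dropped and the subpage structure is only detached once
* no other eb uses the folio range.
*/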
2577 static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
2578 {
2579 struct btrfs_fs_info *fs_info = eb->fs_info;
2580 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2581
2582 /*
2583 * For mapped eb, we're going to change the folio private, which should
2584 * be done under the i_private_lock.
2585 */
2586 if (mapped)
2587 spin_lock(&folio->mapping->i_private_lock);
2588
2589 if (!folio_test_private(folio)) {
2590 if (mapped)
2591 spin_unlock(&folio->mapping->i_private_lock);
2592 return;
2593 }
2594
2595 if (fs_info->nodesize >= PAGE_SIZE) {
2596 /*
2597 * We do this since we'll remove the pages after we've
2598 * removed the eb from the radix tree, so we could race
2599 * and have this page now attached to the new eb. So
2600 * only clear folio if it's still connected to
2601 * this eb.
2602 */
2603 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
2604 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2605 BUG_ON(folio_test_dirty(folio));
2606 BUG_ON(folio_test_writeback(folio));
2607 /* We need to make sure we haven't been attached to a new eb. */
2608 folio_detach_private(folio);
2609 }
2610 if (mapped)
2611 spin_unlock(&folio->mapping->i_private_lock);
2612 return;
2613 }
2614
2615 /*
2616 * For subpage, we can have dummy eb with folio private attached. In
2617 * this case, we can directly detach the private as such folio is only
2618 * attached to one dummy eb, no sharing.
2619 */
2620 if (!mapped) {
2621 btrfs_detach_subpage(fs_info, folio);
2622 return;
2623 }
2624
2625 btrfs_folio_dec_eb_refs(fs_info, folio);
2626
2627 /*
2628 * We can only detach the folio private if there are no other ebs in the
2629 * page range and no unfinished IO.
2630 */
2631 if (!folio_range_has_eb(folio))
2632 btrfs_detach_subpage(fs_info, folio);
2633
2634 spin_unlock(&folio->mapping->i_private_lock);
2635 }
2636
2637 /* Release all folios attached to the extent buffer */
2638 static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb)
2639 {
2640 ASSERT(!extent_buffer_under_io(eb));
2641
2642 for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
2643 struct folio *folio = eb->folios[i];
2644
2645 if (!folio)
2646 continue;
2647
2648 detach_extent_buffer_folio(eb, folio);
2649
2650 /* One for when we allocated the folio. */
2651 folio_put(folio);
2652 }
2653 }
2654
2655 /*
2656 * Helper for releasing the extent buffer.
2657 */
2658 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
2659 {
2660 btrfs_release_extent_buffer_folios(eb);
2661 btrfs_leak_debug_del_eb(eb);
2662 kmem_cache_free(extent_buffer_cache, eb);
2663 }
2664
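/*
* Allocate and initialize the in-memory extent buffer structure for the given
* logical range. No folios are allocated or attached here; callers do that
* separately.
*/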
2665 static struct extent_buffer *
2666 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
2667 unsigned long len)
2668 {
2669 struct extent_buffer *eb = NULL;
2670
2671 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
2672 eb->start = start;
2673 eb->len = len;
2674 eb->fs_info = fs_info;
2675 init_rwsem(&eb->lock);
2676
2677 btrfs_leak_debug_add_eb(eb);
2678
2679 spin_lock_init(&eb->refs_lock);
2680 atomic_set(&eb->refs, 1);
2681
2682 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
2683
2684 return eb;
2685 }
2686
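/*
* Allocate an unmapped copy of @src: a private set of folios is allocated,
* the contents of @src are copied into it and the new buffer is marked
* uptodate. Returns NULL on allocation failure.
*/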
2687 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
2688 {
2689 struct extent_buffer *new;
2690 int num_folios = num_extent_folios(src);
2691 int ret;
2692
2693 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
2694 if (new == NULL)
2695 return NULL;
2696
2697 /*
2698 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
2699 * btrfs_release_extent_buffer() has different behavior for
2700 * UNMAPPED subpage extent buffers.
2701 */
2702 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
2703
2704 ret = alloc_eb_folio_array(new, false);
2705 if (ret) {
2706 btrfs_release_extent_buffer(new);
2707 return NULL;
2708 }
2709
2710 for (int i = 0; i < num_folios; i++) {
2711 struct folio *folio = new->folios[i];
2712
2713 ret = attach_extent_buffer_folio(new, folio, NULL);
2714 if (ret < 0) {
2715 btrfs_release_extent_buffer(new);
2716 return NULL;
2717 }
2718 WARN_ON(folio_test_dirty(folio));
2719 }
2720 copy_extent_buffer_full(new, src);
2721 set_extent_buffer_uptodate(new);
2722
2723 return new;
2724 }
2725
2726 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2727 u64 start, unsigned long len)
2728 {
2729 struct extent_buffer *eb;
2730 int num_folios = 0;
2731 int ret;
2732
2733 eb = __alloc_extent_buffer(fs_info, start, len);
2734 if (!eb)
2735 return NULL;
2736
2737 ret = alloc_eb_folio_array(eb, false);
2738 if (ret)
2739 goto err;
2740
2741 num_folios = num_extent_folios(eb);
2742 for (int i = 0; i < num_folios; i++) {
2743 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
2744 if (ret < 0)
2745 goto err;
2746 }
2747
2748 set_extent_buffer_uptodate(eb);
2749 btrfs_set_header_nritems(eb, 0);
2750 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2751
2752 return eb;
2753 err:
2754 for (int i = 0; i < num_folios; i++) {
2755 if (eb->folios[i]) {
2756 detach_extent_buffer_folio(eb, eb->folios[i]);
2757 folio_put(eb->folios[i]);
2758 }
2759 }
2760 kmem_cache_free(extent_buffer_cache, eb);
2761 return NULL;
2762 }
2763
2764 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2765 u64 start)
2766 {
2767 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
2768 }
2769
2770 static void check_buffer_tree_ref(struct extent_buffer *eb)
2771 {
2772 int refs;
2773 /*
2774 * The TREE_REF bit is first set when the extent_buffer is added
2775 * to the radix tree. It is also reset, if unset, when a new reference
2776 * is created by find_extent_buffer.
2777 *
2778 * It is only cleared in two cases: freeing the last non-tree
2779 * reference to the extent_buffer when its STALE bit is set or
2780 * calling release_folio when the tree reference is the only reference.
2781 *
2782 * In both cases, care is taken to ensure that the extent_buffer's
2783 * pages are not under io. However, release_folio can be concurrently
2784 * called with creating new references, which is prone to race
2785 * conditions between the calls to check_buffer_tree_ref in those
2786 * codepaths and clearing TREE_REF in try_release_extent_buffer.
2787 *
2788 * The actual lifetime of the extent_buffer in the radix tree is
2789 * adequately protected by the refcount, but the TREE_REF bit and
2790 * its corresponding reference are not. To protect against this
2791 * class of races, we call check_buffer_tree_ref from the codepaths
2792 * which trigger io. Note that once io is initiated, TREE_REF can no
2793 * longer be cleared, so that is the moment at which any such race is
2794 * best fixed.
2795 */
2796 refs = atomic_read(&eb->refs);
2797 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2798 return;
2799
2800 spin_lock(&eb->refs_lock);
2801 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2802 atomic_inc(&eb->refs);
2803 spin_unlock(&eb->refs_lock);
2804 }
2805
2806 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
2807 {
2808 int num_folios = num_extent_folios(eb);
2809
2810 check_buffer_tree_ref(eb);
2811
2812 for (int i = 0; i < num_folios; i++)
2813 folio_mark_accessed(eb->folios[i]);
2814 }
2815
2816 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
2817 u64 start)
2818 {
2819 struct extent_buffer *eb;
2820
2821 eb = find_extent_buffer_nolock(fs_info, start);
2822 if (!eb)
2823 return NULL;
2824 /*
2825 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
2826 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
2827 * another task running free_extent_buffer() might have seen that flag
2828 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
2829 * writeback flags not set) and it's still in the tree (flag
2830 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
2831 * decrementing the extent buffer's reference count twice. So here we
2832 * could race and increment the eb's reference count, clear its stale
2833 * flag, mark it as dirty and drop our reference before the other task
2834 * finishes executing free_extent_buffer, which would later result in
2835 * an attempt to free an extent buffer that is dirty.
2836 */
2837 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
2838 spin_lock(&eb->refs_lock);
2839 spin_unlock(&eb->refs_lock);
2840 }
2841 mark_extent_buffer_accessed(eb);
2842 return eb;
2843 }
2844
2845 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
2846 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
2847 u64 start)
2848 {
2849 struct extent_buffer *eb, *exists = NULL;
2850 int ret;
2851
2852 eb = find_extent_buffer(fs_info, start);
2853 if (eb)
2854 return eb;
2855 eb = alloc_dummy_extent_buffer(fs_info, start);
2856 if (!eb)
2857 return ERR_PTR(-ENOMEM);
2858 eb->fs_info = fs_info;
2859 again:
2860 ret = radix_tree_preload(GFP_NOFS);
2861 if (ret) {
2862 exists = ERR_PTR(ret);
2863 goto free_eb;
2864 }
2865 spin_lock(&fs_info->buffer_lock);
2866 ret = radix_tree_insert(&fs_info->buffer_radix,
2867 start >> fs_info->sectorsize_bits, eb);
2868 spin_unlock(&fs_info->buffer_lock);
2869 radix_tree_preload_end();
2870 if (ret == -EEXIST) {
2871 exists = find_extent_buffer(fs_info, start);
2872 if (exists)
2873 goto free_eb;
2874 else
2875 goto again;
2876 }
2877 check_buffer_tree_ref(eb);
2878 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
2879
2880 return eb;
2881 free_eb:
2882 btrfs_release_extent_buffer(eb);
2883 return exists;
2884 }
2885 #endif
2886
2887 static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
2888 struct folio *folio)
2889 {
2890 struct extent_buffer *exists;
2891
2892 lockdep_assert_held(&folio->mapping->i_private_lock);
2893
2894 /*
2895 * For subpage case, we completely rely on radix tree to ensure we
2896 * don't try to insert two ebs for the same bytenr. So here we always
2897 * return NULL and just continue.
2898 */
2899 if (fs_info->nodesize < PAGE_SIZE)
2900 return NULL;
2901
2902 /* Page not yet attached to an extent buffer */
2903 if (!folio_test_private(folio))
2904 return NULL;
2905
2906 /*
2907 * We could have already allocated an eb for this folio and attached one,
2908 * so let's see if we can get a ref on the existing eb. If we can, we
2909 * know it's good and we can just return that one, else we know we can
2910 * just overwrite the folio private.
2911 */
2912 exists = folio_get_private(folio);
2913 if (atomic_inc_not_zero(&exists->refs))
2914 return exists;
2915
2916 WARN_ON(folio_test_dirty(folio));
2917 folio_detach_private(folio);
2918 return NULL;
2919 }
2920
2921 /*
2922 * Validate alignment constraints of eb at logical address @start.
2923 */
2924 static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
2925 {
2926 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
2927 btrfs_err(fs_info, "bad tree block start %llu", start);
2928 return true;
2929 }
2930
2931 if (fs_info->nodesize < PAGE_SIZE &&
2932 offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
2933 btrfs_err(fs_info,
2934 "tree block crosses page boundary, start %llu nodesize %u",
2935 start, fs_info->nodesize);
2936 return true;
2937 }
2938 if (fs_info->nodesize >= PAGE_SIZE &&
2939 !PAGE_ALIGNED(start)) {
2940 btrfs_err(fs_info,
2941 "tree block is not page aligned, start %llu nodesize %u",
2942 start, fs_info->nodesize);
2943 return true;
2944 }
2945 if (!IS_ALIGNED(start, fs_info->nodesize) &&
2946 !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
2947 btrfs_warn(fs_info,
2948 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
2949 start, fs_info->nodesize);
2950 }
2951 return false;
2952 }
2953
2954 /*
2955 * Return 0 if eb->folios[i] is attached to btree inode successfully.
2956 * Return >0 if there is already another extent buffer for the range,
2957 * and @found_eb_ret will be updated.
2958 * Return -EAGAIN if the filemap has an existing folio but with different size
2959 * than @eb.
2960 * The caller needs to free the existing folios and retry using the same order.
2961 */
2962 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
2963 struct btrfs_subpage *prealloc,
2964 struct extent_buffer **found_eb_ret)
2965 {
2966
2967 struct btrfs_fs_info *fs_info = eb->fs_info;
2968 struct address_space *mapping = fs_info->btree_inode->i_mapping;
2969 const unsigned long index = eb->start >> PAGE_SHIFT;
2970 struct folio *existing_folio = NULL;
2971 int ret;
2972
2973 ASSERT(found_eb_ret);
2974
2975 /* Caller should ensure the folio exists. */
2976 ASSERT(eb->folios[i]);
2977
2978 retry:
2979 ret = filemap_add_folio(mapping, eb->folios[i], index + i,
2980 GFP_NOFS | __GFP_NOFAIL);
2981 if (!ret)
2982 goto finish;
2983
2984 existing_folio = filemap_lock_folio(mapping, index + i);
2985 /* The page cache only exists for a very short time, just retry. */
2986 if (IS_ERR(existing_folio)) {
2987 existing_folio = NULL;
2988 goto retry;
2989 }
2990
2991 /* For now, we should only have single-page folios for btree inode. */
2992 ASSERT(folio_nr_pages(existing_folio) == 1);
2993
2994 if (folio_size(existing_folio) != eb->folio_size) {
2995 folio_unlock(existing_folio);
2996 folio_put(existing_folio);
2997 return -EAGAIN;
2998 }
2999
3000 finish:
3001 spin_lock(&mapping->i_private_lock);
3002 if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
3003 /* We're going to reuse the existing page, can drop our folio now. */
3004 __free_page(folio_page(eb->folios[i], 0));
3005 eb->folios[i] = existing_folio;
3006 } else if (existing_folio) {
3007 struct extent_buffer *existing_eb;
3008
3009 existing_eb = grab_extent_buffer(fs_info, existing_folio);
3010 if (existing_eb) {
3011 /* The extent buffer still exists, we can use it directly. */
3012 *found_eb_ret = existing_eb;
3013 spin_unlock(&mapping->i_private_lock);
3014 folio_unlock(existing_folio);
3015 folio_put(existing_folio);
3016 return 1;
3017 }
3018 /* The extent buffer no longer exists, we can reuse the folio. */
3019 __free_page(folio_page(eb->folios[i], 0));
3020 eb->folios[i] = existing_folio;
3021 }
3022 eb->folio_size = folio_size(eb->folios[i]);
3023 eb->folio_shift = folio_shift(eb->folios[i]);
3024 /* Should not fail, as we have preallocated the memory. */
3025 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
3026 ASSERT(!ret);
3027 /*
3028 * To inform that we have an extra eb under allocation, so that
3029 * detach_extent_buffer_folio() won't release the folio private when the
3030 * eb hasn't been inserted into the radix tree yet.
3031 *
3032 * The ref will be decreased when the eb releases the folio, in
3033 * detach_extent_buffer_folio(). Thus it needs no special handling in the
3034 * error path.
3035 */
3036 btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
3037 spin_unlock(&mapping->i_private_lock);
3038 return 0;
3039 }
3040
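/*
* Find or create the extent buffer for the tree block at @start.
*
* A new eb gets its folios allocated and attached to the btree inode's
* filemap, then it is inserted into the buffer radix tree. If another task
* raced and inserted an eb for the same bytenr, that existing eb is returned
* instead and the new allocation is released.
*/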
3041 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3042 u64 start, u64 owner_root, int level)
3043 {
3044 unsigned long len = fs_info->nodesize;
3045 int num_folios;
3046 int attached = 0;
3047 struct extent_buffer *eb;
3048 struct extent_buffer *existing_eb = NULL;
3049 struct btrfs_subpage *prealloc = NULL;
3050 u64 lockdep_owner = owner_root;
3051 bool page_contig = true;
3052 int uptodate = 1;
3053 int ret;
3054
3055 if (check_eb_alignment(fs_info, start))
3056 return ERR_PTR(-EINVAL);
3057
3058 #if BITS_PER_LONG == 32
3059 if (start >= MAX_LFS_FILESIZE) {
3060 btrfs_err_rl(fs_info,
3061 "extent buffer %llu is beyond 32bit page cache limit", start);
3062 btrfs_err_32bit_limit(fs_info);
3063 return ERR_PTR(-EOVERFLOW);
3064 }
3065 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3066 btrfs_warn_32bit_limit(fs_info);
3067 #endif
3068
3069 eb = find_extent_buffer(fs_info, start);
3070 if (eb)
3071 return eb;
3072
3073 eb = __alloc_extent_buffer(fs_info, start, len);
3074 if (!eb)
3075 return ERR_PTR(-ENOMEM);
3076
3077 /*
3078 * The reloc trees are just snapshots, so we need them to appear to be
3079 * just like any other fs tree WRT lockdep.
3080 */
3081 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3082 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3083
3084 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3085
3086 /*
3087 * Preallocate folio private for subpage case, so that we won't
3088 * allocate memory with i_private_lock nor page lock held.
3089 *
3090 * The memory will be freed by attach_extent_buffer_folio() or freed
3091 * manually if we exit earlier.
3092 */
3093 if (fs_info->nodesize < PAGE_SIZE) {
3094 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3095 if (IS_ERR(prealloc)) {
3096 ret = PTR_ERR(prealloc);
3097 goto out;
3098 }
3099 }
3100
3101 reallocate:
3102 /* Allocate all pages first. */
3103 ret = alloc_eb_folio_array(eb, true);
3104 if (ret < 0) {
3105 btrfs_free_subpage(prealloc);
3106 goto out;
3107 }
3108
3109 num_folios = num_extent_folios(eb);
3110 /* Attach all pages to the filemap. */
3111 for (int i = 0; i < num_folios; i++) {
3112 struct folio *folio;
3113
3114 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
3115 if (ret > 0) {
3116 ASSERT(existing_eb);
3117 goto out;
3118 }
3119
3120 /*
3121 * TODO: Special handling for a corner case where the order of
3122 * folios mismatch between the new eb and filemap.
3123 *
3124 * This happens when:
3125 *
3126 * - the new eb is using higher order folio
3127 *
3128 * - the filemap is still using 0-order folios for the range
3129 * This can happen at the previous eb allocation, and we don't
3130 * have higher order folio for the call.
3131 *
3132 * - the existing eb has already been freed
3133 *
3134 * In this case, we have to free the existing folios first, and
3135 * re-allocate using the same order.
3136 * Thankfully this is not going to happen yet, as we're still
3137 * using 0-order folios.
3138 */
3139 if (unlikely(ret == -EAGAIN)) {
3140 ASSERT(0);
3141 goto reallocate;
3142 }
3143 attached++;
3144
3145 /*
3146 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3147 * reliable, as we may choose to reuse the existing page cache
3148 * and free the allocated page.
3149 */
3150 folio = eb->folios[i];
3151 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3152
3153 /*
3154 * Check if the current page is physically contiguous with previous eb
3155 * page.
3156 * At this stage, either we allocated a large folio, thus @i
3157 * would only be 0, or we fall back to per-page allocation.
3158 */
3159 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3160 page_contig = false;
3161
3162 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3163 uptodate = 0;
3164
3165 /*
3166 * We can't unlock the pages just yet since the extent buffer
3167 * hasn't been properly inserted in the radix tree, this
3168 * opens a race with btree_release_folio which can free a page
3169 * while we are still filling in all pages for the buffer and
3170 * we could crash.
3171 */
3172 }
3173 if (uptodate)
3174 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3175 /* All pages are physically contiguous, can skip cross page handling. */
3176 if (page_contig)
3177 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3178 again:
3179 ret = radix_tree_preload(GFP_NOFS);
3180 if (ret)
3181 goto out;
3182
3183 spin_lock(&fs_info->buffer_lock);
3184 ret = radix_tree_insert(&fs_info->buffer_radix,
3185 start >> fs_info->sectorsize_bits, eb);
3186 spin_unlock(&fs_info->buffer_lock);
3187 radix_tree_preload_end();
3188 if (ret == -EEXIST) {
3189 ret = 0;
3190 existing_eb = find_extent_buffer(fs_info, start);
3191 if (existing_eb)
3192 goto out;
3193 else
3194 goto again;
3195 }
3196 /* add one reference for the tree */
3197 check_buffer_tree_ref(eb);
3198 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3199
3200 /*
3201 * Now it's safe to unlock the pages because any calls to
3202 * btree_release_folio will correctly detect that a page belongs to a
3203 * live buffer and won't free them prematurely.
3204 */
3205 for (int i = 0; i < num_folios; i++)
3206 folio_unlock(eb->folios[i]);
3207 return eb;
3208
3209 out:
3210 WARN_ON(!atomic_dec_and_test(&eb->refs));
3211
3212 /*
3213 * Any attached folios need to be detached before we unlock them. This
3214 * is because when we insert our new folios into the mapping we attach
3215 * our eb to each folio. If a later allocation fails to insert its folio,
3216 * it will look up the folio for that index and grab the attached eb. We
3217 * do not want it to grab this eb, as we're getting ready to free it. So
3218 * we have to detach it first and then unlock it.
3219 *
3220 * We have to drop our reference and NULL it out here because in the
3221 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3222 * Below when we call btrfs_release_extent_buffer() we will call
3223 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3224 * case. If we left eb->folios[i] populated in the subpage case we'd
3225 * double put our reference and be super sad.
3226 */
3227 for (int i = 0; i < attached; i++) {
3228 ASSERT(eb->folios[i]);
3229 detach_extent_buffer_folio(eb, eb->folios[i]);
3230 folio_unlock(eb->folios[i]);
3231 folio_put(eb->folios[i]);
3232 eb->folios[i] = NULL;
3233 }
3234 /*
3235 * Now all pages of that extent buffer are unmapped, set the UNMAPPED flag,
3236 * so it can be cleaned up without utilizing page->mapping.
3237 */
3238 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3239
3240 btrfs_release_extent_buffer(eb);
3241 if (ret < 0)
3242 return ERR_PTR(ret);
3243 ASSERT(existing_eb);
3244 return existing_eb;
3245 }
3246
3247 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3248 {
3249 struct extent_buffer *eb =
3250 container_of(head, struct extent_buffer, rcu_head);
3251
3252 kmem_cache_free(extent_buffer_cache, eb);
3253 }
3254
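/*
* Drop one reference of the extent buffer with eb->refs_lock held. If it was
* the last reference, remove the eb from the buffer radix tree (when it was
* inserted there), release its folios and free it (normally via RCU).
* Returns 1 if the eb was freed, 0 otherwise.
*/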
3255 static int release_extent_buffer(struct extent_buffer *eb)
3256 __releases(&eb->refs_lock)
3257 {
3258 lockdep_assert_held(&eb->refs_lock);
3259
3260 WARN_ON(atomic_read(&eb->refs) == 0);
3261 if (atomic_dec_and_test(&eb->refs)) {
3262 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3263 struct btrfs_fs_info *fs_info = eb->fs_info;
3264
3265 spin_unlock(&eb->refs_lock);
3266
3267 spin_lock(&fs_info->buffer_lock);
3268 radix_tree_delete(&fs_info->buffer_radix,
3269 eb->start >> fs_info->sectorsize_bits);
3270 spin_unlock(&fs_info->buffer_lock);
3271 } else {
3272 spin_unlock(&eb->refs_lock);
3273 }
3274
3275 btrfs_leak_debug_del_eb(eb);
3276 /* Should be safe to release folios at this point. */
3277 btrfs_release_extent_buffer_folios(eb);
3278 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3279 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3280 kmem_cache_free(extent_buffer_cache, eb);
3281 return 1;
3282 }
3283 #endif
3284 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3285 return 1;
3286 }
3287 spin_unlock(&eb->refs_lock);
3288
3289 return 0;
3290 }
3291
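/*
* Drop a reference of the extent buffer.
*
* The common case (more references remaining) is handled with a lockless
* cmpxchg loop; only when this could be one of the last references do we take
* eb->refs_lock, drop the TREE_REF of stale buffers that are not under IO and
* let release_extent_buffer() do the final drop.
*/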
3292 void free_extent_buffer(struct extent_buffer *eb)
3293 {
3294 int refs;
3295 if (!eb)
3296 return;
3297
3298 refs = atomic_read(&eb->refs);
3299 while (1) {
3300 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3301 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3302 refs == 1))
3303 break;
3304 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3305 return;
3306 }
3307
3308 spin_lock(&eb->refs_lock);
3309 if (atomic_read(&eb->refs) == 2 &&
3310 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3311 !extent_buffer_under_io(eb) &&
3312 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3313 atomic_dec(&eb->refs);
3314
3315 /*
3316 * I know this is terrible, but it's temporary until we stop tracking
3317 * the uptodate bits and such for the extent buffers.
3318 */
3319 release_extent_buffer(eb);
3320 }
3321
3322 void free_extent_buffer_stale(struct extent_buffer *eb)
3323 {
3324 if (!eb)
3325 return;
3326
3327 spin_lock(&eb->refs_lock);
3328 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3329
3330 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3331 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3332 atomic_dec(&eb->refs);
3333 release_extent_buffer(eb);
3334 }
3335
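/*
 * Clear the dirty flag of @folio and, if nothing re-dirtied it, also clear
 * the PAGECACHE_TAG_DIRTY mark in the mapping's xarray.  The folio must be
 * locked by the caller.
 */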
3336 static void btree_clear_folio_dirty(struct folio *folio)
3337 {
3338 ASSERT(folio_test_dirty(folio));
3339 ASSERT(folio_test_locked(folio));
3340 folio_clear_dirty_for_io(folio);
3341 xa_lock_irq(&folio->mapping->i_pages);
3342 if (!folio_test_dirty(folio))
3343 __xa_clear_mark(&folio->mapping->i_pages,
3344 folio_index(folio), PAGECACHE_TAG_DIRTY);
3345 xa_unlock_irq(&folio->mapping->i_pages);
3346 }
3347
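/*
 * Subpage helper to clear the dirty status of @eb: clear the subpage dirty
 * bits covering the eb range and, if this was the last dirty eb in the folio,
 * clear the folio dirty status as well.
 */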
3348 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3349 {
3350 struct btrfs_fs_info *fs_info = eb->fs_info;
3351 struct folio *folio = eb->folios[0];
3352 bool last;
3353
3354	/* btree_clear_folio_dirty() needs the folio locked. */
3355 folio_lock(folio);
3356 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
3357 if (last)
3358 btree_clear_folio_dirty(folio);
3359 folio_unlock(folio);
3360 WARN_ON(atomic_read(&eb->refs) == 0);
3361 }
3362
3363 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3364 struct extent_buffer *eb)
3365 {
3366 struct btrfs_fs_info *fs_info = eb->fs_info;
3367 int num_folios;
3368
3369 btrfs_assert_tree_write_locked(eb);
3370
3371 if (trans && btrfs_header_generation(eb) != trans->transid)
3372 return;
3373
3374 /*
3375 * Instead of clearing the dirty flag off of the buffer, mark it as
3376 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3377 * write-ordering in zoned mode, without the need to later re-dirty
3378 * the extent_buffer.
3379 *
3380 * The actual zeroout of the buffer will happen later in
3381 * btree_csum_one_bio.
3382 */
3383 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3384 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3385 return;
3386 }
3387
3388 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3389 return;
3390
3391 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3392 fs_info->dirty_metadata_batch);
3393
3394 if (eb->fs_info->nodesize < PAGE_SIZE)
3395 return clear_subpage_extent_buffer_dirty(eb);
3396
3397 num_folios = num_extent_folios(eb);
3398 for (int i = 0; i < num_folios; i++) {
3399 struct folio *folio = eb->folios[i];
3400
3401 if (!folio_test_dirty(folio))
3402 continue;
3403 folio_lock(folio);
3404 btree_clear_folio_dirty(folio);
3405 folio_unlock(folio);
3406 }
3407 WARN_ON(atomic_read(&eb->refs) == 0);
3408 }
3409
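/*
 * Mark @eb dirty.  On the clean -> dirty transition also mark all folios (or
 * the subpage range) dirty and account the buffer length in the dirty
 * metadata counters.
 */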
3410 void set_extent_buffer_dirty(struct extent_buffer *eb)
3411 {
3412 int num_folios;
3413 bool was_dirty;
3414
3415 check_buffer_tree_ref(eb);
3416
3417 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3418
3419 num_folios = num_extent_folios(eb);
3420 WARN_ON(atomic_read(&eb->refs) == 0);
3421 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3422 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
3423
3424 if (!was_dirty) {
3425 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3426
3427 /*
3428		 * For the subpage case, we can have other extent buffers in the
3429		 * same page, and in clear_subpage_extent_buffer_dirty() we
3430		 * have to clear the page dirty flag without the subpage lock held.
3431		 * This can cause a race where our page gets its dirty bit cleared
3432		 * right after we set it.
3433		 *
3434		 * Thankfully, clear_subpage_extent_buffer_dirty() already locks
3435		 * its page for other reasons, so we can use the page lock to
3436		 * prevent the above race.
3437 */
3438 if (subpage)
3439 folio_lock(eb->folios[0]);
3440 for (int i = 0; i < num_folios; i++)
3441 btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
3442 eb->start, eb->len);
3443 if (subpage)
3444 folio_unlock(eb->folios[0]);
3445 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3446 eb->len,
3447 eb->fs_info->dirty_metadata_batch);
3448 }
3449 #ifdef CONFIG_BTRFS_DEBUG
3450 for (int i = 0; i < num_folios; i++)
3451 ASSERT(folio_test_dirty(eb->folios[i]));
3452 #endif
3453 }
3454
3455 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3456 {
3457 struct btrfs_fs_info *fs_info = eb->fs_info;
3458 int num_folios = num_extent_folios(eb);
3459
3460 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3461 for (int i = 0; i < num_folios; i++) {
3462 struct folio *folio = eb->folios[i];
3463
3464 if (!folio)
3465 continue;
3466
3467 /*
3468 * This is special handling for metadata subpage, as regular
3469		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
3470 */
3471 if (fs_info->nodesize >= PAGE_SIZE)
3472 folio_clear_uptodate(folio);
3473 else
3474 btrfs_subpage_clear_uptodate(fs_info, folio,
3475 eb->start, eb->len);
3476 }
3477 }
3478
3479 void set_extent_buffer_uptodate(struct extent_buffer *eb)
3480 {
3481 struct btrfs_fs_info *fs_info = eb->fs_info;
3482 int num_folios = num_extent_folios(eb);
3483
3484 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3485 for (int i = 0; i < num_folios; i++) {
3486 struct folio *folio = eb->folios[i];
3487
3488 /*
3489 * This is special handling for metadata subpage, as regular
3490		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
3491 */
3492 if (fs_info->nodesize >= PAGE_SIZE)
3493 folio_mark_uptodate(folio);
3494 else
3495 btrfs_subpage_set_uptodate(fs_info, folio,
3496 eb->start, eb->len);
3497 }
3498 }
3499
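/* Clear the EXTENT_BUFFER_READING bit and wake up any waiters on it. */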
3500 static void clear_extent_buffer_reading(struct extent_buffer *eb)
3501 {
3502 clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
3503 smp_mb__after_atomic();
3504 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
3505 }
3506
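/*
 * End IO handler for metadata reads.  Validates the extent buffer, updates
 * the eb and per-folio uptodate status accordingly, clears the READING bit
 * and drops the reference taken when the read was submitted.
 */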
3507 static void end_bbio_meta_read(struct btrfs_bio *bbio)
3508 {
3509 struct extent_buffer *eb = bbio->private;
3510 struct btrfs_fs_info *fs_info = eb->fs_info;
3511 bool uptodate = !bbio->bio.bi_status;
3512 struct folio_iter fi;
3513 u32 bio_offset = 0;
3514
3515 /*
3516 * If the extent buffer is marked UPTODATE before the read operation
3517 * completes, other calls to read_extent_buffer_pages() will return
3518 * early without waiting for the read to finish, causing data races.
3519 */
3520 WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
3521
3522 eb->read_mirror = bbio->mirror_num;
3523
3524 if (uptodate &&
3525 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3526 uptodate = false;
3527
3528 if (uptodate) {
3529 set_extent_buffer_uptodate(eb);
3530 } else {
3531 clear_extent_buffer_uptodate(eb);
3532 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3533 }
3534
3535 bio_for_each_folio_all(fi, &bbio->bio) {
3536 struct folio *folio = fi.folio;
3537 u64 start = eb->start + bio_offset;
3538 u32 len = fi.length;
3539
3540 if (uptodate)
3541 btrfs_folio_set_uptodate(fs_info, folio, start, len);
3542 else
3543 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
3544
3545 bio_offset += len;
3546 }
3547
3548 clear_extent_buffer_reading(eb);
3549 free_extent_buffer(eb);
3550
3551 bio_put(&bbio->bio);
3552 }
3553
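/*
 * Submit an asynchronous read of @eb unless it is already uptodate or a read
 * is in flight.  Returns 0 if the buffer is uptodate or a read has been
 * started (or is already running), and -EIO if a previous write error was
 * recorded.  Callers wait for completion on the EXTENT_BUFFER_READING bit.
 */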
3554 int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
3555 const struct btrfs_tree_parent_check *check)
3556 {
3557 struct btrfs_bio *bbio;
3558 bool ret;
3559
3560 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3561 return 0;
3562
3563 /*
3564 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3565 * operation, which could potentially still be in flight. In this case
3566 * we simply want to return an error.
3567 */
3568 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3569 return -EIO;
3570
3571 /* Someone else is already reading the buffer, just wait for it. */
3572 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3573 return 0;
3574
3575 /*
3576 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
3577 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
3578 * started and finished reading the same eb. In this case, UPTODATE
3579 * will now be set, and we shouldn't read it in again.
3580 */
3581 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
3582 clear_extent_buffer_reading(eb);
3583 return 0;
3584 }
3585
3586 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3587 eb->read_mirror = 0;
3588 check_buffer_tree_ref(eb);
3589 atomic_inc(&eb->refs);
3590
3591 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3592 REQ_OP_READ | REQ_META, eb->fs_info,
3593 end_bbio_meta_read, eb);
3594 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3595 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3596 bbio->file_offset = eb->start;
3597 memcpy(&bbio->parent_check, check, sizeof(*check));
3598 if (eb->fs_info->nodesize < PAGE_SIZE) {
3599 ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
3600 eb->start - folio_pos(eb->folios[0]));
3601 ASSERT(ret);
3602 } else {
3603 int num_folios = num_extent_folios(eb);
3604
3605 for (int i = 0; i < num_folios; i++) {
3606 struct folio *folio = eb->folios[i];
3607
3608 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
3609 ASSERT(ret);
3610 }
3611 }
3612 btrfs_submit_bbio(bbio, mirror_num);
3613 return 0;
3614 }
3615
3616 int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
3617 const struct btrfs_tree_parent_check *check)
3618 {
3619 int ret;
3620
3621 ret = read_extent_buffer_pages_nowait(eb, mirror_num, check);
3622 if (ret < 0)
3623 return ret;
3624
3625 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3626 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3627 return -EIO;
3628 return 0;
3629 }
3630
3631 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3632 unsigned long len)
3633 {
3634 btrfs_warn(eb->fs_info,
3635 "access to eb bytenr %llu len %u out of range start %lu len %lu",
3636 eb->start, eb->len, start, len);
3637 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3638
3639 return true;
3640 }
3641
3642 /*
3643 * Check if the [start, start + len) range is valid before reading/writing
3644 * the eb.
3645 * NOTE: @start and @len are offset inside the eb, not logical address.
3646 *
3647 * Caller should not touch the dst/src memory if this function returns error.
3648 */
3649 static inline int check_eb_range(const struct extent_buffer *eb,
3650 unsigned long start, unsigned long len)
3651 {
3652 unsigned long offset;
3653
3654 /* start, start + len should not go beyond eb->len nor overflow */
3655 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3656 return report_eb_range(eb, start, len);
3657
3658 return false;
3659 }
3660
3661 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3662 unsigned long start, unsigned long len)
3663 {
3664 const int unit_size = eb->folio_size;
3665 size_t cur;
3666 size_t offset;
3667 char *dst = (char *)dstv;
3668 unsigned long i = get_eb_folio_index(eb, start);
3669
3670 if (check_eb_range(eb, start, len)) {
3671 /*
3672 * Invalid range hit, reset the memory, so callers won't get
3673 * some random garbage for their uninitialized memory.
3674 */
3675 memset(dstv, 0, len);
3676 return;
3677 }
3678
3679 if (eb->addr) {
3680 memcpy(dstv, eb->addr + start, len);
3681 return;
3682 }
3683
3684 offset = get_eb_offset_in_folio(eb, start);
3685
3686 while (len > 0) {
3687 char *kaddr;
3688
3689 cur = min(len, unit_size - offset);
3690 kaddr = folio_address(eb->folios[i]);
3691 memcpy(dst, kaddr + offset, cur);
3692
3693 dst += cur;
3694 len -= cur;
3695 offset = 0;
3696 i++;
3697 }
3698 }
3699
3700 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
3701 void __user *dstv,
3702 unsigned long start, unsigned long len)
3703 {
3704 const int unit_size = eb->folio_size;
3705 size_t cur;
3706 size_t offset;
3707 char __user *dst = (char __user *)dstv;
3708 unsigned long i = get_eb_folio_index(eb, start);
3709 int ret = 0;
3710
3711 WARN_ON(start > eb->len);
3712 WARN_ON(start + len > eb->start + eb->len);
3713
3714 if (eb->addr) {
3715 if (copy_to_user_nofault(dstv, eb->addr + start, len))
3716 ret = -EFAULT;
3717 return ret;
3718 }
3719
3720 offset = get_eb_offset_in_folio(eb, start);
3721
3722 while (len > 0) {
3723 char *kaddr;
3724
3725 cur = min(len, unit_size - offset);
3726 kaddr = folio_address(eb->folios[i]);
3727 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
3728 ret = -EFAULT;
3729 break;
3730 }
3731
3732 dst += cur;
3733 len -= cur;
3734 offset = 0;
3735 i++;
3736 }
3737
3738 return ret;
3739 }
3740
3741 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
3742 unsigned long start, unsigned long len)
3743 {
3744 const int unit_size = eb->folio_size;
3745 size_t cur;
3746 size_t offset;
3747 char *kaddr;
3748 char *ptr = (char *)ptrv;
3749 unsigned long i = get_eb_folio_index(eb, start);
3750 int ret = 0;
3751
3752 if (check_eb_range(eb, start, len))
3753 return -EINVAL;
3754
3755 if (eb->addr)
3756 return memcmp(ptrv, eb->addr + start, len);
3757
3758 offset = get_eb_offset_in_folio(eb, start);
3759
3760 while (len > 0) {
3761 cur = min(len, unit_size - offset);
3762 kaddr = folio_address(eb->folios[i]);
3763 ret = memcmp(ptr, kaddr + offset, cur);
3764 if (ret)
3765 break;
3766
3767 ptr += cur;
3768 len -= cur;
3769 offset = 0;
3770 i++;
3771 }
3772 return ret;
3773 }
3774
3775 /*
3776 * Check that the extent buffer is uptodate.
3777 *
3778 * For the regular case (nodesize >= PAGE_SIZE), check if the folio is uptodate.
3779 * For the subpage case, check if the eb range has the subpage uptodate bit set.
3780 */
3781 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
3782 {
3783 struct btrfs_fs_info *fs_info = eb->fs_info;
3784 struct folio *folio = eb->folios[i];
3785
3786 ASSERT(folio);
3787
3788 /*
3789 * If we are using the commit root we could potentially clear a page
3790 * Uptodate while we're using the extent buffer that we've previously
3791 * looked up. We don't want to complain in this case, as the page was
3792 * valid before, we just didn't write it out. Instead we want to catch
3793 * the case where we didn't actually read the block properly, which
3794 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
3795 */
3796 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3797 return;
3798
3799 if (fs_info->nodesize < PAGE_SIZE) {
3800 folio = eb->folios[0];
3801 ASSERT(i == 0);
3802 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
3803 eb->start, eb->len)))
3804 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
3805 } else {
3806 WARN_ON(!folio_test_uptodate(folio));
3807 }
3808 }
3809
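/*
 * Copy @len bytes from @srcv into @eb at offset @start, using memmove()
 * instead of memcpy() when @use_memmove is set (i.e. the ranges may overlap).
 */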
3810 static void __write_extent_buffer(const struct extent_buffer *eb,
3811 const void *srcv, unsigned long start,
3812 unsigned long len, bool use_memmove)
3813 {
3814 const int unit_size = eb->folio_size;
3815 size_t cur;
3816 size_t offset;
3817 char *kaddr;
3818 const char *src = (const char *)srcv;
3819 unsigned long i = get_eb_folio_index(eb, start);
3820 /* For unmapped (dummy) ebs, no need to check their uptodate status. */
3821 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3822
3823 if (check_eb_range(eb, start, len))
3824 return;
3825
3826 if (eb->addr) {
3827 if (use_memmove)
3828 memmove(eb->addr + start, srcv, len);
3829 else
3830 memcpy(eb->addr + start, srcv, len);
3831 return;
3832 }
3833
3834 offset = get_eb_offset_in_folio(eb, start);
3835
3836 while (len > 0) {
3837 if (check_uptodate)
3838 assert_eb_folio_uptodate(eb, i);
3839
3840 cur = min(len, unit_size - offset);
3841 kaddr = folio_address(eb->folios[i]);
3842 if (use_memmove)
3843 memmove(kaddr + offset, src, cur);
3844 else
3845 memcpy(kaddr + offset, src, cur);
3846
3847 src += cur;
3848 len -= cur;
3849 offset = 0;
3850 i++;
3851 }
3852 }
3853
3854 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
3855 unsigned long start, unsigned long len)
3856 {
3857 return __write_extent_buffer(eb, srcv, start, len, false);
3858 }
3859
3860 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
3861 unsigned long start, unsigned long len)
3862 {
3863 const int unit_size = eb->folio_size;
3864 unsigned long cur = start;
3865
3866 if (eb->addr) {
3867 memset(eb->addr + start, c, len);
3868 return;
3869 }
3870
3871 while (cur < start + len) {
3872 unsigned long index = get_eb_folio_index(eb, cur);
3873 unsigned int offset = get_eb_offset_in_folio(eb, cur);
3874 unsigned int cur_len = min(start + len - cur, unit_size - offset);
3875
3876 assert_eb_folio_uptodate(eb, index);
3877 memset(folio_address(eb->folios[index]) + offset, c, cur_len);
3878
3879 cur += cur_len;
3880 }
3881 }
3882
3883 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
3884 unsigned long len)
3885 {
3886 if (check_eb_range(eb, start, len))
3887 return;
3888 return memset_extent_buffer(eb, 0, start, len);
3889 }
3890
3891 void copy_extent_buffer_full(const struct extent_buffer *dst,
3892 const struct extent_buffer *src)
3893 {
3894 const int unit_size = src->folio_size;
3895 unsigned long cur = 0;
3896
3897 ASSERT(dst->len == src->len);
3898
3899 while (cur < src->len) {
3900 unsigned long index = get_eb_folio_index(src, cur);
3901 unsigned long offset = get_eb_offset_in_folio(src, cur);
3902 unsigned long cur_len = min(src->len, unit_size - offset);
3903 void *addr = folio_address(src->folios[index]) + offset;
3904
3905 write_extent_buffer(dst, addr, cur, cur_len);
3906
3907 cur += cur_len;
3908 }
3909 }
3910
3911 void copy_extent_buffer(const struct extent_buffer *dst,
3912 const struct extent_buffer *src,
3913 unsigned long dst_offset, unsigned long src_offset,
3914 unsigned long len)
3915 {
3916 const int unit_size = dst->folio_size;
3917 u64 dst_len = dst->len;
3918 size_t cur;
3919 size_t offset;
3920 char *kaddr;
3921 unsigned long i = get_eb_folio_index(dst, dst_offset);
3922
3923 if (check_eb_range(dst, dst_offset, len) ||
3924 check_eb_range(src, src_offset, len))
3925 return;
3926
3927 WARN_ON(src->len != dst_len);
3928
3929 offset = get_eb_offset_in_folio(dst, dst_offset);
3930
3931 while (len > 0) {
3932 assert_eb_folio_uptodate(dst, i);
3933
3934 cur = min(len, (unsigned long)(unit_size - offset));
3935
3936 kaddr = folio_address(dst->folios[i]);
3937 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3938
3939 src_offset += cur;
3940 len -= cur;
3941 offset = 0;
3942 i++;
3943 }
3944 }
3945
3946 /*
3947 * Calculate the folio and offset of the byte containing the given bit number.
3948 *
3949 * @eb: the extent buffer
3950 * @start: offset of the bitmap item in the extent buffer
3951 * @nr: bit number
3952 * @folio_index: return index of the folio in the extent buffer that contains
3953 * the given bit number
3954 * @folio_offset: return offset into the folio given by folio_index
3955 *
3956 * This helper hides the ugliness of finding the byte in an extent buffer which
3957 * contains a given bit.
3958 */
3959 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
3960 unsigned long start, unsigned long nr,
3961 unsigned long *folio_index,
3962 size_t *folio_offset)
3963 {
3964 size_t byte_offset = BIT_BYTE(nr);
3965 size_t offset;
3966
3967 /*
3968 * The byte we want is the offset of the extent buffer + the offset of
3969 * the bitmap item in the extent buffer + the offset of the byte in the
3970 * bitmap item.
3971 */
3972 offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
3973
3974 *folio_index = offset >> eb->folio_shift;
3975 *folio_offset = offset_in_eb_folio(eb, offset);
3976 }
3977
3978 /*
3979 * Determine whether a bit in a bitmap item is set.
3980 *
3981 * @eb: the extent buffer
3982 * @start: offset of the bitmap item in the extent buffer
3983 * @nr: bit number to test
3984 */
3985 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
3986 unsigned long nr)
3987 {
3988 unsigned long i;
3989 size_t offset;
3990 u8 *kaddr;
3991
3992 eb_bitmap_offset(eb, start, nr, &i, &offset);
3993 assert_eb_folio_uptodate(eb, i);
3994 kaddr = folio_address(eb->folios[i]);
3995 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
3996 }
3997
3998 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
3999 {
4000 unsigned long index = get_eb_folio_index(eb, bytenr);
4001
4002 if (check_eb_range(eb, bytenr, 1))
4003 return NULL;
4004 return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4005 }
4006
4007 /*
4008 * Set an area of a bitmap to 1.
4009 *
4010 * @eb: the extent buffer
4011 * @start: offset of the bitmap item in the extent buffer
4012 * @pos: bit number of the first bit
4013 * @len: number of bits to set
4014 */
4015 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4016 unsigned long pos, unsigned long len)
4017 {
4018 unsigned int first_byte = start + BIT_BYTE(pos);
4019 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4020 const bool same_byte = (first_byte == last_byte);
4021 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4022 u8 *kaddr;
4023
4024 if (same_byte)
4025 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4026
4027 /* Handle the first byte. */
4028 kaddr = extent_buffer_get_byte(eb, first_byte);
4029 *kaddr |= mask;
4030 if (same_byte)
4031 return;
4032
4033 /* Handle the byte aligned part. */
4034 ASSERT(first_byte + 1 <= last_byte);
4035 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4036
4037 /* Handle the last byte. */
4038 kaddr = extent_buffer_get_byte(eb, last_byte);
4039 *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4040 }
4041
4042
4043 /*
4044 * Clear an area of a bitmap.
4045 *
4046 * @eb: the extent buffer
4047 * @start: offset of the bitmap item in the extent buffer
4048 * @pos: bit number of the first bit
4049 * @len: number of bits to clear
4050 */
4051 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4052 unsigned long start, unsigned long pos,
4053 unsigned long len)
4054 {
4055 unsigned int first_byte = start + BIT_BYTE(pos);
4056 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4057 const bool same_byte = (first_byte == last_byte);
4058 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4059 u8 *kaddr;
4060
4061 if (same_byte)
4062 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4063
4064 /* Handle the first byte. */
4065 kaddr = extent_buffer_get_byte(eb, first_byte);
4066 *kaddr &= ~mask;
4067 if (same_byte)
4068 return;
4069
4070 /* Handle the byte aligned part. */
4071 ASSERT(first_byte + 1 <= last_byte);
4072 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4073
4074 /* Handle the last byte. */
4075 kaddr = extent_buffer_get_byte(eb, last_byte);
4076 *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4077 }
4078
4079 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4080 {
4081 unsigned long distance = (src > dst) ? src - dst : dst - src;
4082 return distance < len;
4083 }
4084
4085 void memcpy_extent_buffer(const struct extent_buffer *dst,
4086 unsigned long dst_offset, unsigned long src_offset,
4087 unsigned long len)
4088 {
4089 const int unit_size = dst->folio_size;
4090 unsigned long cur_off = 0;
4091
4092 if (check_eb_range(dst, dst_offset, len) ||
4093 check_eb_range(dst, src_offset, len))
4094 return;
4095
4096 if (dst->addr) {
4097 const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4098
4099 if (use_memmove)
4100 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4101 else
4102 memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4103 return;
4104 }
4105
4106 while (cur_off < len) {
4107 unsigned long cur_src = cur_off + src_offset;
4108 unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4109 unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4110 unsigned long cur_len = min(src_offset + len - cur_src,
4111 unit_size - folio_off);
4112 void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4113 const bool use_memmove = areas_overlap(src_offset + cur_off,
4114 dst_offset + cur_off, cur_len);
4115
4116 __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4117 use_memmove);
4118 cur_off += cur_len;
4119 }
4120 }
4121
4122 void memmove_extent_buffer(const struct extent_buffer *dst,
4123 unsigned long dst_offset, unsigned long src_offset,
4124 unsigned long len)
4125 {
4126 unsigned long dst_end = dst_offset + len - 1;
4127 unsigned long src_end = src_offset + len - 1;
4128
4129 if (check_eb_range(dst, dst_offset, len) ||
4130 check_eb_range(dst, src_offset, len))
4131 return;
4132
4133 if (dst_offset < src_offset) {
4134 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4135 return;
4136 }
4137
4138 if (dst->addr) {
4139 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4140 return;
4141 }
4142
4143 while (len > 0) {
4144 unsigned long src_i;
4145 size_t cur;
4146 size_t dst_off_in_folio;
4147 size_t src_off_in_folio;
4148 void *src_addr;
4149 bool use_memmove;
4150
4151 src_i = get_eb_folio_index(dst, src_end);
4152
4153 dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4154 src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4155
4156 cur = min_t(unsigned long, len, src_off_in_folio + 1);
4157 cur = min(cur, dst_off_in_folio + 1);
4158
4159 src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4160 cur + 1;
4161 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4162 cur);
4163
4164 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4165 use_memmove);
4166
4167 dst_end -= cur;
4168 src_end -= cur;
4169 len -= cur;
4170 }
4171 }
4172
4173 #define GANG_LOOKUP_SIZE 16
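/*
 * Return the first extent buffer in @folio whose start is >= @bytenr, or NULL
 * if there is none.  Scans the buffer_radix tree in batches of
 * GANG_LOOKUP_SIZE; fs_info->buffer_lock must be held by the caller.
 */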
4174 static struct extent_buffer *get_next_extent_buffer(
4175 const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr)
4176 {
4177 struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4178 struct extent_buffer *found = NULL;
4179 u64 folio_start = folio_pos(folio);
4180 u64 cur = folio_start;
4181
4182 ASSERT(in_range(bytenr, folio_start, PAGE_SIZE));
4183 lockdep_assert_held(&fs_info->buffer_lock);
4184
4185 while (cur < folio_start + PAGE_SIZE) {
4186 int ret;
4187 int i;
4188
4189 ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4190 (void **)gang, cur >> fs_info->sectorsize_bits,
4191 min_t(unsigned int, GANG_LOOKUP_SIZE,
4192 PAGE_SIZE / fs_info->nodesize));
4193 if (ret == 0)
4194 goto out;
4195 for (i = 0; i < ret; i++) {
4196 /* Already beyond page end */
4197 if (gang[i]->start >= folio_start + PAGE_SIZE)
4198 goto out;
4199 /* Found one */
4200 if (gang[i]->start >= bytenr) {
4201 found = gang[i];
4202 goto out;
4203 }
4204 }
4205 cur = gang[ret - 1]->start + gang[ret - 1]->len;
4206 }
4207 out:
4208 return found;
4209 }
4210
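/*
 * Subpage variant of try_release_extent_buffer(): walk all extent buffers in
 * the folio range and release the ones that only hold the tree reference and
 * have no IO in flight.  Returns 1 if folio private ended up cleared (i.e.
 * every eb in the folio was released), 0 otherwise.
 */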
4211 static int try_release_subpage_extent_buffer(struct folio *folio)
4212 {
4213 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
4214 u64 cur = folio_pos(folio);
4215 const u64 end = cur + PAGE_SIZE;
4216 int ret;
4217
4218 while (cur < end) {
4219 struct extent_buffer *eb = NULL;
4220
4221 /*
4222 * Unlike try_release_extent_buffer() which uses folio private
4223 * to grab buffer, for subpage case we rely on radix tree, thus
4224 * we need to ensure radix tree consistency.
4225 *
4226 * We also want an atomic snapshot of the radix tree, thus go
4227 * with spinlock rather than RCU.
4228 */
4229 spin_lock(&fs_info->buffer_lock);
4230 eb = get_next_extent_buffer(fs_info, folio, cur);
4231 if (!eb) {
4232 /* No more eb in the page range after or at cur */
4233 spin_unlock(&fs_info->buffer_lock);
4234 break;
4235 }
4236 cur = eb->start + eb->len;
4237
4238 /*
4239 * The same as try_release_extent_buffer(), to ensure the eb
4240 * won't disappear out from under us.
4241 */
4242 spin_lock(&eb->refs_lock);
4243 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4244 spin_unlock(&eb->refs_lock);
4245 spin_unlock(&fs_info->buffer_lock);
4246 break;
4247 }
4248 spin_unlock(&fs_info->buffer_lock);
4249
4250 /*
4251 * If tree ref isn't set then we know the ref on this eb is a
4252 * real ref, so just return, this eb will likely be freed soon
4253 * anyway.
4254 */
4255 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4256 spin_unlock(&eb->refs_lock);
4257 break;
4258 }
4259
4260 /*
4261 * Here we don't care about the return value, we will always
4262 * check the folio private at the end. And
4263 * release_extent_buffer() will release the refs_lock.
4264 */
4265 release_extent_buffer(eb);
4266 }
4267 /*
4268	 * Finally, check whether folio private has been cleared: if we have
4269	 * released all ebs in the page, the folio private should be cleared now.
4270 */
4271 spin_lock(&folio->mapping->i_private_lock);
4272 if (!folio_test_private(folio))
4273 ret = 1;
4274 else
4275 ret = 0;
4276 spin_unlock(&folio->mapping->i_private_lock);
4277 return ret;
4278
4279 }
4280
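/*
 * Attempt to release the extent buffer attached to @folio.  Returns 1 if the
 * eb was only held by the tree and could be freed, 0 if it is still in use
 * (extra references or IO in flight).
 */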
4281 int try_release_extent_buffer(struct folio *folio)
4282 {
4283 struct extent_buffer *eb;
4284
4285 if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
4286 return try_release_subpage_extent_buffer(folio);
4287
4288 /*
4289 * We need to make sure nobody is changing folio private, as we rely on
4290 * folio private as the pointer to extent buffer.
4291 */
4292 spin_lock(&folio->mapping->i_private_lock);
4293 if (!folio_test_private(folio)) {
4294 spin_unlock(&folio->mapping->i_private_lock);
4295 return 1;
4296 }
4297
4298 eb = folio_get_private(folio);
4299 BUG_ON(!eb);
4300
4301 /*
4302 * This is a little awful but should be ok, we need to make sure that
4303 * the eb doesn't disappear out from under us while we're looking at
4304 * this page.
4305 */
4306 spin_lock(&eb->refs_lock);
4307 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4308 spin_unlock(&eb->refs_lock);
4309 spin_unlock(&folio->mapping->i_private_lock);
4310 return 0;
4311 }
4312 spin_unlock(&folio->mapping->i_private_lock);
4313
4314 /*
4315 * If tree ref isn't set then we know the ref on this eb is a real ref,
4316 * so just return, this page will likely be freed soon anyway.
4317 */
4318 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4319 spin_unlock(&eb->refs_lock);
4320 return 0;
4321 }
4322
4323 return release_extent_buffer(eb);
4324 }
4325
4326 /*
4327 * Attempt to readahead a child block.
4328 *
4329 * @fs_info: the fs_info
4330 * @bytenr: bytenr to read
4331 * @owner_root: objectid of the root that owns this eb
4332 * @gen: generation for the uptodate check, can be 0
4333 * @level: level for the eb
4334 *
4335 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
4336 * normal uptodate check of the eb, without checking the generation. If we have
4337 * to read the block we will not block on anything.
4338 */
4339 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4340 u64 bytenr, u64 owner_root, u64 gen, int level)
4341 {
4342 struct btrfs_tree_parent_check check = {
4343 .level = level,
4344 .transid = gen
4345 };
4346 struct extent_buffer *eb;
4347 int ret;
4348
4349 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4350 if (IS_ERR(eb))
4351 return;
4352
4353 if (btrfs_buffer_uptodate(eb, gen, 1)) {
4354 free_extent_buffer(eb);
4355 return;
4356 }
4357
4358 ret = read_extent_buffer_pages_nowait(eb, 0, &check);
4359 if (ret < 0)
4360 free_extent_buffer_stale(eb);
4361 else
4362 free_extent_buffer(eb);
4363 }
4364
4365 /*
4366 * Readahead a node's child block.
4367 *
4368 * @node: parent node we're reading from
4369 * @slot: slot in the parent node for the child we want to read
4370 *
4371 * A helper for btrfs_readahead_tree_block(), we simply read the bytenr
4372 * pointed at by the slot in the node provided.
4373 */
4374 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4375 {
4376 btrfs_readahead_tree_block(node->fs_info,
4377 btrfs_node_blockptr(node, slot),
4378 btrfs_header_owner(node),
4379 btrfs_node_ptr_generation(node, slot),
4380 btrfs_header_level(node) - 1);
4381 }
4382