1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/sched/mm.h>
10 #include <linux/spinlock.h>
11 #include <linux/blkdev.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include <linux/prefetch.h>
16 #include <linux/fsverity.h>
17 #include "extent_io.h"
18 #include "extent-io-tree.h"
19 #include "extent_map.h"
20 #include "ctree.h"
21 #include "btrfs_inode.h"
22 #include "bio.h"
23 #include "locking.h"
24 #include "backref.h"
25 #include "disk-io.h"
26 #include "subpage.h"
27 #include "zoned.h"
28 #include "block-group.h"
29 #include "compression.h"
30 #include "fs.h"
31 #include "accessors.h"
32 #include "file-item.h"
33 #include "file.h"
34 #include "dev-replace.h"
35 #include "super.h"
36 #include "transaction.h"
37
38 static struct kmem_cache *extent_buffer_cache;
39
40 #ifdef CONFIG_BTRFS_DEBUG
41 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
42 {
43 struct btrfs_fs_info *fs_info = eb->fs_info;
44 unsigned long flags;
45
46 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
47 list_add(&eb->leak_list, &fs_info->allocated_ebs);
48 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
49 }
50
51 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
52 {
53 struct btrfs_fs_info *fs_info = eb->fs_info;
54 unsigned long flags;
55
56 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
57 list_del(&eb->leak_list);
58 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
59 }
60
61 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
62 {
63 struct extent_buffer *eb;
64 unsigned long flags;
65
66 /*
67 * If we didn't get into open_ctree our allocated_ebs will not be
68 * initialized, so just skip this.
69 */
70 if (!fs_info->allocated_ebs.next)
71 return;
72
73 WARN_ON(!list_empty(&fs_info->allocated_ebs));
74 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
75 while (!list_empty(&fs_info->allocated_ebs)) {
76 eb = list_first_entry(&fs_info->allocated_ebs,
77 struct extent_buffer, leak_list);
78 pr_err(
79 "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
81 btrfs_header_owner(eb));
82 list_del(&eb->leak_list);
83 WARN_ON_ONCE(1);
84 kmem_cache_free(extent_buffer_cache, eb);
85 }
86 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
87 }
88 #else
89 #define btrfs_leak_debug_add_eb(eb) do {} while (0)
90 #define btrfs_leak_debug_del_eb(eb) do {} while (0)
91 #endif
92
93 /*
94 * Structure to record info about the bio being assembled, and other info like
95 * how many bytes are left before the stripe/ordered extent boundary.
96 */
97 struct btrfs_bio_ctrl {
98 struct btrfs_bio *bbio;
99 enum btrfs_compression_type compress_type;
100 u32 len_to_oe_boundary;
101 blk_opf_t opf;
102 btrfs_bio_end_io_t end_io_func;
103 struct writeback_control *wbc;
104
105 /*
106 * The sectors of the page which are going to be submitted by
107 * extent_writepage_io().
108 * This is to avoid touching ranges covered by compression/inline.
109 */
110 unsigned long submit_bitmap;
111 };
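
/*
 * Illustrative sketch (not compiled): the typical bio_ctrl lifecycle as seen
 * in btrfs_read_folio() further below. The structure lives on the caller's
 * stack, the readpage/writepage helpers keep merging contiguous ranges into
 * bio_ctrl->bbio, and a final submit_one_bio() flushes whatever is left.
 */
#if 0
static int example_read_one_folio(struct folio *folio)
{
	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
	struct extent_map *em_cached = NULL;
	int ret;

	/* Queue every sector of the folio into bio_ctrl.bbio. */
	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
	free_extent_map(em_cached);

	/* Always flush the assembled bio, even after an error. */
	submit_one_bio(&bio_ctrl);
	return ret;
}
#endif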
112
113 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
114 {
115 struct btrfs_bio *bbio = bio_ctrl->bbio;
116
117 if (!bbio)
118 return;
119
120 /* Caller should ensure the bio has at least some range added */
121 ASSERT(bbio->bio.bi_iter.bi_size);
122
123 if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
124 bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
125 btrfs_submit_compressed_read(bbio);
126 else
127 btrfs_submit_bbio(bbio, 0);
128
129 /* The bbio is owned by the end_io handler now */
130 bio_ctrl->bbio = NULL;
131 }
132
133 /*
134 * Submit or fail the current bio in the bio_ctrl structure.
135 */
136 static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
137 {
138 struct btrfs_bio *bbio = bio_ctrl->bbio;
139
140 if (!bbio)
141 return;
142
143 if (ret) {
144 ASSERT(ret < 0);
145 btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
146 /* The bio is owned by the end_io handler now */
147 bio_ctrl->bbio = NULL;
148 } else {
149 submit_one_bio(bio_ctrl);
150 }
151 }
152
153 int __init extent_buffer_init_cachep(void)
154 {
155 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
156 sizeof(struct extent_buffer), 0, 0,
157 NULL);
158 if (!extent_buffer_cache)
159 return -ENOMEM;
160
161 return 0;
162 }
163
164 void __cold extent_buffer_free_cachep(void)
165 {
166 /*
167 * Make sure all delayed rcu free are flushed before we
168 * destroy caches.
169 */
170 rcu_barrier();
171 kmem_cache_destroy(extent_buffer_cache);
172 }
173
174 static void process_one_folio(struct btrfs_fs_info *fs_info,
175 struct folio *folio, const struct folio *locked_folio,
176 unsigned long page_ops, u64 start, u64 end)
177 {
178 u32 len;
179
180 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
181 len = end + 1 - start;
182
183 if (page_ops & PAGE_SET_ORDERED)
184 btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
185 if (page_ops & PAGE_START_WRITEBACK) {
186 btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
187 btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
188 }
189 if (page_ops & PAGE_END_WRITEBACK)
190 btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
191
192 if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
193 btrfs_folio_end_writer_lock(fs_info, folio, start, len);
194 }
195
196 static void __process_folios_contig(struct address_space *mapping,
197 const struct folio *locked_folio, u64 start,
198 u64 end, unsigned long page_ops)
199 {
200 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
201 pgoff_t start_index = start >> PAGE_SHIFT;
202 pgoff_t end_index = end >> PAGE_SHIFT;
203 pgoff_t index = start_index;
204 struct folio_batch fbatch;
205 int i;
206
207 folio_batch_init(&fbatch);
208 while (index <= end_index) {
209 int found_folios;
210
211 found_folios = filemap_get_folios_contig(mapping, &index,
212 end_index, &fbatch);
213 for (i = 0; i < found_folios; i++) {
214 struct folio *folio = fbatch.folios[i];
215
216 process_one_folio(fs_info, folio, locked_folio,
217 page_ops, start, end);
218 }
219 folio_batch_release(&fbatch);
220 cond_resched();
221 }
222 }
223
224 static noinline void __unlock_for_delalloc(const struct inode *inode,
225 const struct folio *locked_folio,
226 u64 start, u64 end)
227 {
228 unsigned long index = start >> PAGE_SHIFT;
229 unsigned long end_index = end >> PAGE_SHIFT;
230
231 ASSERT(locked_folio);
232 if (index == locked_folio->index && end_index == index)
233 return;
234
235 __process_folios_contig(inode->i_mapping, locked_folio, start, end,
236 PAGE_UNLOCK);
237 }
238
239 static noinline int lock_delalloc_folios(struct inode *inode,
240 const struct folio *locked_folio,
241 u64 start, u64 end)
242 {
243 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
244 struct address_space *mapping = inode->i_mapping;
245 pgoff_t start_index = start >> PAGE_SHIFT;
246 pgoff_t end_index = end >> PAGE_SHIFT;
247 pgoff_t index = start_index;
248 u64 processed_end = start;
249 struct folio_batch fbatch;
250
251 if (index == locked_folio->index && index == end_index)
252 return 0;
253
254 folio_batch_init(&fbatch);
255 while (index <= end_index) {
256 unsigned int found_folios, i;
257
258 found_folios = filemap_get_folios_contig(mapping, &index,
259 end_index, &fbatch);
260 if (found_folios == 0)
261 goto out;
262
263 for (i = 0; i < found_folios; i++) {
264 struct folio *folio = fbatch.folios[i];
265 u32 len = end + 1 - start;
266
267 if (folio == locked_folio)
268 continue;
269
270 if (btrfs_folio_start_writer_lock(fs_info, folio, start,
271 len))
272 goto out;
273
274 if (!folio_test_dirty(folio) || folio->mapping != mapping) {
275 btrfs_folio_end_writer_lock(fs_info, folio, start,
276 len);
277 goto out;
278 }
279
280 processed_end = folio_pos(folio) + folio_size(folio) - 1;
281 }
282 folio_batch_release(&fbatch);
283 cond_resched();
284 }
285
286 return 0;
287 out:
288 folio_batch_release(&fbatch);
289 if (processed_end > start)
290 __unlock_for_delalloc(inode, locked_folio, start,
291 processed_end);
292 return -EAGAIN;
293 }
294
295 /*
296 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
297 * more than @max_bytes.
298 *
299 * @start: The original start bytenr to search.
300 * Will store the extent range start bytenr.
301 * @end: The original end bytenr of the search range
302 * Will store the extent range end bytenr.
303 *
304 * Return true if we find a delalloc range which starts inside the original
305 * range, and @start/@end will store the delalloc range start/end.
306 *
307 * Return false if we can't find any delalloc range which starts inside the
308 * original range, and @start/@end will be the non-delalloc range start/end.
309 */
310 EXPORT_FOR_TESTS
311 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
312 struct folio *locked_folio,
313 u64 *start, u64 *end)
314 {
315 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
316 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
317 const u64 orig_start = *start;
318 const u64 orig_end = *end;
319 /* The sanity tests may not set a valid fs_info. */
320 u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
321 u64 delalloc_start;
322 u64 delalloc_end;
323 bool found;
324 struct extent_state *cached_state = NULL;
325 int ret;
326 int loops = 0;
327
328 /* Caller should pass a valid @end to indicate the search range end */
329 ASSERT(orig_end > orig_start);
330
331 /* The range should at least cover part of the folio */
332 ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
333 orig_end <= folio_pos(locked_folio)));
334 again:
335 /* step one, find a bunch of delalloc bytes starting at start */
336 delalloc_start = *start;
337 delalloc_end = 0;
338 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
339 max_bytes, &cached_state);
340 if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
341 *start = delalloc_start;
342
343 /* @delalloc_end can be -1, never go beyond @orig_end */
344 *end = min(delalloc_end, orig_end);
345 free_extent_state(cached_state);
346 return false;
347 }
348
349 /*
350 * start comes from the offset of locked_folio. We have to lock
351 * folios in order, so we can't process delalloc bytes before
352 * locked_folio
353 */
354 if (delalloc_start < *start)
355 delalloc_start = *start;
356
357 /*
358 * make sure to limit the number of folios we try to lock down
359 */
360 if (delalloc_end + 1 - delalloc_start > max_bytes)
361 delalloc_end = delalloc_start + max_bytes - 1;
362
363 /* step two, lock all the folios after the folio that has start */
364 ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
365 delalloc_end);
366 ASSERT(!ret || ret == -EAGAIN);
367 if (ret == -EAGAIN) {
368 /* some of the folios are gone, let's avoid looping by
369 * shortening the size of the delalloc range we're searching
370 */
371 free_extent_state(cached_state);
372 cached_state = NULL;
373 if (!loops) {
374 max_bytes = PAGE_SIZE;
375 loops = 1;
376 goto again;
377 } else {
378 found = false;
379 goto out_failed;
380 }
381 }
382
383 /* step three, lock the state bits for the whole range */
384 lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
385
386 /* then test to make sure it is all still delalloc */
387 ret = test_range_bit(tree, delalloc_start, delalloc_end,
388 EXTENT_DELALLOC, cached_state);
389
390 unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
391 if (!ret) {
392 __unlock_for_delalloc(inode, locked_folio, delalloc_start,
393 delalloc_end);
394 cond_resched();
395 goto again;
396 }
397 *start = delalloc_start;
398 *end = delalloc_end;
399 out_failed:
400 return found;
401 }
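
/*
 * Illustrative sketch (not compiled): how a writeback helper walks a folio
 * with find_lock_delalloc_range(), advancing @start past each range. This
 * mirrors the locking loop in writepage_delalloc() further below.
 */
#if 0
	u64 start = folio_pos(folio);
	const u64 page_end = folio_pos(folio) + folio_size(folio) - 1;

	while (start < page_end) {
		u64 end = page_end;

		if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
					      &start, &end)) {
			/* No delalloc starts here, skip past the clean range. */
			start = end + 1;
			continue;
		}
		/* [start, end] is now a locked, all-delalloc range to run. */
		start = end + 1;
	}
#endif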
402
403 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
404 const struct folio *locked_folio,
405 struct extent_state **cached,
406 u32 clear_bits, unsigned long page_ops)
407 {
408 clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
409
410 __process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
411 end, page_ops);
412 }
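
/*
 * Illustrative sketch (not compiled): roughly how the delalloc error paths in
 * inode.c use this helper, clearing the extent bits and unlocking/finishing
 * writeback on every folio of the range except @locked_folio.
 */
#if 0
	extent_clear_unlock_delalloc(inode, start, end, locked_folio,
				     &cached_state,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
#endif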
413
414 static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
415 {
416 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
417
418 if (!fsverity_active(folio->mapping->host) ||
419 btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
420 start >= i_size_read(folio->mapping->host))
421 return true;
422 return fsverity_verify_folio(folio);
423 }
424
425 static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
426 {
427 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
428
429 ASSERT(folio_pos(folio) <= start &&
430 start + len <= folio_pos(folio) + PAGE_SIZE);
431
432 if (uptodate && btrfs_verify_folio(folio, start, len))
433 btrfs_folio_set_uptodate(fs_info, folio, start, len);
434 else
435 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
436
437 if (!btrfs_is_subpage(fs_info, folio->mapping))
438 folio_unlock(folio);
439 else
440 btrfs_subpage_end_reader(fs_info, folio, start, len);
441 }
442
443 /*
444 * After a write IO is done, we need to:
445 *
446 * - clear the uptodate bits on error
447 * - clear the writeback bits in the extent tree for the range
448 * - folio_end_writeback() if there is no more pending io for the folio
449 *
450 * Scheduling is not allowed, so the extent state tree is expected
451 * to have one and only one object corresponding to this IO.
452 */
453 static void end_bbio_data_write(struct btrfs_bio *bbio)
454 {
455 struct btrfs_fs_info *fs_info = bbio->fs_info;
456 struct bio *bio = &bbio->bio;
457 int error = blk_status_to_errno(bio->bi_status);
458 struct folio_iter fi;
459 const u32 sectorsize = fs_info->sectorsize;
460
461 ASSERT(!bio_flagged(bio, BIO_CLONED));
462 bio_for_each_folio_all(fi, bio) {
463 struct folio *folio = fi.folio;
464 u64 start = folio_pos(folio) + fi.offset;
465 u32 len = fi.length;
466
467 /* Only order 0 (single page) folios are allowed for data. */
468 ASSERT(folio_order(folio) == 0);
469
470 /* Our read/write should always be sector aligned. */
471 if (!IS_ALIGNED(fi.offset, sectorsize))
472 btrfs_err(fs_info,
473 "partial page write in btrfs with offset %zu and length %zu",
474 fi.offset, fi.length);
475 else if (!IS_ALIGNED(fi.length, sectorsize))
476 btrfs_info(fs_info,
477 "incomplete page write with offset %zu and length %zu",
478 fi.offset, fi.length);
479
480 btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
481 !error);
482 if (error)
483 mapping_set_error(folio->mapping, error);
484 btrfs_folio_clear_writeback(fs_info, folio, start, len);
485 }
486
487 bio_put(bio);
488 }
489
490 static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
491 {
492 ASSERT(folio_test_locked(folio));
493 if (!btrfs_is_subpage(fs_info, folio->mapping))
494 return;
495
496 ASSERT(folio_test_private(folio));
497 btrfs_subpage_start_reader(fs_info, folio, folio_pos(folio), PAGE_SIZE);
498 }
499
500 /*
501 * After a data read IO is done, we need to:
502 *
503 * - clear the uptodate bits on error
504 * - set the uptodate bits if things worked
505 * - set the folio up to date if all extents in the tree are uptodate
506 * - clear the lock bit in the extent tree
507 * - unlock the folio if there are no other extents locked for it
508 *
509 * Scheduling is not allowed, so the extent state tree is expected
510 * to have one and only one object corresponding to this IO.
511 */
512 static void end_bbio_data_read(struct btrfs_bio *bbio)
513 {
514 struct btrfs_fs_info *fs_info = bbio->fs_info;
515 struct bio *bio = &bbio->bio;
516 struct folio_iter fi;
517 const u32 sectorsize = fs_info->sectorsize;
518
519 ASSERT(!bio_flagged(bio, BIO_CLONED));
520 bio_for_each_folio_all(fi, &bbio->bio) {
521 bool uptodate = !bio->bi_status;
522 struct folio *folio = fi.folio;
523 struct inode *inode = folio->mapping->host;
524 u64 start;
525 u64 end;
526 u32 len;
527
528 /* For now only order 0 folios are supported for data. */
529 ASSERT(folio_order(folio) == 0);
530 btrfs_debug(fs_info,
531 "%s: bi_sector=%llu, err=%d, mirror=%u",
532 __func__, bio->bi_iter.bi_sector, bio->bi_status,
533 bbio->mirror_num);
534
535 /*
536 * We always issue full-sector reads, but if some block in a
537 * folio fails to read, blk_update_request() will advance
538 * bv_offset and adjust bv_len to compensate. Print a warning
539 * for unaligned offsets, and an error if they don't add up to
540 * a full sector.
541 */
542 if (!IS_ALIGNED(fi.offset, sectorsize))
543 btrfs_err(fs_info,
544 "partial page read in btrfs with offset %zu and length %zu",
545 fi.offset, fi.length);
546 else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
547 btrfs_info(fs_info,
548 "incomplete page read with offset %zu and length %zu",
549 fi.offset, fi.length);
550
551 start = folio_pos(folio) + fi.offset;
552 end = start + fi.length - 1;
553 len = fi.length;
554
555 if (likely(uptodate)) {
556 loff_t i_size = i_size_read(inode);
557 pgoff_t end_index = i_size >> folio_shift(folio);
558
559 /*
560 * Zero out the remaining part if this range straddles
561 * i_size.
562 *
563 * Here we should only zero the range inside the folio,
564 * not touch anything else.
565 *
566 * NOTE: i_size is exclusive while end is inclusive.
567 */
568 if (folio_index(folio) == end_index && i_size <= end) {
569 u32 zero_start = max(offset_in_folio(folio, i_size),
570 offset_in_folio(folio, start));
571 u32 zero_len = offset_in_folio(folio, end) + 1 -
572 zero_start;
573
574 folio_zero_range(folio, zero_start, zero_len);
575 }
576 }
577
578 /* Update page status and unlock. */
579 end_folio_read(folio, uptodate, start, len);
580 }
581 bio_put(bio);
582 }
583
584 /*
585 * Populate every free slot in a provided array with folios using GFP_NOFS.
586 *
587 * @nr_folios: number of folios to allocate
588 * @folio_array: the array to fill with folios; any existing non-NULL entries in
589 * the array will be skipped
590 *
591 * Return: 0 if all folios were able to be allocated;
592 * -ENOMEM otherwise, the partially allocated folios would be freed and
593 * the array slots zeroed
594 */
595 int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
596 {
597 for (int i = 0; i < nr_folios; i++) {
598 if (folio_array[i])
599 continue;
600 folio_array[i] = folio_alloc(GFP_NOFS, 0);
601 if (!folio_array[i])
602 goto error;
603 }
604 return 0;
605 error:
606 for (int i = 0; i < nr_folios; i++) {
607 if (folio_array[i])
608 folio_put(folio_array[i]);
609 }
610 return -ENOMEM;
611 }
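
/*
 * Minimal usage sketch (not compiled, array size is made up): on failure the
 * helper has already dropped the partially allocated folios, so the caller
 * only needs to release them after a successful call.
 */
#if 0
	struct folio *folios[16] = { 0 };
	int ret;

	ret = btrfs_alloc_folio_array(ARRAY_SIZE(folios), folios);
	if (ret < 0)
		return ret;

	/* ... use the folios ... */

	for (int i = 0; i < ARRAY_SIZE(folios); i++)
		folio_put(folios[i]);
#endif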
612
613 /*
614 * Populate every free slot in a provided array with pages, using GFP_NOFS.
615 *
616 * @nr_pages: number of pages to allocate
617 * @page_array: the array to fill with pages; any existing non-null entries in
618 * the array will be skipped
619 * @nofail: whether to use the __GFP_NOFAIL flag
620 *
621 * Return: 0 if all pages were able to be allocated;
622 * -ENOMEM otherwise, the partially allocated pages would be freed and
623 * the array slots zeroed
624 */
625 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
626 bool nofail)
627 {
628 const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
629 unsigned int allocated;
630
631 for (allocated = 0; allocated < nr_pages;) {
632 unsigned int last = allocated;
633
634 allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
635 if (unlikely(allocated == last)) {
636 /* No progress, fail and do cleanup. */
637 for (int i = 0; i < allocated; i++) {
638 __free_page(page_array[i]);
639 page_array[i] = NULL;
640 }
641 return -ENOMEM;
642 }
643 }
644 return 0;
645 }
646
647 /*
648 * Populate needed folios for the extent buffer.
649 *
650 * For now, the folios populated are always in order 0 (aka, single page).
651 */
652 static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
653 {
654 struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
655 int num_pages = num_extent_pages(eb);
656 int ret;
657
658 ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
659 if (ret < 0)
660 return ret;
661
662 for (int i = 0; i < num_pages; i++)
663 eb->folios[i] = page_folio(page_array[i]);
664 eb->folio_size = PAGE_SIZE;
665 eb->folio_shift = PAGE_SHIFT;
666 return 0;
667 }
668
669 static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
670 struct folio *folio, u64 disk_bytenr,
671 unsigned int pg_offset)
672 {
673 struct bio *bio = &bio_ctrl->bbio->bio;
674 struct bio_vec *bvec = bio_last_bvec_all(bio);
675 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
676 struct folio *bv_folio = page_folio(bvec->bv_page);
677
678 if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
679 /*
680 * For compression, all IO should have its logical bytenr set
681 * to the starting bytenr of the compressed extent.
682 */
683 return bio->bi_iter.bi_sector == sector;
684 }
685
686 /*
687 * The contig check requires the following conditions to be met:
688 *
689 * 1) The folios belong to the same inode
690 * This is implied by the call chain.
691 *
692 * 2) The range has adjacent logical bytenr
693 *
694 * 3) The range has adjacent file offset
695 * This is required for the usage of btrfs_bio->file_offset.
696 */
697 return bio_end_sector(bio) == sector &&
698 folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len ==
699 folio_pos(folio) + pg_offset;
700 }
701
702 static void alloc_new_bio(struct btrfs_inode *inode,
703 struct btrfs_bio_ctrl *bio_ctrl,
704 u64 disk_bytenr, u64 file_offset)
705 {
706 struct btrfs_fs_info *fs_info = inode->root->fs_info;
707 struct btrfs_bio *bbio;
708
709 bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
710 bio_ctrl->end_io_func, NULL);
711 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
712 bbio->inode = inode;
713 bbio->file_offset = file_offset;
714 bio_ctrl->bbio = bbio;
715 bio_ctrl->len_to_oe_boundary = U32_MAX;
716
717 /* Limit data write bios to the ordered boundary. */
718 if (bio_ctrl->wbc) {
719 struct btrfs_ordered_extent *ordered;
720
721 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
722 if (ordered) {
723 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
724 ordered->file_offset +
725 ordered->disk_num_bytes - file_offset);
726 bbio->ordered = ordered;
727 }
728
729 /*
730 * Pick the last added device to support cgroup writeback. For
731 * multi-device file systems this means blk-cgroup policies have
732 * to always be set on the last added/replaced device.
733 * This is a bit odd but has been like that for a long time.
734 */
735 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
736 wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
737 }
738 }
739
740 /*
741 * @disk_bytenr: logical bytenr where the write will be
742 * @page: page to add to the bio
743 * @size: portion of page that we want to write to
744 * @pg_offset: offset of the new bio or to check whether we are adding
745 * a contiguous page to the previous one
746 *
747 * This will either add the page into the existing @bio_ctrl->bbio, or allocate a
748 * new one in @bio_ctrl->bbio.
749 * The mirror number for this IO should already be initialized in
750 * @bio_ctrl->mirror_num.
751 */
752 static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
753 u64 disk_bytenr, struct folio *folio,
754 size_t size, unsigned long pg_offset)
755 {
756 struct btrfs_inode *inode = folio_to_inode(folio);
757
758 ASSERT(pg_offset + size <= PAGE_SIZE);
759 ASSERT(bio_ctrl->end_io_func);
760
761 if (bio_ctrl->bbio &&
762 !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
763 submit_one_bio(bio_ctrl);
764
765 do {
766 u32 len = size;
767
768 /* Allocate new bio if needed */
769 if (!bio_ctrl->bbio) {
770 alloc_new_bio(inode, bio_ctrl, disk_bytenr,
771 folio_pos(folio) + pg_offset);
772 }
773
774 /* Cap to the current ordered extent boundary if there is one. */
775 if (len > bio_ctrl->len_to_oe_boundary) {
776 ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
777 ASSERT(is_data_inode(inode));
778 len = bio_ctrl->len_to_oe_boundary;
779 }
780
781 if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
782 /* bio full: move on to a new one */
783 submit_one_bio(bio_ctrl);
784 continue;
785 }
786
787 if (bio_ctrl->wbc)
788 wbc_account_cgroup_owner(bio_ctrl->wbc, &folio->page,
789 len);
790
791 size -= len;
792 pg_offset += len;
793 disk_bytenr += len;
794
795 /*
796 * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
797 * sector aligned. alloc_new_bio() then sets it to the end of
798 * our ordered extent for writes into zoned devices.
799 *
800 * When len_to_oe_boundary is tracking an ordered extent, we
801 * trust the ordered extent code to align things properly, and
802 * the check above to cap our write to the ordered extent
803 * boundary is correct.
804 *
805 * When len_to_oe_boundary is U32_MAX, the cap above would
806 * result in a 4095 byte IO for the last folio right before
807 * we hit the bio limit of UINT_MAX. bio_add_folio() has all
808 * the checks required to make sure we don't overflow the bio,
809 * and we should just ignore len_to_oe_boundary completely
810 * unless we're using it to track an ordered extent.
811 *
812 * It's pretty hard to make a bio sized U32_MAX, but it can
813 * happen when the page cache is able to feed us contiguous
814 * folios for large extents.
815 */
816 if (bio_ctrl->len_to_oe_boundary != U32_MAX)
817 bio_ctrl->len_to_oe_boundary -= len;
818
819 /* Ordered extent boundary: move on to a new bio. */
820 if (bio_ctrl->len_to_oe_boundary == 0)
821 submit_one_bio(bio_ctrl);
822 } while (size);
823 }
824
825 static int attach_extent_buffer_folio(struct extent_buffer *eb,
826 struct folio *folio,
827 struct btrfs_subpage *prealloc)
828 {
829 struct btrfs_fs_info *fs_info = eb->fs_info;
830 int ret = 0;
831
832 /*
833 * If the page is mapped to the btree inode, we should hold the private
834 * lock to prevent races.
835 * For cloned or dummy extent buffers, their pages are not mapped and
836 * will not race with any other ebs.
837 */
838 if (folio->mapping)
839 lockdep_assert_held(&folio->mapping->i_private_lock);
840
841 if (fs_info->nodesize >= PAGE_SIZE) {
842 if (!folio_test_private(folio))
843 folio_attach_private(folio, eb);
844 else
845 WARN_ON(folio_get_private(folio) != eb);
846 return 0;
847 }
848
849 /* Already mapped, just free prealloc */
850 if (folio_test_private(folio)) {
851 btrfs_free_subpage(prealloc);
852 return 0;
853 }
854
855 if (prealloc)
856 /* Has preallocated memory for subpage */
857 folio_attach_private(folio, prealloc);
858 else
859 /* Do new allocation to attach subpage */
860 ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
861 return ret;
862 }
863
864 int set_page_extent_mapped(struct page *page)
865 {
866 return set_folio_extent_mapped(page_folio(page));
867 }
868
869 int set_folio_extent_mapped(struct folio *folio)
870 {
871 struct btrfs_fs_info *fs_info;
872
873 ASSERT(folio->mapping);
874
875 if (folio_test_private(folio))
876 return 0;
877
878 fs_info = folio_to_fs_info(folio);
879
880 if (btrfs_is_subpage(fs_info, folio->mapping))
881 return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
882
883 folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
884 return 0;
885 }
886
887 void clear_folio_extent_mapped(struct folio *folio)
888 {
889 struct btrfs_fs_info *fs_info;
890
891 ASSERT(folio->mapping);
892
893 if (!folio_test_private(folio))
894 return;
895
896 fs_info = folio_to_fs_info(folio);
897 if (btrfs_is_subpage(fs_info, folio->mapping))
898 return btrfs_detach_subpage(fs_info, folio);
899
900 folio_detach_private(folio);
901 }
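
/*
 * Illustrative sketch (not compiled): data paths attach the extent-mapped
 * marker before doing IO on a folio, exactly as btrfs_do_readpage() below
 * does first, and roughly how the release/invalidate side detaches it again.
 */
#if 0
	ret = set_folio_extent_mapped(folio);
	if (ret < 0) {
		folio_unlock(folio);
		return ret;
	}
	/* ... build and submit bios against the folio ... */

	/* Later, when the folio is released or invalidated: */
	clear_folio_extent_mapped(folio);
#endif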
902
903 static struct extent_map *__get_extent_map(struct inode *inode,
904 struct folio *folio, u64 start,
905 u64 len, struct extent_map **em_cached)
906 {
907 struct extent_map *em;
908 struct extent_state *cached_state = NULL;
909
910 ASSERT(em_cached);
911
912 if (*em_cached) {
913 em = *em_cached;
914 if (extent_map_in_tree(em) && start >= em->start &&
915 start < extent_map_end(em)) {
916 refcount_inc(&em->refs);
917 return em;
918 }
919
920 free_extent_map(em);
921 *em_cached = NULL;
922 }
923
924 btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state);
925 em = btrfs_get_extent(BTRFS_I(inode), folio, start, len);
926 if (!IS_ERR(em)) {
927 BUG_ON(*em_cached);
928 refcount_inc(&em->refs);
929 *em_cached = em;
930 }
931 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state);
932
933 return em;
934 }
935 /*
936 * Basic readpage implementation. Locked extent state structs are inserted
937 * into the tree and removed when the IO is done (by the end_io
938 * handlers).
939 * XXX JDM: This needs looking at to ensure proper page locking.
940 * Return 0 on success, otherwise return an error.
941 */
942 static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
943 struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
944 {
945 struct inode *inode = folio->mapping->host;
946 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
947 u64 start = folio_pos(folio);
948 const u64 end = start + PAGE_SIZE - 1;
949 u64 cur = start;
950 u64 extent_offset;
951 u64 last_byte = i_size_read(inode);
952 u64 block_start;
953 struct extent_map *em;
954 int ret = 0;
955 size_t pg_offset = 0;
956 size_t iosize;
957 size_t blocksize = fs_info->sectorsize;
958
959 ret = set_folio_extent_mapped(folio);
960 if (ret < 0) {
961 folio_unlock(folio);
962 return ret;
963 }
964
965 if (folio->index == last_byte >> folio_shift(folio)) {
966 size_t zero_offset = offset_in_folio(folio, last_byte);
967
968 if (zero_offset) {
969 iosize = folio_size(folio) - zero_offset;
970 folio_zero_range(folio, zero_offset, iosize);
971 }
972 }
973 bio_ctrl->end_io_func = end_bbio_data_read;
974 begin_folio_read(fs_info, folio);
975 while (cur <= end) {
976 enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
977 bool force_bio_submit = false;
978 u64 disk_bytenr;
979
980 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
981 if (cur >= last_byte) {
982 iosize = folio_size(folio) - pg_offset;
983 folio_zero_range(folio, pg_offset, iosize);
984 end_folio_read(folio, true, cur, iosize);
985 break;
986 }
987 em = __get_extent_map(inode, folio, cur, end - cur + 1,
988 em_cached);
989 if (IS_ERR(em)) {
990 end_folio_read(folio, false, cur, end + 1 - cur);
991 return PTR_ERR(em);
992 }
993 extent_offset = cur - em->start;
994 BUG_ON(extent_map_end(em) <= cur);
995 BUG_ON(end < cur);
996
997 compress_type = extent_map_compression(em);
998
999 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1000 iosize = ALIGN(iosize, blocksize);
1001 if (compress_type != BTRFS_COMPRESS_NONE)
1002 disk_bytenr = em->disk_bytenr;
1003 else
1004 disk_bytenr = extent_map_block_start(em) + extent_offset;
1005 block_start = extent_map_block_start(em);
1006 if (em->flags & EXTENT_FLAG_PREALLOC)
1007 block_start = EXTENT_MAP_HOLE;
1008
1009 /*
1010 * If we have a file range that points to a compressed extent
1011 * and it's followed by a consecutive file range that points
1012 * to the same compressed extent (possibly with a different
1013 * offset and/or length, so it either points to the whole extent
1014 * or only part of it), we must make sure we do not submit a
1015 * single bio to populate the folios for the 2 ranges because
1016 * this makes the compressed extent read zero out the folios
1017 * belonging to the 2nd range. Imagine the following scenario:
1018 *
1019 * File layout
1020 * [0 - 8K] [8K - 24K]
1021 * | |
1022 * | |
1023 * points to extent X, points to extent X,
1024 * offset 4K, length of 8K offset 0, length 16K
1025 *
1026 * [extent X, compressed length = 4K uncompressed length = 16K]
1027 *
1028 * If the bio to read the compressed extent covers both ranges,
1029 * it will decompress extent X into the folios belonging to the
1030 * first range and then it will stop, zeroing out the remaining
1031 * folios that belong to the other range that points to extent X.
1032 * So here we make sure we submit 2 bios, one for the first
1033 * range and another one for the second range. Both will target
1034 * the same physical extent from disk, but we can't currently
1035 * make the compressed bio endio callback populate the folios
1036 * for both ranges because each compressed bio is tightly
1037 * coupled with a single extent map, and each range can have
1038 * an extent map with a different offset value relative to the
1039 * uncompressed data of our extent and different lengths. This
1040 * is a corner case so we prioritize correctness over
1041 * non-optimal behavior (submitting 2 bios for the same extent).
1042 */
1043 if (compress_type != BTRFS_COMPRESS_NONE &&
1044 prev_em_start && *prev_em_start != (u64)-1 &&
1045 *prev_em_start != em->start)
1046 force_bio_submit = true;
1047
1048 if (prev_em_start)
1049 *prev_em_start = em->start;
1050
1051 free_extent_map(em);
1052 em = NULL;
1053
1054 /* we've found a hole, just zero and go on */
1055 if (block_start == EXTENT_MAP_HOLE) {
1056 folio_zero_range(folio, pg_offset, iosize);
1057
1058 end_folio_read(folio, true, cur, iosize);
1059 cur = cur + iosize;
1060 pg_offset += iosize;
1061 continue;
1062 }
1063 /* the get_extent function already copied into the folio */
1064 if (block_start == EXTENT_MAP_INLINE) {
1065 end_folio_read(folio, true, cur, iosize);
1066 cur = cur + iosize;
1067 pg_offset += iosize;
1068 continue;
1069 }
1070
1071 if (bio_ctrl->compress_type != compress_type) {
1072 submit_one_bio(bio_ctrl);
1073 bio_ctrl->compress_type = compress_type;
1074 }
1075
1076 if (force_bio_submit)
1077 submit_one_bio(bio_ctrl);
1078 submit_extent_folio(bio_ctrl, disk_bytenr, folio, iosize,
1079 pg_offset);
1080 cur = cur + iosize;
1081 pg_offset += iosize;
1082 }
1083
1084 return 0;
1085 }
1086
1087 int btrfs_read_folio(struct file *file, struct folio *folio)
1088 {
1089 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1090 struct extent_map *em_cached = NULL;
1091 int ret;
1092
1093 ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
1094 free_extent_map(em_cached);
1095
1096 /*
1097 * If btrfs_do_readpage() failed we will want to submit the assembled
1098 * bio to do the cleanup.
1099 */
1100 submit_one_bio(&bio_ctrl);
1101 return ret;
1102 }
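
/*
 * Sketch (not compiled) of how the readahead side reuses the cached extent
 * map and @prev_em_start across consecutive folios before a single final
 * submit; this is roughly what extent_readahead() does.
 */
#if 0
	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
	struct extent_map *em_cached = NULL;
	u64 prev_em_start = (u64)-1;
	struct folio *folio;

	while ((folio = readahead_folio(rac)) != NULL)
		btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);

	if (em_cached)
		free_extent_map(em_cached);
	submit_one_bio(&bio_ctrl);
#endif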
1103
1104 /*
1105 * helper for extent_writepage(), doing all of the delayed allocation setup.
1106 *
1107 * This returns 1 if the btrfs_run_delalloc_range function did all the work required
1108 * to write the page (copy into inline extent). In this case the IO has
1109 * been started and the page is already unlocked.
1110 *
1111 * This returns 0 if all went well (page still locked)
1112 * This returns < 0 if there were errors (page still locked)
1113 */
1114 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1115 struct folio *folio,
1116 struct btrfs_bio_ctrl *bio_ctrl)
1117 {
1118 struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
1119 struct writeback_control *wbc = bio_ctrl->wbc;
1120 const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
1121 const u64 page_start = folio_pos(folio);
1122 const u64 page_end = page_start + folio_size(folio) - 1;
1123 /*
1124 * Save the last found delalloc end. The delalloc end can go beyond the
1125 * page boundary, so we cannot rely on the subpage bitmap to locate the
1126 * last delalloc end.
1127 */
1128 u64 last_delalloc_end = 0;
1129 u64 delalloc_start = page_start;
1130 u64 delalloc_end = page_end;
1131 u64 delalloc_to_write = 0;
1132 int ret = 0;
1133
1134 /* Save the dirty bitmap as our submission bitmap will be a subset of it. */
1135 if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
1136 ASSERT(fs_info->sectors_per_page > 1);
1137 btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
1138 } else {
1139 bio_ctrl->submit_bitmap = 1;
1140 }
1141
1142 /* Lock all (subpage) delalloc ranges inside the folio first. */
1143 while (delalloc_start < page_end) {
1144 delalloc_end = page_end;
1145 if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
1146 &delalloc_start, &delalloc_end)) {
1147 delalloc_start = delalloc_end + 1;
1148 continue;
1149 }
1150 btrfs_folio_set_writer_lock(fs_info, folio, delalloc_start,
1151 min(delalloc_end, page_end) + 1 -
1152 delalloc_start);
1153 last_delalloc_end = delalloc_end;
1154 delalloc_start = delalloc_end + 1;
1155 }
1156 delalloc_start = page_start;
1157
1158 if (!last_delalloc_end)
1159 goto out;
1160
1161 /* Run the delalloc ranges for the above locked ranges. */
1162 while (delalloc_start < page_end) {
1163 u64 found_start;
1164 u32 found_len;
1165 bool found;
1166
1167 if (!is_subpage) {
1168 /*
1169 * For non-subpage case, the found delalloc range must
1170 * cover this folio and there must be only one locked
1171 * delalloc range.
1172 */
1173 found_start = page_start;
1174 found_len = last_delalloc_end + 1 - found_start;
1175 found = true;
1176 } else {
1177 found = btrfs_subpage_find_writer_locked(fs_info, folio,
1178 delalloc_start, &found_start, &found_len);
1179 }
1180 if (!found)
1181 break;
1182 /*
1183 * If the found range covers the last sector, the delalloc range may
1184 * end beyond the folio boundary; use the saved delalloc_end
1185 * instead.
1186 */
1187 if (found_start + found_len >= page_end)
1188 found_len = last_delalloc_end + 1 - found_start;
1189
1190 if (ret >= 0) {
1191 /* No errors hit so far, run the current delalloc range. */
1192 ret = btrfs_run_delalloc_range(inode, folio,
1193 found_start,
1194 found_start + found_len - 1,
1195 wbc);
1196 } else {
1197 /*
1198 * We've hit an error during previous delalloc range,
1199 * have to cleanup the remaining locked ranges.
1200 */
1201 unlock_extent(&inode->io_tree, found_start,
1202 found_start + found_len - 1, NULL);
1203 __unlock_for_delalloc(&inode->vfs_inode, folio,
1204 found_start,
1205 found_start + found_len - 1);
1206 }
1207
1208 /*
1209 * We have some ranges that are going to be submitted asynchronously
1210 * (compression or inline). These ranges have their own control
1211 * on when to unlock the pages. We should not touch them
1212 * anymore, so clear the range from the submission bitmap.
1213 */
1214 if (ret > 0) {
1215 unsigned int start_bit = (found_start - page_start) >>
1216 fs_info->sectorsize_bits;
1217 unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
1218 page_start) >> fs_info->sectorsize_bits;
1219 bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
1220 }
1221 /*
1222 * Above btrfs_run_delalloc_range() may have unlocked the folio,
1223 * thus for the last range, we cannot touch the folio anymore.
1224 */
1225 if (found_start + found_len >= last_delalloc_end + 1)
1226 break;
1227
1228 delalloc_start = found_start + found_len;
1229 }
1230 if (ret < 0)
1231 return ret;
1232 out:
1233 if (last_delalloc_end)
1234 delalloc_end = last_delalloc_end;
1235 else
1236 delalloc_end = page_end;
1237 /*
1238 * delalloc_end is already one less than the total length, so
1239 * we don't subtract one from PAGE_SIZE
1240 */
1241 delalloc_to_write +=
1242 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1243
1244 /*
1245 * If all ranges are submitted asynchronously, we just need to account
1246 * for them here.
1247 */
1248 if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
1249 wbc->nr_to_write -= delalloc_to_write;
1250 return 1;
1251 }
1252
1253 if (wbc->nr_to_write < delalloc_to_write) {
1254 int thresh = 8192;
1255
1256 if (delalloc_to_write < thresh * 2)
1257 thresh = delalloc_to_write;
1258 wbc->nr_to_write = min_t(u64, delalloc_to_write,
1259 thresh);
1260 }
1261
1262 return 0;
1263 }
1264
1265 /*
1266 * Return 0 if we have submitted or queued the sector for submission.
1267 * Return <0 for critical errors.
1268 *
1269 * Caller should make sure filepos < i_size and handle filepos >= i_size case.
1270 */
1271 static int submit_one_sector(struct btrfs_inode *inode,
1272 struct folio *folio,
1273 u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
1274 loff_t i_size)
1275 {
1276 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1277 struct extent_map *em;
1278 u64 block_start;
1279 u64 disk_bytenr;
1280 u64 extent_offset;
1281 u64 em_end;
1282 const u32 sectorsize = fs_info->sectorsize;
1283
1284 ASSERT(IS_ALIGNED(filepos, sectorsize));
1285
1286 /* @filepos >= i_size case should be handled by the caller. */
1287 ASSERT(filepos < i_size);
1288
1289 em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
1290 if (IS_ERR(em))
1291 return PTR_ERR_OR_ZERO(em);
1292
1293 extent_offset = filepos - em->start;
1294 em_end = extent_map_end(em);
1295 ASSERT(filepos <= em_end);
1296 ASSERT(IS_ALIGNED(em->start, sectorsize));
1297 ASSERT(IS_ALIGNED(em->len, sectorsize));
1298
1299 block_start = extent_map_block_start(em);
1300 disk_bytenr = extent_map_block_start(em) + extent_offset;
1301
1302 ASSERT(!extent_map_is_compressed(em));
1303 ASSERT(block_start != EXTENT_MAP_HOLE);
1304 ASSERT(block_start != EXTENT_MAP_INLINE);
1305
1306 free_extent_map(em);
1307 em = NULL;
1308
1309 btrfs_set_range_writeback(inode, filepos, filepos + sectorsize - 1);
1310 /*
1311 * Above call should set the whole folio with writeback flag, even
1312 * just for a single subpage sector.
1313 * As long as the folio is properly locked and the range is correct,
1314 * we should always get the folio with writeback flag.
1315 */
1316 ASSERT(folio_test_writeback(folio));
1317
1318 /*
1319 * Although the PageDirty bit is cleared before entering this
1320 * function, the subpage dirty bit is not cleared.
1321 * So clear the subpage dirty bit here so next time we won't submit the
1322 * folio for a range already written to disk.
1323 */
1324 btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
1325 submit_extent_folio(bio_ctrl, disk_bytenr, folio,
1326 sectorsize, filepos - folio_pos(folio));
1327 return 0;
1328 }
1329
1330 /*
1331 * Helper for extent_writepage(). This calls the writepage start hooks,
1332 * and does the loop to map the page into extents and bios.
1333 *
1334 * We return 1 if the IO is started and the page is unlocked,
1335 * 0 if all went well (page still locked)
1336 * < 0 if there were errors (page still locked)
1337 */
1338 static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
1339 struct folio *folio,
1340 u64 start, u32 len,
1341 struct btrfs_bio_ctrl *bio_ctrl,
1342 loff_t i_size)
1343 {
1344 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1345 unsigned long range_bitmap = 0;
1346 bool submitted_io = false;
1347 const u64 folio_start = folio_pos(folio);
1348 u64 cur;
1349 int bit;
1350 int ret = 0;
1351
1352 ASSERT(start >= folio_start &&
1353 start + len <= folio_start + folio_size(folio));
1354
1355 ret = btrfs_writepage_cow_fixup(folio);
1356 if (ret) {
1357 /* Fixup worker will requeue */
1358 folio_redirty_for_writepage(bio_ctrl->wbc, folio);
1359 folio_unlock(folio);
1360 return 1;
1361 }
1362
1363 for (cur = start; cur < start + len; cur += fs_info->sectorsize)
1364 set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
1365 bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
1366 fs_info->sectors_per_page);
1367
1368 bio_ctrl->end_io_func = end_bbio_data_write;
1369
1370 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1371 cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
1372
1373 if (cur >= i_size) {
1374 btrfs_mark_ordered_io_finished(inode, folio, cur,
1375 start + len - cur, true);
1376 /*
1377 * This range is beyond i_size, thus we don't need to
1378 * bother writing back.
1379 * But we still need to clear the dirty subpage bit, or
1380 * the next time the folio gets dirtied, we will try to
1381 * writeback the sectors with subpage dirty bits,
1382 * causing writeback without ordered extent.
1383 */
1384 btrfs_folio_clear_dirty(fs_info, folio, cur,
1385 start + len - cur);
1386 break;
1387 }
1388 ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
1389 if (ret < 0)
1390 goto out;
1391 submitted_io = true;
1392 }
1393
1394 btrfs_folio_assert_not_dirty(fs_info, folio, start, len);
1395 out:
1396 /*
1397 * If we didn't submit any sector (>= i_size), the folio dirty flag gets
1398 * cleared but PAGECACHE_TAG_DIRTY is not cleared (it is only cleared
1399 * by folio_start_writeback() if the folio is not dirty).
1400 *
1401 * Here we set writeback and clear for the range. If the full folio
1402 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
1403 */
1404 if (!submitted_io) {
1405 btrfs_folio_set_writeback(fs_info, folio, start, len);
1406 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1407 }
1408 return ret;
1409 }
1410
1411 /*
1412 * the writepage semantics are similar to regular writepage. extent
1413 * records are inserted to lock ranges in the tree, and as dirty areas
1414 * are found, they are marked writeback. Then the lock bits are removed
1415 * and the end_io handler clears the writeback ranges
1416 *
1417 * Return 0 if everything goes well.
1418 * Return <0 for error.
1419 */
1420 static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
1421 {
1422 struct inode *inode = folio->mapping->host;
1423 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1424 const u64 page_start = folio_pos(folio);
1425 int ret;
1426 size_t pg_offset;
1427 loff_t i_size = i_size_read(inode);
1428 unsigned long end_index = i_size >> PAGE_SHIFT;
1429
1430 trace_extent_writepage(folio, inode, bio_ctrl->wbc);
1431
1432 WARN_ON(!folio_test_locked(folio));
1433
1434 pg_offset = offset_in_folio(folio, i_size);
1435 if (folio->index > end_index ||
1436 (folio->index == end_index && !pg_offset)) {
1437 folio_invalidate(folio, 0, folio_size(folio));
1438 folio_unlock(folio);
1439 return 0;
1440 }
1441
1442 if (folio->index == end_index)
1443 folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
1444
1445 /*
1446 * Default to unlocking the whole folio.
1447 * The proper bitmap can only be initialized by writepage_delalloc().
1448 */
1449 bio_ctrl->submit_bitmap = (unsigned long)-1;
1450 ret = set_folio_extent_mapped(folio);
1451 if (ret < 0)
1452 goto done;
1453
1454 ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl);
1455 if (ret == 1)
1456 return 0;
1457 if (ret)
1458 goto done;
1459
1460 ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
1461 PAGE_SIZE, bio_ctrl, i_size);
1462 if (ret == 1)
1463 return 0;
1464
1465 bio_ctrl->wbc->nr_to_write--;
1466
1467 done:
1468 if (ret) {
1469 btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
1470 page_start, PAGE_SIZE, !ret);
1471 mapping_set_error(folio->mapping, ret);
1472 }
1473
1474 /*
1475 * Only unlock ranges that are submitted, as there can be some async
1476 * submitted ranges inside the folio.
1477 */
1478 btrfs_folio_end_writer_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
1479 ASSERT(ret <= 0);
1480 return ret;
1481 }
1482
1483 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1484 {
1485 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1486 TASK_UNINTERRUPTIBLE);
1487 }
1488
1489 /*
1490 * Lock extent buffer status and pages for writeback.
1491 *
1492 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1493 * extent buffer is not dirty).
1494 * Return %true if the extent buffer is submitted to bio.
1495 */
1496 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1497 struct writeback_control *wbc)
1498 {
1499 struct btrfs_fs_info *fs_info = eb->fs_info;
1500 bool ret = false;
1501
1502 btrfs_tree_lock(eb);
1503 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1504 btrfs_tree_unlock(eb);
1505 if (wbc->sync_mode != WB_SYNC_ALL)
1506 return false;
1507 wait_on_extent_buffer_writeback(eb);
1508 btrfs_tree_lock(eb);
1509 }
1510
1511 /*
1512 * We need to do this to prevent races in people who check if the eb is
1513 * under IO since we can end up having no IO bits set for a short period
1514 * of time.
1515 */
1516 spin_lock(&eb->refs_lock);
1517 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1518 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1519 spin_unlock(&eb->refs_lock);
1520 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1521 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1522 -eb->len,
1523 fs_info->dirty_metadata_batch);
1524 ret = true;
1525 } else {
1526 spin_unlock(&eb->refs_lock);
1527 }
1528 btrfs_tree_unlock(eb);
1529 return ret;
1530 }
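
/*
 * Illustrative sketch (not compiled): metadata writeback only submits an
 * extent buffer once this helper has cleared its dirty bit, roughly the
 * pattern used by the btree writepages path:
 */
#if 0
	if (lock_extent_buffer_for_io(eb, wbc)) {
		write_one_eb(eb, wbc);
		submitted++;
	}
	free_extent_buffer(eb);
#endif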
1531
1532 static void set_btree_ioerr(struct extent_buffer *eb)
1533 {
1534 struct btrfs_fs_info *fs_info = eb->fs_info;
1535
1536 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1537
1538 /*
1539 * A read may stumble upon this buffer later, make sure that it gets an
1540 * error and knows there was an error.
1541 */
1542 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1543
1544 /*
1545 * We need to set the mapping with the io error as well because a write
1546 * error will flip the file system readonly, and then syncfs() will
1547 * return a 0 because we are readonly if we don't modify the err seq for
1548 * the superblock.
1549 */
1550 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1551
1552 /*
1553 * If writeback for a btree extent that doesn't belong to a log tree
1554 * failed, increment the counter transaction->eb_write_errors.
1555 * We do this because while the transaction is running and before it's
1556 * committing (when we call filemap_fdata[write|wait]_range against
1557 * the btree inode), we might have
1558 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1559 * returns an error or an error happens during writeback, when we're
1560 * committing the transaction we wouldn't know about it, since the pages
1561 * can be no longer dirty nor marked anymore for writeback (if a
1562 * subsequent modification to the extent buffer didn't happen before the
1563 * transaction commit), which makes filemap_fdata[write|wait]_range not
1564 * able to find the pages which contain errors at transaction
1565 * commit time. So if this happens we must abort the transaction,
1566 * otherwise we commit a super block with btree roots that point to
1567 * btree nodes/leafs whose content on disk is invalid - either garbage
1568 * or the content of some node/leaf from a past generation that got
1569 * cowed or deleted and is no longer valid.
1570 *
1571 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1572 * not be enough - we need to distinguish between log tree extents vs
1573 * non-log tree extents, and the next filemap_fdatawait_range() call
1574 * will catch and clear such errors in the mapping - and that call might
1575 * be from a log sync and not from a transaction commit. Also, checking
1576 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1577 * not done and would not be reliable - the eb might have been released
1578 * from memory and reading it back again means that flag would not be
1579 * set (since it's a runtime flag, not persisted on disk).
1580 *
1581 * Using the flags below in the btree inode also makes us achieve the
1582 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1583 * writeback for all dirty pages and before filemap_fdatawait_range()
1584 * is called, the writeback for all dirty pages had already finished
1585 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1586 * filemap_fdatawait_range() would return success, as it could not know
1587 * that writeback errors happened (the pages were no longer tagged for
1588 * writeback).
1589 */
1590 switch (eb->log_index) {
1591 case -1:
1592 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1593 break;
1594 case 0:
1595 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1596 break;
1597 case 1:
1598 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1599 break;
1600 default:
1601 BUG(); /* unexpected, logic error */
1602 }
1603 }
1604
1605 /*
1606 * The endio specific version which won't touch any unsafe spinlock in endio
1607 * context.
1608 */
1609 static struct extent_buffer *find_extent_buffer_nolock(
1610 const struct btrfs_fs_info *fs_info, u64 start)
1611 {
1612 struct extent_buffer *eb;
1613
1614 rcu_read_lock();
1615 eb = radix_tree_lookup(&fs_info->buffer_radix,
1616 start >> fs_info->sectorsize_bits);
1617 if (eb && atomic_inc_not_zero(&eb->refs)) {
1618 rcu_read_unlock();
1619 return eb;
1620 }
1621 rcu_read_unlock();
1622 return NULL;
1623 }
1624
1625 static void end_bbio_meta_write(struct btrfs_bio *bbio)
1626 {
1627 struct extent_buffer *eb = bbio->private;
1628 struct btrfs_fs_info *fs_info = eb->fs_info;
1629 bool uptodate = !bbio->bio.bi_status;
1630 struct folio_iter fi;
1631 u32 bio_offset = 0;
1632
1633 if (!uptodate)
1634 set_btree_ioerr(eb);
1635
1636 bio_for_each_folio_all(fi, &bbio->bio) {
1637 u64 start = eb->start + bio_offset;
1638 struct folio *folio = fi.folio;
1639 u32 len = fi.length;
1640
1641 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1642 bio_offset += len;
1643 }
1644
1645 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1646 smp_mb__after_atomic();
1647 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1648
1649 bio_put(&bbio->bio);
1650 }
1651
1652 static void prepare_eb_write(struct extent_buffer *eb)
1653 {
1654 u32 nritems;
1655 unsigned long start;
1656 unsigned long end;
1657
1658 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1659
1660 /* Zero the parts of the btree block beyond nritems to avoid stale content */
1661 nritems = btrfs_header_nritems(eb);
1662 if (btrfs_header_level(eb) > 0) {
1663 end = btrfs_node_key_ptr_offset(eb, nritems);
1664 memzero_extent_buffer(eb, end, eb->len - end);
1665 } else {
1666 /*
1667 * Leaf:
1668 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1669 */
1670 start = btrfs_item_nr_offset(eb, nritems);
1671 end = btrfs_item_nr_offset(eb, 0);
1672 if (nritems == 0)
1673 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1674 else
1675 end += btrfs_item_offset(eb, nritems - 1);
1676 memzero_extent_buffer(eb, start, end - start);
1677 }
1678 }
1679
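/*
 * Worked example (illustrative, not from the original source): for a leaf
 * with nritems == 2, the range zeroed above starts at the end of the item
 * array (btrfs_item_nr_offset(eb, 2)) and ends at the start of the
 * lowest-offset item data (btrfs_item_nr_offset(eb, 0) +
 * btrfs_item_offset(eb, 1)), i.e. the unused gap in the middle of the leaf.
 * With nritems == 0 the whole data area up to BTRFS_LEAF_DATA_SIZE() is
 * zeroed.
 */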
1680 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1681 struct writeback_control *wbc)
1682 {
1683 struct btrfs_fs_info *fs_info = eb->fs_info;
1684 struct btrfs_bio *bbio;
1685
1686 prepare_eb_write(eb);
1687
1688 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1689 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1690 eb->fs_info, end_bbio_meta_write, eb);
1691 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1692 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1693 wbc_init_bio(wbc, &bbio->bio);
1694 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1695 bbio->file_offset = eb->start;
1696 if (fs_info->nodesize < PAGE_SIZE) {
1697 struct folio *folio = eb->folios[0];
1698 bool ret;
1699
1700 folio_lock(folio);
1701 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1702 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1703 eb->len)) {
1704 folio_clear_dirty_for_io(folio);
1705 wbc->nr_to_write--;
1706 }
1707 ret = bio_add_folio(&bbio->bio, folio, eb->len,
1708 eb->start - folio_pos(folio));
1709 ASSERT(ret);
1710 wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1711 folio_unlock(folio);
1712 } else {
1713 int num_folios = num_extent_folios(eb);
1714
1715 for (int i = 0; i < num_folios; i++) {
1716 struct folio *folio = eb->folios[i];
1717 bool ret;
1718
1719 folio_lock(folio);
1720 folio_clear_dirty_for_io(folio);
1721 folio_start_writeback(folio);
1722 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1723 ASSERT(ret);
1724 wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1725 eb->folio_size);
1726 wbc->nr_to_write -= folio_nr_pages(folio);
1727 folio_unlock(folio);
1728 }
1729 }
1730 btrfs_submit_bbio(bbio, 0);
1731 }
1732
1733 /*
1734 * Submit one subpage btree page.
1735 *
1736 * The main differences from submit_eb_page() are:
1737 * - Page locking
1738 * For subpage, we don't rely on page locking at all.
1739 *
1740 * - Flush write bio
1741 * We only flush the bio if we may be unable to fit the current extent
1742 * buffer into the current bio.
1743 *
1744 * Return >=0 for the number of submitted extent buffers.
1745 * Return <0 for fatal error.
1746 */
1747 static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
1748 {
1749 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1750 int submitted = 0;
1751 u64 folio_start = folio_pos(folio);
1752 int bit_start = 0;
1753 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1754
1755 /* Lock and write each dirty extent buffer in the range */
1756 while (bit_start < fs_info->sectors_per_page) {
1757 struct btrfs_subpage *subpage = folio_get_private(folio);
1758 struct extent_buffer *eb;
1759 unsigned long flags;
1760 u64 start;
1761
1762 /*
1763 * Take private lock to ensure the subpage won't be detached
1764 * in the meantime.
1765 */
1766 spin_lock(&folio->mapping->i_private_lock);
1767 if (!folio_test_private(folio)) {
1768 spin_unlock(&folio->mapping->i_private_lock);
1769 break;
1770 }
1771 spin_lock_irqsave(&subpage->lock, flags);
1772 if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
1773 subpage->bitmaps)) {
1774 spin_unlock_irqrestore(&subpage->lock, flags);
1775 spin_unlock(&folio->mapping->i_private_lock);
1776 bit_start++;
1777 continue;
1778 }
1779
1780 start = folio_start + bit_start * fs_info->sectorsize;
1781 bit_start += sectors_per_node;
1782
1783 /*
1784 * Here we just want to grab the eb without touching extra
1785 * spin locks, so call find_extent_buffer_nolock().
1786 */
1787 eb = find_extent_buffer_nolock(fs_info, start);
1788 spin_unlock_irqrestore(&subpage->lock, flags);
1789 spin_unlock(&folio->mapping->i_private_lock);
1790
1791 /*
1792 * The eb has already reached 0 refs, thus find_extent_buffer_nolock()
1793 * doesn't return it. We don't need to write back such eb
1794 * anyway.
1795 */
1796 if (!eb)
1797 continue;
1798
1799 if (lock_extent_buffer_for_io(eb, wbc)) {
1800 write_one_eb(eb, wbc);
1801 submitted++;
1802 }
1803 free_extent_buffer(eb);
1804 }
1805 return submitted;
1806 }
1807
1808 /*
1809 * Submit all page(s) of one extent buffer.
1810 *
1811 * @folio: the folio of one extent buffer
1812 * @ctx: the write context; if the current folio belongs to ctx->eb, we
1813 * don't need to submit it again
1814 *
1815 * The caller should pass each folio in bytenr order, and here we use
1816 * ctx->eb to determine if we have already submitted the folios of one extent buffer.
1817 *
1818 * If we have, we just skip until we hit a new folio that doesn't belong to
1819 * the current ctx->eb.
1820 *
1821 * If not, we submit all the page(s) of the extent buffer.
1822 *
1823 * Return >0 if we have submitted the extent buffer successfully.
1824 * Return 0 if we don't need to submit the page, as it's already submitted by
1825 * previous call.
1826 * Return <0 for fatal error.
1827 */
1828 static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx)
1829 {
1830 struct writeback_control *wbc = ctx->wbc;
1831 struct address_space *mapping = folio->mapping;
1832 struct extent_buffer *eb;
1833 int ret;
1834
1835 if (!folio_test_private(folio))
1836 return 0;
1837
1838 if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
1839 return submit_eb_subpage(folio, wbc);
1840
1841 spin_lock(&mapping->i_private_lock);
1842 if (!folio_test_private(folio)) {
1843 spin_unlock(&mapping->i_private_lock);
1844 return 0;
1845 }
1846
1847 eb = folio_get_private(folio);
1848
1849 /*
1850 * Shouldn't happen and normally this would be a BUG_ON but no point
1851 * crashing the machine for something we can survive anyway.
1852 */
1853 if (WARN_ON(!eb)) {
1854 spin_unlock(&mapping->i_private_lock);
1855 return 0;
1856 }
1857
1858 if (eb == ctx->eb) {
1859 spin_unlock(&mapping->i_private_lock);
1860 return 0;
1861 }
1862 ret = atomic_inc_not_zero(&eb->refs);
1863 spin_unlock(&mapping->i_private_lock);
1864 if (!ret)
1865 return 0;
1866
1867 ctx->eb = eb;
1868
1869 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1870 if (ret) {
1871 if (ret == -EBUSY)
1872 ret = 0;
1873 free_extent_buffer(eb);
1874 return ret;
1875 }
1876
1877 if (!lock_extent_buffer_for_io(eb, wbc)) {
1878 free_extent_buffer(eb);
1879 return 0;
1880 }
1881 /* Implies write in zoned mode. */
1882 if (ctx->zoned_bg) {
1883 /* Mark the last eb in the block group. */
1884 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1885 ctx->zoned_bg->meta_write_pointer += eb->len;
1886 }
1887 write_one_eb(eb, wbc);
1888 free_extent_buffer(eb);
1889 return 1;
1890 }
1891
1892 int btree_write_cache_pages(struct address_space *mapping,
1893 struct writeback_control *wbc)
1894 {
1895 struct btrfs_eb_write_context ctx = { .wbc = wbc };
1896 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1897 int ret = 0;
1898 int done = 0;
1899 int nr_to_write_done = 0;
1900 struct folio_batch fbatch;
1901 unsigned int nr_folios;
1902 pgoff_t index;
1903 pgoff_t end; /* Inclusive */
1904 int scanned = 0;
1905 xa_mark_t tag;
1906
1907 folio_batch_init(&fbatch);
1908 if (wbc->range_cyclic) {
1909 index = mapping->writeback_index; /* Start from prev offset */
1910 end = -1;
1911 /*
1912 * Start from the beginning does not need to cycle over the
1913 * range, mark it as scanned.
1914 */
1915 scanned = (index == 0);
1916 } else {
1917 index = wbc->range_start >> PAGE_SHIFT;
1918 end = wbc->range_end >> PAGE_SHIFT;
1919 scanned = 1;
1920 }
1921 if (wbc->sync_mode == WB_SYNC_ALL)
1922 tag = PAGECACHE_TAG_TOWRITE;
1923 else
1924 tag = PAGECACHE_TAG_DIRTY;
1925 btrfs_zoned_meta_io_lock(fs_info);
1926 retry:
1927 if (wbc->sync_mode == WB_SYNC_ALL)
1928 tag_pages_for_writeback(mapping, index, end);
1929 while (!done && !nr_to_write_done && (index <= end) &&
1930 (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1931 tag, &fbatch))) {
1932 unsigned i;
1933
1934 for (i = 0; i < nr_folios; i++) {
1935 struct folio *folio = fbatch.folios[i];
1936
1937 ret = submit_eb_page(folio, &ctx);
1938 if (ret == 0)
1939 continue;
1940 if (ret < 0) {
1941 done = 1;
1942 break;
1943 }
1944
1945 /*
1946 * The filesystem may choose to bump up nr_to_write.
1947 * We have to make sure to honor the new nr_to_write
1948 * at any time.
1949 */
1950 nr_to_write_done = wbc->nr_to_write <= 0;
1951 }
1952 folio_batch_release(&fbatch);
1953 cond_resched();
1954 }
1955 if (!scanned && !done) {
1956 /*
1957 * We hit the last page and there is more work to be done: wrap
1958 * back to the start of the file
1959 */
1960 scanned = 1;
1961 index = 0;
1962 goto retry;
1963 }
1964 /*
1965 * If something went wrong, don't allow any metadata write bio to be
1966 * submitted.
1967 *
1968 * This would prevent use-after-free if we had dirty pages not
1969 * cleaned up, which can still happen by fuzzed images.
1970 *
1971 * - Bad extent tree
1972 * Allowing existing tree block to be allocated for other trees.
1973 *
1974 * - Log tree operations
1975 * Existing tree blocks get allocated to the log tree, which bumps
1976 * their generation, then get cleaned in tree re-balance.
1977 * Such tree block will not be written back, since it's clean,
1978 * thus no WRITTEN flag set.
1979 * And after log writes back, this tree block is not traced by
1980 * any dirty extent_io_tree.
1981 *
1982 * - Offending tree block gets re-dirtied from its original owner
1983 * Since it has bumped generation, no WRITTEN flag, it can be
1984 * reused without COWing. This tree block will not be traced
1985 * by btrfs_transaction::dirty_pages.
1986 *
1987 * Now such dirty tree block will not be cleaned by any dirty
1988 * extent io tree. Thus we don't want to submit such wild eb
1989 * if the fs already has error.
1990 *
1991 * We can get ret > 0 from submit_eb_page() indicating how many ebs
1992 * were submitted. Reset it to 0 to avoid false alerts for the caller.
1993 */
1994 if (ret > 0)
1995 ret = 0;
1996 if (!ret && BTRFS_FS_ERROR(fs_info))
1997 ret = -EROFS;
1998
1999 if (ctx.zoned_bg)
2000 btrfs_put_block_group(ctx.zoned_bg);
2001 btrfs_zoned_meta_io_unlock(fs_info);
2002 return ret;
2003 }
2004
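/*
 * Hedged sketch (illustrative only): how a sync path might drive btree
 * writeback through the function above.  The helper name is hypothetical and
 * error handling is simplified.
 */
static int writeback_all_btree_pages_sketch(struct btrfs_fs_info *fs_info)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	return btree_write_cache_pages(fs_info->btree_inode->i_mapping, &wbc);
}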
2005 /*
2006 * Walk the list of dirty pages of the given address space and write all of them.
2007 *
2008 * @mapping: address space structure to write
2009 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2010 * @bio_ctrl: holds context for the write, namely the bio
2011 *
2012 * If a page is already under I/O, write_cache_pages() skips it, even
2013 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2014 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2015 * and msync() need to guarantee that all the data which was dirty at the time
2016 * the call was made get new I/O started against them. If wbc->sync_mode is
2017 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2018 * existing IO to complete.
2019 */
2020 static int extent_write_cache_pages(struct address_space *mapping,
2021 struct btrfs_bio_ctrl *bio_ctrl)
2022 {
2023 struct writeback_control *wbc = bio_ctrl->wbc;
2024 struct inode *inode = mapping->host;
2025 int ret = 0;
2026 int done = 0;
2027 int nr_to_write_done = 0;
2028 struct folio_batch fbatch;
2029 unsigned int nr_folios;
2030 pgoff_t index;
2031 pgoff_t end; /* Inclusive */
2032 pgoff_t done_index;
2033 int range_whole = 0;
2034 int scanned = 0;
2035 xa_mark_t tag;
2036
2037 /*
2038 * We have to hold onto the inode so that ordered extents can do their
2039 * work when the IO finishes. The alternative to this is failing to add
2040 * an ordered extent if the igrab() fails there and that is a huge pain
2041 * to deal with, so instead just hold onto the inode throughout the
2042 * writepages operation. If it fails here we are freeing up the inode
2043 * anyway and we'd rather not waste our time writing out stuff that is
2044 * going to be truncated anyway.
2045 */
2046 if (!igrab(inode))
2047 return 0;
2048
2049 folio_batch_init(&fbatch);
2050 if (wbc->range_cyclic) {
2051 index = mapping->writeback_index; /* Start from prev offset */
2052 end = -1;
2053 /*
2054 * Start from the beginning does not need to cycle over the
2055 * range, mark it as scanned.
2056 */
2057 scanned = (index == 0);
2058 } else {
2059 index = wbc->range_start >> PAGE_SHIFT;
2060 end = wbc->range_end >> PAGE_SHIFT;
2061 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2062 range_whole = 1;
2063 scanned = 1;
2064 }
2065
2066 /*
2067 * We do the tagged writepage as long as the snapshot flush bit is set
2068 * and we are the first one who do the filemap_flush() on this inode.
2069 *
2070 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2071 * not race in and drop the bit.
2072 */
2073 if (range_whole && wbc->nr_to_write == LONG_MAX &&
2074 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2075 &BTRFS_I(inode)->runtime_flags))
2076 wbc->tagged_writepages = 1;
2077
2078 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2079 tag = PAGECACHE_TAG_TOWRITE;
2080 else
2081 tag = PAGECACHE_TAG_DIRTY;
2082 retry:
2083 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2084 tag_pages_for_writeback(mapping, index, end);
2085 done_index = index;
2086 while (!done && !nr_to_write_done && (index <= end) &&
2087 (nr_folios = filemap_get_folios_tag(mapping, &index,
2088 end, tag, &fbatch))) {
2089 unsigned i;
2090
2091 for (i = 0; i < nr_folios; i++) {
2092 struct folio *folio = fbatch.folios[i];
2093
2094 done_index = folio_next_index(folio);
2095 /*
2096 * At this point we hold neither the i_pages lock nor
2097 * the page lock: the page may be truncated or
2098 * invalidated (changing page->mapping to NULL),
2099 * or even swizzled back from swapper_space to
2100 * tmpfs file mapping
2101 */
2102 if (!folio_trylock(folio)) {
2103 submit_write_bio(bio_ctrl, 0);
2104 folio_lock(folio);
2105 }
2106
2107 if (unlikely(folio->mapping != mapping)) {
2108 folio_unlock(folio);
2109 continue;
2110 }
2111
2112 if (!folio_test_dirty(folio)) {
2113 /* Someone wrote it for us. */
2114 folio_unlock(folio);
2115 continue;
2116 }
2117
2118 if (wbc->sync_mode != WB_SYNC_NONE) {
2119 if (folio_test_writeback(folio))
2120 submit_write_bio(bio_ctrl, 0);
2121 folio_wait_writeback(folio);
2122 }
2123
2124 if (folio_test_writeback(folio) ||
2125 !folio_clear_dirty_for_io(folio)) {
2126 folio_unlock(folio);
2127 continue;
2128 }
2129
2130 ret = extent_writepage(folio, bio_ctrl);
2131 if (ret < 0) {
2132 done = 1;
2133 break;
2134 }
2135
2136 /*
2137 * The filesystem may choose to bump up nr_to_write.
2138 * We have to make sure to honor the new nr_to_write
2139 * at any time.
2140 */
2141 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2142 wbc->nr_to_write <= 0);
2143 }
2144 folio_batch_release(&fbatch);
2145 cond_resched();
2146 }
2147 if (!scanned && !done) {
2148 /*
2149 * We hit the last page and there is more work to be done: wrap
2150 * back to the start of the file
2151 */
2152 scanned = 1;
2153 index = 0;
2154
2155 /*
2156 * If we're looping we could run into a page that is locked by a
2157 * writer and that writer could be waiting on writeback for a
2158 * page in our current bio, and thus deadlock, so flush the
2159 * write bio here.
2160 */
2161 submit_write_bio(bio_ctrl, 0);
2162 goto retry;
2163 }
2164
2165 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2166 mapping->writeback_index = done_index;
2167
2168 btrfs_add_delayed_iput(BTRFS_I(inode));
2169 return ret;
2170 }
2171
2172 /*
2173 * Submit the pages in the range to the bio for call sites where the delalloc
2174 * range has already been run (i.e. the ordered extent has been inserted) and
2175 * all pages are still locked.
2176 */
2177 void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
2178 u64 start, u64 end, struct writeback_control *wbc,
2179 bool pages_dirty)
2180 {
2181 bool found_error = false;
2182 int ret = 0;
2183 struct address_space *mapping = inode->i_mapping;
2184 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2185 const u32 sectorsize = fs_info->sectorsize;
2186 loff_t i_size = i_size_read(inode);
2187 u64 cur = start;
2188 struct btrfs_bio_ctrl bio_ctrl = {
2189 .wbc = wbc,
2190 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2191 };
2192
2193 if (wbc->no_cgroup_owner)
2194 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2195
2196 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2197
2198 while (cur <= end) {
2199 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2200 u32 cur_len = cur_end + 1 - cur;
2201 struct folio *folio;
2202
2203 folio = __filemap_get_folio(mapping, cur >> PAGE_SHIFT, 0, 0);
2204
2205 /*
2206 * This shouldn't happen, the pages are pinned and locked, this
2207 * code is just in case, but shouldn't actually be run.
2208 */
2209 if (IS_ERR(folio)) {
2210 btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
2211 cur, cur_len, false);
2212 mapping_set_error(mapping, PTR_ERR(folio));
2213 cur = cur_end + 1;
2214 continue;
2215 }
2216
2217 ASSERT(folio_test_locked(folio));
2218 if (pages_dirty && folio != locked_folio)
2219 ASSERT(folio_test_dirty(folio));
2220
2221 /*
2222 * Set the submission bitmap to submit all sectors.
2223 * extent_writepage_io() will do the truncation correctly.
2224 */
2225 bio_ctrl.submit_bitmap = (unsigned long)-1;
2226 ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
2227 &bio_ctrl, i_size);
2228 if (ret == 1)
2229 goto next_page;
2230
2231 if (ret) {
2232 btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
2233 cur, cur_len, !ret);
2234 mapping_set_error(mapping, ret);
2235 }
2236 btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len);
2237 if (ret < 0)
2238 found_error = true;
2239 next_page:
2240 folio_put(folio);
2241 cur = cur_end + 1;
2242 }
2243
2244 submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2245 }
2246
2247 int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2248 {
2249 struct inode *inode = mapping->host;
2250 int ret = 0;
2251 struct btrfs_bio_ctrl bio_ctrl = {
2252 .wbc = wbc,
2253 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2254 };
2255
2256 /*
2257 * Allow only a single thread to do the reloc work in zoned mode to
2258 * protect the write pointer updates.
2259 */
2260 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2261 ret = extent_write_cache_pages(mapping, &bio_ctrl);
2262 submit_write_bio(&bio_ctrl, ret);
2263 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2264 return ret;
2265 }
2266
2267 void btrfs_readahead(struct readahead_control *rac)
2268 {
2269 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2270 struct folio *folio;
2271 struct extent_map *em_cached = NULL;
2272 u64 prev_em_start = (u64)-1;
2273
2274 while ((folio = readahead_folio(rac)) != NULL)
2275 btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
2276
2277 if (em_cached)
2278 free_extent_map(em_cached);
2279 submit_one_bio(&bio_ctrl);
2280 }
2281
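/*
 * Hedged sketch (illustrative only): the two exported callbacks above are
 * meant to be wired into the data inode's address_space_operations.  The real
 * table lives elsewhere and has more entries; this trimmed version and its
 * name are hypothetical.
 */
static const struct address_space_operations btrfs_data_aops_sketch = {
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
};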
2282 /*
2283 * Basic invalidate_folio code: this waits on any locked or writeback
2284 * ranges corresponding to the folio, and then deletes any extent state
2285 * records from the tree.
2286 */
2287 int extent_invalidate_folio(struct extent_io_tree *tree,
2288 struct folio *folio, size_t offset)
2289 {
2290 struct extent_state *cached_state = NULL;
2291 u64 start = folio_pos(folio);
2292 u64 end = start + folio_size(folio) - 1;
2293 size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2294
2295 /* This function is only called for the btree inode */
2296 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2297
2298 start += ALIGN(offset, blocksize);
2299 if (start > end)
2300 return 0;
2301
2302 lock_extent(tree, start, end, &cached_state);
2303 folio_wait_writeback(folio);
2304
2305 /*
2306 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2307 * so here we only need to unlock the extent range to free any
2308 * existing extent state.
2309 */
2310 unlock_extent(tree, start, end, &cached_state);
2311 return 0;
2312 }
2313
2314 /*
2315 * A helper for release_folio: this tests for areas of the page that
2316 * are locked or under IO and drops the related state bits if it is safe
2317 * to drop the page.
2318 */
2319 static bool try_release_extent_state(struct extent_io_tree *tree,
2320 struct folio *folio, gfp_t mask)
2321 {
2322 u64 start = folio_pos(folio);
2323 u64 end = start + PAGE_SIZE - 1;
2324 bool ret;
2325
2326 if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2327 ret = false;
2328 } else {
2329 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2330 EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2331 EXTENT_QGROUP_RESERVED);
2332 int ret2;
2333
2334 /*
2335 * At this point we can safely clear everything except the
2336 * locked bit, the nodatasum bit and the delalloc new bit.
2337 * The delalloc new bit will be cleared by ordered extent
2338 * completion.
2339 */
2340 ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2341
2342 /* If __clear_extent_bit() failed due to ENOMEM, we can't
2343 * allow the release to continue.
2344 */
2345 if (ret2 < 0)
2346 ret = false;
2347 else
2348 ret = true;
2349 }
2350 return ret;
2351 }
2352
2353 /*
2354 * A helper for release_folio. As long as there are no locked extents
2355 * in the range corresponding to the page, both state records and extent
2356 * map records are removed.
2357 */
2358 bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
2359 {
2360 u64 start = folio_pos(folio);
2361 u64 end = start + PAGE_SIZE - 1;
2362 struct btrfs_inode *inode = folio_to_inode(folio);
2363 struct extent_io_tree *io_tree = &inode->io_tree;
2364
2365 while (start <= end) {
2366 const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2367 const u64 len = end - start + 1;
2368 struct extent_map_tree *extent_tree = &inode->extent_tree;
2369 struct extent_map *em;
2370
2371 write_lock(&extent_tree->lock);
2372 em = lookup_extent_mapping(extent_tree, start, len);
2373 if (!em) {
2374 write_unlock(&extent_tree->lock);
2375 break;
2376 }
2377 if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2378 write_unlock(&extent_tree->lock);
2379 free_extent_map(em);
2380 break;
2381 }
2382 if (test_range_bit_exists(io_tree, em->start,
2383 extent_map_end(em) - 1, EXTENT_LOCKED))
2384 goto next;
2385 /*
2386 * If it's not in the list of modified extents, used by a fast
2387 * fsync, we can remove it. If it's being logged we can safely
2388 * remove it since fsync took an extra reference on the em.
2389 */
2390 if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2391 goto remove_em;
2392 /*
2393 * If it's in the list of modified extents, remove it only if
2394 * its generation is older than the current one, in which case
2395 * we don't need it for a fast fsync. Otherwise don't remove it,
2396 * we could be racing with an ongoing fast fsync that could miss
2397 * the new extent.
2398 */
2399 if (em->generation >= cur_gen)
2400 goto next;
2401 remove_em:
2402 /*
2403 * We only remove extent maps that are not in the list of
2404 * modified extents or that are in the list but with a
2405 * generation lower than the current generation, so there is no
2406 * need to set the full fsync flag on the inode (it hurts the
2407 * fsync performance for workloads with a data size that exceeds
2408 * or is close to the system's memory).
2409 */
2410 remove_extent_mapping(inode, em);
2411 /* Once for the inode's extent map tree. */
2412 free_extent_map(em);
2413 next:
2414 start = extent_map_end(em);
2415 write_unlock(&extent_tree->lock);
2416
2417 /* Once for us, for the lookup_extent_mapping() reference. */
2418 free_extent_map(em);
2419
2420 if (need_resched()) {
2421 /*
2422 * If we need to resched but we can't block just exit
2423 * and leave any remaining extent maps.
2424 */
2425 if (!gfpflags_allow_blocking(mask))
2426 break;
2427
2428 cond_resched();
2429 }
2430 }
2431 return try_release_extent_state(io_tree, folio, mask);
2432 }
2433
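/*
 * Hedged sketch (illustrative only): how a ->release_folio callback could use
 * the helper above.  The function name is hypothetical; the real callback
 * also handles ordered extents and lives in the inode code.
 */
static bool data_release_folio_sketch(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;
	return try_release_extent_mapping(folio, gfp_flags);
}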
2434 static void __free_extent_buffer(struct extent_buffer *eb)
2435 {
2436 kmem_cache_free(extent_buffer_cache, eb);
2437 }
2438
2439 static int extent_buffer_under_io(const struct extent_buffer *eb)
2440 {
2441 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
2442 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2443 }
2444
2445 static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
2446 {
2447 struct btrfs_subpage *subpage;
2448
2449 lockdep_assert_held(&folio->mapping->i_private_lock);
2450
2451 if (folio_test_private(folio)) {
2452 subpage = folio_get_private(folio);
2453 if (atomic_read(&subpage->eb_refs))
2454 return true;
2455 /*
2456 * Even if there are no eb refs here, we may still have an
2457 * end_folio_read() call relying on page::private.
2458 */
2459 if (atomic_read(&subpage->readers))
2460 return true;
2461 }
2462 return false;
2463 }
2464
2465 static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
2466 {
2467 struct btrfs_fs_info *fs_info = eb->fs_info;
2468 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2469
2470 /*
2471 * For mapped eb, we're going to change the folio private, which should
2472 * be done under the i_private_lock.
2473 */
2474 if (mapped)
2475 spin_lock(&folio->mapping->i_private_lock);
2476
2477 if (!folio_test_private(folio)) {
2478 if (mapped)
2479 spin_unlock(&folio->mapping->i_private_lock);
2480 return;
2481 }
2482
2483 if (fs_info->nodesize >= PAGE_SIZE) {
2484 /*
2485 * We do this since we'll remove the pages after we've
2486 * removed the eb from the radix tree, so we could race
2487 * and have this page now attached to the new eb. So
2488 * only clear folio if it's still connected to
2489 * this eb.
2490 */
2491 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
2492 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2493 BUG_ON(folio_test_dirty(folio));
2494 BUG_ON(folio_test_writeback(folio));
2495 /* We need to make sure we haven't been attached to a new eb. */
2496 folio_detach_private(folio);
2497 }
2498 if (mapped)
2499 spin_unlock(&folio->mapping->i_private_lock);
2500 return;
2501 }
2502
2503 /*
2504 * For subpage, we can have dummy eb with folio private attached. In
2505 * this case, we can directly detach the private as such folio is only
2506 * attached to one dummy eb, no sharing.
2507 */
2508 if (!mapped) {
2509 btrfs_detach_subpage(fs_info, folio);
2510 return;
2511 }
2512
2513 btrfs_folio_dec_eb_refs(fs_info, folio);
2514
2515 /*
2516 * We can only detach the folio private if there are no other ebs in the
2517 * page range and no unfinished IO.
2518 */
2519 if (!folio_range_has_eb(fs_info, folio))
2520 btrfs_detach_subpage(fs_info, folio);
2521
2522 spin_unlock(&folio->mapping->i_private_lock);
2523 }
2524
2525 /* Release all pages attached to the extent buffer */
2526 static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
2527 {
2528 ASSERT(!extent_buffer_under_io(eb));
2529
2530 for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
2531 struct folio *folio = eb->folios[i];
2532
2533 if (!folio)
2534 continue;
2535
2536 detach_extent_buffer_folio(eb, folio);
2537
2538 /* One for when we allocated the folio. */
2539 folio_put(folio);
2540 }
2541 }
2542
2543 /*
2544 * Helper for releasing the extent buffer.
2545 */
2546 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
2547 {
2548 btrfs_release_extent_buffer_pages(eb);
2549 btrfs_leak_debug_del_eb(eb);
2550 __free_extent_buffer(eb);
2551 }
2552
2553 static struct extent_buffer *
2554 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
2555 unsigned long len)
2556 {
2557 struct extent_buffer *eb = NULL;
2558
2559 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
2560 eb->start = start;
2561 eb->len = len;
2562 eb->fs_info = fs_info;
2563 init_rwsem(&eb->lock);
2564
2565 btrfs_leak_debug_add_eb(eb);
2566
2567 spin_lock_init(&eb->refs_lock);
2568 atomic_set(&eb->refs, 1);
2569
2570 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
2571
2572 return eb;
2573 }
2574
2575 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
2576 {
2577 struct extent_buffer *new;
2578 int num_folios = num_extent_folios(src);
2579 int ret;
2580
2581 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
2582 if (new == NULL)
2583 return NULL;
2584
2585 /*
2586 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
2587 * btrfs_release_extent_buffer() has different behavior for an
2588 * UNMAPPED subpage extent buffer.
2589 */
2590 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
2591
2592 ret = alloc_eb_folio_array(new, false);
2593 if (ret) {
2594 btrfs_release_extent_buffer(new);
2595 return NULL;
2596 }
2597
2598 for (int i = 0; i < num_folios; i++) {
2599 struct folio *folio = new->folios[i];
2600
2601 ret = attach_extent_buffer_folio(new, folio, NULL);
2602 if (ret < 0) {
2603 btrfs_release_extent_buffer(new);
2604 return NULL;
2605 }
2606 WARN_ON(folio_test_dirty(folio));
2607 }
2608 copy_extent_buffer_full(new, src);
2609 set_extent_buffer_uptodate(new);
2610
2611 return new;
2612 }
2613
2614 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2615 u64 start, unsigned long len)
2616 {
2617 struct extent_buffer *eb;
2618 int num_folios = 0;
2619 int ret;
2620
2621 eb = __alloc_extent_buffer(fs_info, start, len);
2622 if (!eb)
2623 return NULL;
2624
2625 ret = alloc_eb_folio_array(eb, false);
2626 if (ret)
2627 goto err;
2628
2629 num_folios = num_extent_folios(eb);
2630 for (int i = 0; i < num_folios; i++) {
2631 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
2632 if (ret < 0)
2633 goto err;
2634 }
2635
2636 set_extent_buffer_uptodate(eb);
2637 btrfs_set_header_nritems(eb, 0);
2638 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2639
2640 return eb;
2641 err:
2642 for (int i = 0; i < num_folios; i++) {
2643 if (eb->folios[i]) {
2644 detach_extent_buffer_folio(eb, eb->folios[i]);
2645 folio_put(eb->folios[i]);
2646 }
2647 }
2648 __free_extent_buffer(eb);
2649 return NULL;
2650 }
2651
2652 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2653 u64 start)
2654 {
2655 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
2656 }
2657
2658 static void check_buffer_tree_ref(struct extent_buffer *eb)
2659 {
2660 int refs;
2661 /*
2662 * The TREE_REF bit is first set when the extent_buffer is added
2663 * to the radix tree. It is also reset, if unset, when a new reference
2664 * is created by find_extent_buffer.
2665 *
2666 * It is only cleared in two cases: freeing the last non-tree
2667 * reference to the extent_buffer when its STALE bit is set or
2668 * calling release_folio when the tree reference is the only reference.
2669 *
2670 * In both cases, care is taken to ensure that the extent_buffer's
2671 * pages are not under io. However, release_folio can be concurrently
2672 * called with creating new references, which is prone to race
2673 * conditions between the calls to check_buffer_tree_ref in those
2674 * codepaths and clearing TREE_REF in try_release_extent_buffer.
2675 *
2676 * The actual lifetime of the extent_buffer in the radix tree is
2677 * adequately protected by the refcount, but the TREE_REF bit and
2678 * its corresponding reference are not. To protect against this
2679 * class of races, we call check_buffer_tree_ref from the codepaths
2680 * which trigger io. Note that once io is initiated, TREE_REF can no
2681 * longer be cleared, so that is the moment at which any such race is
2682 * best fixed.
2683 */
2684 refs = atomic_read(&eb->refs);
2685 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2686 return;
2687
2688 spin_lock(&eb->refs_lock);
2689 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2690 atomic_inc(&eb->refs);
2691 spin_unlock(&eb->refs_lock);
2692 }
2693
2694 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
2695 {
2696 int num_folios = num_extent_folios(eb);
2697
2698 check_buffer_tree_ref(eb);
2699
2700 for (int i = 0; i < num_folios; i++)
2701 folio_mark_accessed(eb->folios[i]);
2702 }
2703
2704 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
2705 u64 start)
2706 {
2707 struct extent_buffer *eb;
2708
2709 eb = find_extent_buffer_nolock(fs_info, start);
2710 if (!eb)
2711 return NULL;
2712 /*
2713 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
2714 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
2715 * another task running free_extent_buffer() might have seen that flag
2716 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
2717 * writeback flags not set) and it's still in the tree (flag
2718 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
2719 * decrementing the extent buffer's reference count twice. So here we
2720 * could race and increment the eb's reference count, clear its stale
2721 * flag, mark it as dirty and drop our reference before the other task
2722 * finishes executing free_extent_buffer, which would later result in
2723 * an attempt to free an extent buffer that is dirty.
2724 */
2725 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
2726 spin_lock(&eb->refs_lock);
2727 spin_unlock(&eb->refs_lock);
2728 }
2729 mark_extent_buffer_accessed(eb);
2730 return eb;
2731 }
2732
2733 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
2734 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
2735 u64 start)
2736 {
2737 struct extent_buffer *eb, *exists = NULL;
2738 int ret;
2739
2740 eb = find_extent_buffer(fs_info, start);
2741 if (eb)
2742 return eb;
2743 eb = alloc_dummy_extent_buffer(fs_info, start);
2744 if (!eb)
2745 return ERR_PTR(-ENOMEM);
2746 eb->fs_info = fs_info;
2747 again:
2748 ret = radix_tree_preload(GFP_NOFS);
2749 if (ret) {
2750 exists = ERR_PTR(ret);
2751 goto free_eb;
2752 }
2753 spin_lock(&fs_info->buffer_lock);
2754 ret = radix_tree_insert(&fs_info->buffer_radix,
2755 start >> fs_info->sectorsize_bits, eb);
2756 spin_unlock(&fs_info->buffer_lock);
2757 radix_tree_preload_end();
2758 if (ret == -EEXIST) {
2759 exists = find_extent_buffer(fs_info, start);
2760 if (exists)
2761 goto free_eb;
2762 else
2763 goto again;
2764 }
2765 check_buffer_tree_ref(eb);
2766 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
2767
2768 return eb;
2769 free_eb:
2770 btrfs_release_extent_buffer(eb);
2771 return exists;
2772 }
2773 #endif
2774
2775 static struct extent_buffer *grab_extent_buffer(
2776 struct btrfs_fs_info *fs_info, struct page *page)
2777 {
2778 struct folio *folio = page_folio(page);
2779 struct extent_buffer *exists;
2780
2781 lockdep_assert_held(&page->mapping->i_private_lock);
2782
2783 /*
2784 * For subpage case, we completely rely on radix tree to ensure we
2785 * don't try to insert two ebs for the same bytenr. So here we always
2786 * return NULL and just continue.
2787 */
2788 if (fs_info->nodesize < PAGE_SIZE)
2789 return NULL;
2790
2791 /* Page not yet attached to an extent buffer */
2792 if (!folio_test_private(folio))
2793 return NULL;
2794
2795 /*
2796 * We could have already allocated an eb for this page and attached one
2797 * so let's see if we can get a ref on the existing eb, and if we can we
2798 * know it's good and we can just return that one, else we know we can
2799 * just overwrite folio private.
2800 */
2801 exists = folio_get_private(folio);
2802 if (atomic_inc_not_zero(&exists->refs))
2803 return exists;
2804
2805 WARN_ON(PageDirty(page));
2806 folio_detach_private(folio);
2807 return NULL;
2808 }
2809
2810 static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
2811 {
2812 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
2813 btrfs_err(fs_info, "bad tree block start %llu", start);
2814 return -EINVAL;
2815 }
2816
2817 if (fs_info->nodesize < PAGE_SIZE &&
2818 offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
2819 btrfs_err(fs_info,
2820 "tree block crosses page boundary, start %llu nodesize %u",
2821 start, fs_info->nodesize);
2822 return -EINVAL;
2823 }
2824 if (fs_info->nodesize >= PAGE_SIZE &&
2825 !PAGE_ALIGNED(start)) {
2826 btrfs_err(fs_info,
2827 "tree block is not page aligned, start %llu nodesize %u",
2828 start, fs_info->nodesize);
2829 return -EINVAL;
2830 }
2831 if (!IS_ALIGNED(start, fs_info->nodesize) &&
2832 !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
2833 btrfs_warn(fs_info,
2834 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
2835 start, fs_info->nodesize);
2836 }
2837 return 0;
2838 }
2839
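/*
 * Worked example (illustrative, not from the original source): on a 64K page
 * system with sectorsize 4K and nodesize 16K (subpage metadata), a tree block
 * starting at offset 56K inside a page is rejected by the second check above,
 * since 56K + 16K = 72K crosses the 64K page boundary.  A block at offset 48K
 * is accepted, and an unaligned start such as 0x4100 fails the sectorsize
 * check.
 */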
2840
2841 /*
2842 * Return 0 if eb->folios[i] is attached to btree inode successfully.
2843 * Return >0 if there is already another extent buffer for the range,
2844 * and @found_eb_ret would be updated.
2845 * Return -EAGAIN if the filemap has an existing folio but with a different
2846 * size than @eb.
2847 * The caller needs to free the existing folios and retry using the same order.
2848 */
2849 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
2850 struct btrfs_subpage *prealloc,
2851 struct extent_buffer **found_eb_ret)
2852 {
2853
2854 struct btrfs_fs_info *fs_info = eb->fs_info;
2855 struct address_space *mapping = fs_info->btree_inode->i_mapping;
2856 const unsigned long index = eb->start >> PAGE_SHIFT;
2857 struct folio *existing_folio = NULL;
2858 int ret;
2859
2860 ASSERT(found_eb_ret);
2861
2862 /* Caller should ensure the folio exists. */
2863 ASSERT(eb->folios[i]);
2864
2865 retry:
2866 ret = filemap_add_folio(mapping, eb->folios[i], index + i,
2867 GFP_NOFS | __GFP_NOFAIL);
2868 if (!ret)
2869 goto finish;
2870
2871 existing_folio = filemap_lock_folio(mapping, index + i);
2872 /* The page cache only exists for a very short time, just retry. */
2873 if (IS_ERR(existing_folio)) {
2874 existing_folio = NULL;
2875 goto retry;
2876 }
2877
2878 /* For now, we should only have single-page folios for btree inode. */
2879 ASSERT(folio_nr_pages(existing_folio) == 1);
2880
2881 if (folio_size(existing_folio) != eb->folio_size) {
2882 folio_unlock(existing_folio);
2883 folio_put(existing_folio);
2884 return -EAGAIN;
2885 }
2886
2887 finish:
2888 spin_lock(&mapping->i_private_lock);
2889 if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
2890 /* We're going to reuse the existing page, can drop our folio now. */
2891 __free_page(folio_page(eb->folios[i], 0));
2892 eb->folios[i] = existing_folio;
2893 } else if (existing_folio) {
2894 struct extent_buffer *existing_eb;
2895
2896 existing_eb = grab_extent_buffer(fs_info,
2897 folio_page(existing_folio, 0));
2898 if (existing_eb) {
2899 /* The extent buffer still exists, we can use it directly. */
2900 *found_eb_ret = existing_eb;
2901 spin_unlock(&mapping->i_private_lock);
2902 folio_unlock(existing_folio);
2903 folio_put(existing_folio);
2904 return 1;
2905 }
2906 /* The extent buffer no longer exists, we can reuse the folio. */
2907 __free_page(folio_page(eb->folios[i], 0));
2908 eb->folios[i] = existing_folio;
2909 }
2910 eb->folio_size = folio_size(eb->folios[i]);
2911 eb->folio_shift = folio_shift(eb->folios[i]);
2912 /* Should not fail, as we have preallocated the memory. */
2913 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
2914 ASSERT(!ret);
2915 /*
2916 * To inform that we have an extra eb under allocation, so that
2917 * detach_extent_buffer_folio() won't release the folio private when the
2918 * eb hasn't been inserted into the radix tree yet.
2919 *
2920 * The ref will be decreased when the eb releases the folio, in
2921 * detach_extent_buffer_folio(). Thus it needs no special handling in the
2922 * error path.
2923 */
2924 btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
2925 spin_unlock(&mapping->i_private_lock);
2926 return 0;
2927 }
2928
2929 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
2930 u64 start, u64 owner_root, int level)
2931 {
2932 unsigned long len = fs_info->nodesize;
2933 int num_folios;
2934 int attached = 0;
2935 struct extent_buffer *eb;
2936 struct extent_buffer *existing_eb = NULL;
2937 struct btrfs_subpage *prealloc = NULL;
2938 u64 lockdep_owner = owner_root;
2939 bool page_contig = true;
2940 int uptodate = 1;
2941 int ret;
2942
2943 if (check_eb_alignment(fs_info, start))
2944 return ERR_PTR(-EINVAL);
2945
2946 #if BITS_PER_LONG == 32
2947 if (start >= MAX_LFS_FILESIZE) {
2948 btrfs_err_rl(fs_info,
2949 "extent buffer %llu is beyond 32bit page cache limit", start);
2950 btrfs_err_32bit_limit(fs_info);
2951 return ERR_PTR(-EOVERFLOW);
2952 }
2953 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
2954 btrfs_warn_32bit_limit(fs_info);
2955 #endif
2956
2957 eb = find_extent_buffer(fs_info, start);
2958 if (eb)
2959 return eb;
2960
2961 eb = __alloc_extent_buffer(fs_info, start, len);
2962 if (!eb)
2963 return ERR_PTR(-ENOMEM);
2964
2965 /*
2966 * The reloc trees are just snapshots, so we need them to appear to be
2967 * just like any other fs tree WRT lockdep.
2968 */
2969 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
2970 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
2971
2972 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
2973
2974 /*
2975 * Preallocate folio private for subpage case, so that we won't
2976 * allocate memory with i_private_lock nor page lock hold.
2977 *
2978 * The memory will be freed by attach_extent_buffer_page() or freed
2979 * manually if we exit earlier.
2980 */
2981 if (fs_info->nodesize < PAGE_SIZE) {
2982 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
2983 if (IS_ERR(prealloc)) {
2984 ret = PTR_ERR(prealloc);
2985 goto out;
2986 }
2987 }
2988
2989 reallocate:
2990 /* Allocate all pages first. */
2991 ret = alloc_eb_folio_array(eb, true);
2992 if (ret < 0) {
2993 btrfs_free_subpage(prealloc);
2994 goto out;
2995 }
2996
2997 num_folios = num_extent_folios(eb);
2998 /* Attach all pages to the filemap. */
2999 for (int i = 0; i < num_folios; i++) {
3000 struct folio *folio;
3001
3002 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
3003 if (ret > 0) {
3004 ASSERT(existing_eb);
3005 goto out;
3006 }
3007
3008 /*
3009 * TODO: Special handling for a corner case where the order of
3010 * folios mismatch between the new eb and filemap.
3011 *
3012 * This happens when:
3013 *
3014 * - the new eb is using higher order folio
3015 *
3016 * - the filemap is still using 0-order folios for the range
3017 * This can happen at the previous eb allocation, and we don't
3018 * have higher order folio for the call.
3019 *
3020 * - the existing eb has already been freed
3021 *
3022 * In this case, we have to free the existing folios first, and
3023 * re-allocate using the same order.
3024 * Thankfully this is not going to happen yet, as we're still
3025 * using 0-order folios.
3026 */
3027 if (unlikely(ret == -EAGAIN)) {
3028 ASSERT(0);
3029 goto reallocate;
3030 }
3031 attached++;
3032
3033 /*
3034 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3035 * reliable, as we may choose to reuse the existing page cache
3036 * and free the allocated page.
3037 */
3038 folio = eb->folios[i];
3039 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3040
3041 /*
3042 * Check if the current page is physically contiguous with previous eb
3043 * page.
3044 * At this stage, either we allocated a large folio, thus @i
3045 * would only be 0, or we fall back to per-page allocation.
3046 */
3047 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3048 page_contig = false;
3049
3050 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3051 uptodate = 0;
3052
3053 /*
3054 * We can't unlock the pages just yet since the extent buffer
3055 * hasn't been properly inserted in the radix tree, this
3056 * opens a race with btree_release_folio which can free a page
3057 * while we are still filling in all pages for the buffer and
3058 * we could crash.
3059 */
3060 }
3061 if (uptodate)
3062 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3063 /* All pages are physically contiguous, can skip cross page handling. */
3064 if (page_contig)
3065 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3066 again:
3067 ret = radix_tree_preload(GFP_NOFS);
3068 if (ret)
3069 goto out;
3070
3071 spin_lock(&fs_info->buffer_lock);
3072 ret = radix_tree_insert(&fs_info->buffer_radix,
3073 start >> fs_info->sectorsize_bits, eb);
3074 spin_unlock(&fs_info->buffer_lock);
3075 radix_tree_preload_end();
3076 if (ret == -EEXIST) {
3077 ret = 0;
3078 existing_eb = find_extent_buffer(fs_info, start);
3079 if (existing_eb)
3080 goto out;
3081 else
3082 goto again;
3083 }
3084 /* add one reference for the tree */
3085 check_buffer_tree_ref(eb);
3086 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3087
3088 /*
3089 * Now it's safe to unlock the pages because any calls to
3090 * btree_release_folio will correctly detect that a page belongs to a
3091 * live buffer and won't free them prematurely.
3092 */
3093 for (int i = 0; i < num_folios; i++)
3094 unlock_page(folio_page(eb->folios[i], 0));
3095 return eb;
3096
3097 out:
3098 WARN_ON(!atomic_dec_and_test(&eb->refs));
3099
3100 /*
3101 * Any attached folios need to be detached before we unlock them. This
3102 * is because when we insert our new folios into the mapping we also
3103 * attach our eb to those folios. If we fail to insert a folio, we look
3104 * up the folio for that index and grab the eb attached to it. We do not
3105 * want that lookup to grab this eb, as we're getting ready to free it.
3106 * So we have to detach it first and then unlock it.
3107 *
3108 * We have to drop our reference and NULL it out here because in the
3109 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3110 * Below when we call btrfs_release_extent_buffer() we will call
3111 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3112 * case. If we left eb->folios[i] populated in the subpage case we'd
3113 * double put our reference and be super sad.
3114 */
3115 for (int i = 0; i < attached; i++) {
3116 ASSERT(eb->folios[i]);
3117 detach_extent_buffer_folio(eb, eb->folios[i]);
3118 unlock_page(folio_page(eb->folios[i], 0));
3119 folio_put(eb->folios[i]);
3120 eb->folios[i] = NULL;
3121 }
3122 /*
3123 * Now all pages of that extent buffer are unmapped, set the UNMAPPED flag,
3124 * so it can be cleaned up without utilizing page->mapping.
3125 */
3126 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3127
3128 btrfs_release_extent_buffer(eb);
3129 if (ret < 0)
3130 return ERR_PTR(ret);
3131 ASSERT(existing_eb);
3132 return existing_eb;
3133 }
3134
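/*
 * Hedged sketch (illustrative only): the usual metadata read path allocates
 * (or finds) the eb and then reads it.  The helper name is hypothetical and
 * error handling is simplified; the real read helpers live in disk-io.c.
 */
static struct extent_buffer *read_tree_block_sketch(struct btrfs_fs_info *fs_info,
						    u64 bytenr, u64 owner_root, int level,
						    const struct btrfs_tree_parent_check *check)
{
	struct extent_buffer *eb;
	int ret;

	eb = alloc_extent_buffer(fs_info, bytenr, owner_root, level);
	if (IS_ERR(eb))
		return eb;

	/* 1 == wait for the read to complete before returning. */
	ret = read_extent_buffer_pages(eb, 1, 0, check);
	if (ret) {
		free_extent_buffer(eb);
		return ERR_PTR(ret);
	}
	return eb;
}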
3135 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3136 {
3137 struct extent_buffer *eb =
3138 container_of(head, struct extent_buffer, rcu_head);
3139
3140 __free_extent_buffer(eb);
3141 }
3142
3143 static int release_extent_buffer(struct extent_buffer *eb)
3144 __releases(&eb->refs_lock)
3145 {
3146 lockdep_assert_held(&eb->refs_lock);
3147
3148 WARN_ON(atomic_read(&eb->refs) == 0);
3149 if (atomic_dec_and_test(&eb->refs)) {
3150 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3151 struct btrfs_fs_info *fs_info = eb->fs_info;
3152
3153 spin_unlock(&eb->refs_lock);
3154
3155 spin_lock(&fs_info->buffer_lock);
3156 radix_tree_delete(&fs_info->buffer_radix,
3157 eb->start >> fs_info->sectorsize_bits);
3158 spin_unlock(&fs_info->buffer_lock);
3159 } else {
3160 spin_unlock(&eb->refs_lock);
3161 }
3162
3163 btrfs_leak_debug_del_eb(eb);
3164 /* Should be safe to release our pages at this point */
3165 btrfs_release_extent_buffer_pages(eb);
3166 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3167 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3168 __free_extent_buffer(eb);
3169 return 1;
3170 }
3171 #endif
3172 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3173 return 1;
3174 }
3175 spin_unlock(&eb->refs_lock);
3176
3177 return 0;
3178 }
3179
3180 void free_extent_buffer(struct extent_buffer *eb)
3181 {
3182 int refs;
3183 if (!eb)
3184 return;
3185
3186 refs = atomic_read(&eb->refs);
3187 while (1) {
3188 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3189 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3190 refs == 1))
3191 break;
3192 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3193 return;
3194 }
3195
3196 spin_lock(&eb->refs_lock);
3197 if (atomic_read(&eb->refs) == 2 &&
3198 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3199 !extent_buffer_under_io(eb) &&
3200 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3201 atomic_dec(&eb->refs);
3202
3203 /*
3204 * I know this is terrible, but it's temporary until we stop tracking
3205 * the uptodate bits and such for the extent buffers.
3206 */
3207 release_extent_buffer(eb);
3208 }
3209
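/*
 * Hedged sketch (illustrative only): every lookup that returned an eb
 * reference has to be balanced with free_extent_buffer() (or
 * free_extent_buffer_stale() when the caller knows the block was removed
 * from the tree).  The helper name is hypothetical.
 */
static u64 peek_eb_generation_sketch(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *eb = find_extent_buffer(fs_info, bytenr);
	u64 gen = 0;

	if (eb) {
		gen = btrfs_header_generation(eb);
		free_extent_buffer(eb);
	}
	return gen;
}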
3210 void free_extent_buffer_stale(struct extent_buffer *eb)
3211 {
3212 if (!eb)
3213 return;
3214
3215 spin_lock(&eb->refs_lock);
3216 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3217
3218 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3219 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3220 atomic_dec(&eb->refs);
3221 release_extent_buffer(eb);
3222 }
3223
3224 static void btree_clear_folio_dirty(struct folio *folio)
3225 {
3226 ASSERT(folio_test_dirty(folio));
3227 ASSERT(folio_test_locked(folio));
3228 folio_clear_dirty_for_io(folio);
3229 xa_lock_irq(&folio->mapping->i_pages);
3230 if (!folio_test_dirty(folio))
3231 __xa_clear_mark(&folio->mapping->i_pages,
3232 folio_index(folio), PAGECACHE_TAG_DIRTY);
3233 xa_unlock_irq(&folio->mapping->i_pages);
3234 }
3235
3236 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3237 {
3238 struct btrfs_fs_info *fs_info = eb->fs_info;
3239 struct folio *folio = eb->folios[0];
3240 bool last;
3241
3242 /* btree_clear_folio_dirty() needs page locked. */
3243 folio_lock(folio);
3244 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
3245 if (last)
3246 btree_clear_folio_dirty(folio);
3247 folio_unlock(folio);
3248 WARN_ON(atomic_read(&eb->refs) == 0);
3249 }
3250
3251 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3252 struct extent_buffer *eb)
3253 {
3254 struct btrfs_fs_info *fs_info = eb->fs_info;
3255 int num_folios;
3256
3257 btrfs_assert_tree_write_locked(eb);
3258
3259 if (trans && btrfs_header_generation(eb) != trans->transid)
3260 return;
3261
3262 /*
3263 * Instead of clearing the dirty flag off of the buffer, mark it as
3264 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3265 * write-ordering in zoned mode, without the need to later re-dirty
3266 * the extent_buffer.
3267 *
3268 * The actual zeroout of the buffer will happen later in
3269 * btree_csum_one_bio.
3270 */
3271 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3272 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3273 return;
3274 }
3275
3276 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3277 return;
3278
3279 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3280 fs_info->dirty_metadata_batch);
3281
3282 if (eb->fs_info->nodesize < PAGE_SIZE)
3283 return clear_subpage_extent_buffer_dirty(eb);
3284
3285 num_folios = num_extent_folios(eb);
3286 for (int i = 0; i < num_folios; i++) {
3287 struct folio *folio = eb->folios[i];
3288
3289 if (!folio_test_dirty(folio))
3290 continue;
3291 folio_lock(folio);
3292 btree_clear_folio_dirty(folio);
3293 folio_unlock(folio);
3294 }
3295 WARN_ON(atomic_read(&eb->refs) == 0);
3296 }
3297
3298 void set_extent_buffer_dirty(struct extent_buffer *eb)
3299 {
3300 int num_folios;
3301 bool was_dirty;
3302
3303 check_buffer_tree_ref(eb);
3304
3305 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3306
3307 num_folios = num_extent_folios(eb);
3308 WARN_ON(atomic_read(&eb->refs) == 0);
3309 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3310 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
3311
3312 if (!was_dirty) {
3313 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3314
3315 /*
3316 * For subpage case, we can have other extent buffers in the
3317 * same page, and in clear_subpage_extent_buffer_dirty() we
3318 * have to clear page dirty without subpage lock held.
3319 * This can cause race where our page gets dirty cleared after
3320 * we just set it.
3321 *
3322 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
3323 * its page for other reasons, so we can use the page lock to
3324 * prevent the above race.
3325 */
3326 if (subpage)
3327 lock_page(folio_page(eb->folios[0], 0));
3328 for (int i = 0; i < num_folios; i++)
3329 btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
3330 eb->start, eb->len);
3331 if (subpage)
3332 unlock_page(folio_page(eb->folios[0], 0));
3333 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3334 eb->len,
3335 eb->fs_info->dirty_metadata_batch);
3336 }
3337 #ifdef CONFIG_BTRFS_DEBUG
3338 for (int i = 0; i < num_folios; i++)
3339 ASSERT(folio_test_dirty(eb->folios[i]));
3340 #endif
3341 }
3342
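/*
 * Hedged sketch (illustrative only): a modification path dirties the buffer
 * it just changed while still holding the tree write lock.  The helper name
 * is hypothetical and the slot/key handling is simplified.
 */
static void update_node_key_sketch(struct extent_buffer *eb, int slot,
				   const struct btrfs_disk_key *key)
{
	btrfs_assert_tree_write_locked(eb);
	btrfs_set_node_key(eb, key, slot);
	set_extent_buffer_dirty(eb);
}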
3343 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3344 {
3345 struct btrfs_fs_info *fs_info = eb->fs_info;
3346 int num_folios = num_extent_folios(eb);
3347
3348 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3349 for (int i = 0; i < num_folios; i++) {
3350 struct folio *folio = eb->folios[i];
3351
3352 if (!folio)
3353 continue;
3354
3355 /*
3356 * This is special handling for metadata subpage, as regular
3357 * btrfs_is_subpage() can not handle cloned/dummy metadata.
3358 */
3359 if (fs_info->nodesize >= PAGE_SIZE)
3360 folio_clear_uptodate(folio);
3361 else
3362 btrfs_subpage_clear_uptodate(fs_info, folio,
3363 eb->start, eb->len);
3364 }
3365 }
3366
3367 void set_extent_buffer_uptodate(struct extent_buffer *eb)
3368 {
3369 struct btrfs_fs_info *fs_info = eb->fs_info;
3370 int num_folios = num_extent_folios(eb);
3371
3372 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3373 for (int i = 0; i < num_folios; i++) {
3374 struct folio *folio = eb->folios[i];
3375
3376 /*
3377 		 * This is special handling for the metadata subpage case, as the
3378 		 * regular btrfs_is_subpage() cannot handle cloned/dummy metadata.
3379 */
3380 if (fs_info->nodesize >= PAGE_SIZE)
3381 folio_mark_uptodate(folio);
3382 else
3383 btrfs_subpage_set_uptodate(fs_info, folio,
3384 eb->start, eb->len);
3385 }
3386 }
3387
3388 static void clear_extent_buffer_reading(struct extent_buffer *eb)
3389 {
3390 clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
3391 smp_mb__after_atomic();
3392 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
3393 }
3394
3395 static void end_bbio_meta_read(struct btrfs_bio *bbio)
3396 {
3397 struct extent_buffer *eb = bbio->private;
3398 struct btrfs_fs_info *fs_info = eb->fs_info;
3399 bool uptodate = !bbio->bio.bi_status;
3400 struct folio_iter fi;
3401 u32 bio_offset = 0;
3402
3403 /*
3404 * If the extent buffer is marked UPTODATE before the read operation
3405 * completes, other calls to read_extent_buffer_pages() will return
3406 * early without waiting for the read to finish, causing data races.
3407 */
3408 WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
3409
3410 eb->read_mirror = bbio->mirror_num;
3411
3412 if (uptodate &&
3413 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3414 uptodate = false;
3415
3416 if (uptodate) {
3417 set_extent_buffer_uptodate(eb);
3418 } else {
3419 clear_extent_buffer_uptodate(eb);
3420 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3421 }
3422
3423 bio_for_each_folio_all(fi, &bbio->bio) {
3424 struct folio *folio = fi.folio;
3425 u64 start = eb->start + bio_offset;
3426 u32 len = fi.length;
3427
3428 if (uptodate)
3429 btrfs_folio_set_uptodate(fs_info, folio, start, len);
3430 else
3431 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
3432
3433 bio_offset += len;
3434 }
3435
3436 clear_extent_buffer_reading(eb);
3437 free_extent_buffer(eb);
3438
3439 bio_put(&bbio->bio);
3440 }
3441
3442 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
3443 const struct btrfs_tree_parent_check *check)
3444 {
3445 struct btrfs_bio *bbio;
3446 bool ret;
3447
3448 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3449 return 0;
3450
3451 /*
3452 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3453 * operation, which could potentially still be in flight. In this case
3454 * we simply want to return an error.
3455 */
3456 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3457 return -EIO;
3458
3459 /* Someone else is already reading the buffer, just wait for it. */
3460 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3461 goto done;
3462
3463 /*
3464 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
3465 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
3466 * started and finished reading the same eb. In this case, UPTODATE
3467 * will now be set, and we shouldn't read it in again.
3468 */
3469 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
3470 clear_extent_buffer_reading(eb);
3471 return 0;
3472 }
3473
3474 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3475 eb->read_mirror = 0;
3476 check_buffer_tree_ref(eb);
3477 atomic_inc(&eb->refs);
3478
3479 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3480 REQ_OP_READ | REQ_META, eb->fs_info,
3481 end_bbio_meta_read, eb);
3482 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3483 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3484 bbio->file_offset = eb->start;
3485 memcpy(&bbio->parent_check, check, sizeof(*check));
3486 if (eb->fs_info->nodesize < PAGE_SIZE) {
3487 ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
3488 eb->start - folio_pos(eb->folios[0]));
3489 ASSERT(ret);
3490 } else {
3491 int num_folios = num_extent_folios(eb);
3492
3493 for (int i = 0; i < num_folios; i++) {
3494 struct folio *folio = eb->folios[i];
3495
3496 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
3497 ASSERT(ret);
3498 }
3499 }
3500 btrfs_submit_bbio(bbio, mirror_num);
3501
3502 done:
3503 if (wait == WAIT_COMPLETE) {
3504 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3505 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3506 return -EIO;
3507 }
3508
3509 return 0;
3510 }
3511
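/*
 * Illustrative sketch only: a synchronous read of an already looked-up eb,
 * assuming the caller obtained @eb via btrfs_find_create_tree_block() and
 * knows the expected generation and level.  The helper name is hypothetical;
 * it mirrors the readahead helper at the end of this file, but waits for the
 * read to complete.
 */
static int __maybe_unused demo_read_eb_sync(struct extent_buffer *eb,
					    u64 gen, int level)
{
	struct btrfs_tree_parent_check check = {
		.has_first_key = 0,
		.level = level,
		.transid = gen,
	};

	/* Mirror 0 lets the normal mirror selection pick a copy. */
	return read_extent_buffer_pages(eb, WAIT_COMPLETE, 0, &check);
}
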
3512 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3513 unsigned long len)
3514 {
3515 btrfs_warn(eb->fs_info,
3516 "access to eb bytenr %llu len %u out of range start %lu len %lu",
3517 eb->start, eb->len, start, len);
3518 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3519
3520 return true;
3521 }
3522
3523 /*
3524 * Check if the [start, start + len) range is valid before reading/writing
3525 * the eb.
3526  * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3527  *
3528  * Callers should not touch the dst/src memory if this function returns an error.
3529 */
3530 static inline int check_eb_range(const struct extent_buffer *eb,
3531 unsigned long start, unsigned long len)
3532 {
3533 unsigned long offset;
3534
3535 /* start, start + len should not go beyond eb->len nor overflow */
3536 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3537 return report_eb_range(eb, start, len);
3538
3539 return false;
3540 }
3541
3542 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3543 unsigned long start, unsigned long len)
3544 {
3545 const int unit_size = eb->folio_size;
3546 size_t cur;
3547 size_t offset;
3548 char *dst = (char *)dstv;
3549 unsigned long i = get_eb_folio_index(eb, start);
3550
3551 if (check_eb_range(eb, start, len)) {
3552 /*
3553 		 * Invalid range hit: zero the destination, so callers won't get
3554 		 * random garbage in their uninitialized memory.
3555 */
3556 memset(dstv, 0, len);
3557 return;
3558 }
3559
3560 if (eb->addr) {
3561 memcpy(dstv, eb->addr + start, len);
3562 return;
3563 }
3564
3565 offset = get_eb_offset_in_folio(eb, start);
3566
3567 while (len > 0) {
3568 char *kaddr;
3569
3570 cur = min(len, unit_size - offset);
3571 kaddr = folio_address(eb->folios[i]);
3572 memcpy(dst, kaddr + offset, cur);
3573
3574 dst += cur;
3575 len -= cur;
3576 offset = 0;
3577 i++;
3578 }
3579 }
3580
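/*
 * Illustrative sketch only: copying an on-disk structure out of an eb with
 * read_extent_buffer().  The helper name and @offset are hypothetical; real
 * callers compute the offset with helpers such as btrfs_item_ptr_offset().
 */
static void __maybe_unused demo_read_disk_key(const struct extent_buffer *eb,
					      unsigned long offset,
					      struct btrfs_disk_key *key)
{
	read_extent_buffer(eb, key, offset, sizeof(*key));
}
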
3581 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
3582 void __user *dstv,
3583 unsigned long start, unsigned long len)
3584 {
3585 const int unit_size = eb->folio_size;
3586 size_t cur;
3587 size_t offset;
3588 char __user *dst = (char __user *)dstv;
3589 unsigned long i = get_eb_folio_index(eb, start);
3590 int ret = 0;
3591
3592 WARN_ON(start > eb->len);
3593 WARN_ON(start + len > eb->start + eb->len);
3594
3595 if (eb->addr) {
3596 if (copy_to_user_nofault(dstv, eb->addr + start, len))
3597 ret = -EFAULT;
3598 return ret;
3599 }
3600
3601 offset = get_eb_offset_in_folio(eb, start);
3602
3603 while (len > 0) {
3604 char *kaddr;
3605
3606 cur = min(len, unit_size - offset);
3607 kaddr = folio_address(eb->folios[i]);
3608 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
3609 ret = -EFAULT;
3610 break;
3611 }
3612
3613 dst += cur;
3614 len -= cur;
3615 offset = 0;
3616 i++;
3617 }
3618
3619 return ret;
3620 }
3621
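/*
 * Illustrative sketch only: copying an item out of a leaf to user space in a
 * context where page faults must not be taken (the tree search ioctl uses
 * this pattern).  The helper name and offsets are hypothetical; on -EFAULT
 * the real caller drops its locks, faults the destination in and retries.
 */
static int __maybe_unused demo_copy_item_to_user(const struct extent_buffer *leaf,
						 void __user *ubuf,
						 unsigned long item_off,
						 unsigned long item_len)
{
	return read_extent_buffer_to_user_nofault(leaf, ubuf, item_off, item_len);
}
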
3622 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
3623 unsigned long start, unsigned long len)
3624 {
3625 const int unit_size = eb->folio_size;
3626 size_t cur;
3627 size_t offset;
3628 char *kaddr;
3629 char *ptr = (char *)ptrv;
3630 unsigned long i = get_eb_folio_index(eb, start);
3631 int ret = 0;
3632
3633 if (check_eb_range(eb, start, len))
3634 return -EINVAL;
3635
3636 if (eb->addr)
3637 return memcmp(ptrv, eb->addr + start, len);
3638
3639 offset = get_eb_offset_in_folio(eb, start);
3640
3641 while (len > 0) {
3642 cur = min(len, unit_size - offset);
3643 kaddr = folio_address(eb->folios[i]);
3644 ret = memcmp(ptr, kaddr + offset, cur);
3645 if (ret)
3646 break;
3647
3648 ptr += cur;
3649 len -= cur;
3650 offset = 0;
3651 i++;
3652 }
3653 return ret;
3654 }
3655
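/*
 * Illustrative sketch only: verifying that a range of an eb matches an
 * in-memory buffer, e.g. after writing it.  The helper name and offset are
 * hypothetical; the return value follows the usual memcmp() convention.
 */
static bool __maybe_unused demo_eb_range_matches(const struct extent_buffer *eb,
						 const void *expected,
						 unsigned long offset,
						 unsigned long len)
{
	return memcmp_extent_buffer(eb, expected, offset, len) == 0;
}
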
3656 /*
3657 * Check that the extent buffer is uptodate.
3658 *
3659  * For the regular case (nodesize >= PAGE_SIZE), check if the folio is uptodate.
3660  * For the subpage case, check if the range covered by the eb is marked uptodate.
3661 */
3662 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
3663 {
3664 struct btrfs_fs_info *fs_info = eb->fs_info;
3665 struct folio *folio = eb->folios[i];
3666
3667 ASSERT(folio);
3668
3669 /*
3670 * If we are using the commit root we could potentially clear a page
3671 * Uptodate while we're using the extent buffer that we've previously
3672 	 * looked up. We don't want to complain in this case, as the page was
3673 	 * valid before; we just didn't write it out. Instead we want to catch
3674 * the case where we didn't actually read the block properly, which
3675 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
3676 */
3677 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3678 return;
3679
3680 if (fs_info->nodesize < PAGE_SIZE) {
3681 folio = eb->folios[0];
3682 ASSERT(i == 0);
3683 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
3684 eb->start, eb->len)))
3685 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
3686 } else {
3687 WARN_ON(!folio_test_uptodate(folio));
3688 }
3689 }
3690
3691 static void __write_extent_buffer(const struct extent_buffer *eb,
3692 const void *srcv, unsigned long start,
3693 unsigned long len, bool use_memmove)
3694 {
3695 const int unit_size = eb->folio_size;
3696 size_t cur;
3697 size_t offset;
3698 char *kaddr;
3699 const char *src = (const char *)srcv;
3700 unsigned long i = get_eb_folio_index(eb, start);
3701 /* For unmapped (dummy) ebs, no need to check their uptodate status. */
3702 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3703
3704 if (check_eb_range(eb, start, len))
3705 return;
3706
3707 if (eb->addr) {
3708 if (use_memmove)
3709 memmove(eb->addr + start, srcv, len);
3710 else
3711 memcpy(eb->addr + start, srcv, len);
3712 return;
3713 }
3714
3715 offset = get_eb_offset_in_folio(eb, start);
3716
3717 while (len > 0) {
3718 if (check_uptodate)
3719 assert_eb_folio_uptodate(eb, i);
3720
3721 cur = min(len, unit_size - offset);
3722 kaddr = folio_address(eb->folios[i]);
3723 if (use_memmove)
3724 memmove(kaddr + offset, src, cur);
3725 else
3726 memcpy(kaddr + offset, src, cur);
3727
3728 src += cur;
3729 len -= cur;
3730 offset = 0;
3731 i++;
3732 }
3733 }
3734
3735 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
3736 unsigned long start, unsigned long len)
3737 {
3738 return __write_extent_buffer(eb, srcv, start, len, false);
3739 }
3740
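/*
 * Illustrative sketch only: the usual pattern for modifying a tree block.
 * The helper name is hypothetical; the caller is assumed to hold the eb's
 * write lock, and after changing the content the buffer must be marked dirty
 * so that it is written back.
 */
static void __maybe_unused demo_update_eb(struct extent_buffer *eb,
					  const void *data,
					  unsigned long offset,
					  unsigned long len)
{
	write_extent_buffer(eb, data, offset, len);
	set_extent_buffer_dirty(eb);
}
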
3741 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
3742 unsigned long start, unsigned long len)
3743 {
3744 const int unit_size = eb->folio_size;
3745 unsigned long cur = start;
3746
3747 if (eb->addr) {
3748 memset(eb->addr + start, c, len);
3749 return;
3750 }
3751
3752 while (cur < start + len) {
3753 unsigned long index = get_eb_folio_index(eb, cur);
3754 unsigned int offset = get_eb_offset_in_folio(eb, cur);
3755 unsigned int cur_len = min(start + len - cur, unit_size - offset);
3756
3757 assert_eb_folio_uptodate(eb, index);
3758 memset(folio_address(eb->folios[index]) + offset, c, cur_len);
3759
3760 cur += cur_len;
3761 }
3762 }
3763
3764 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
3765 unsigned long len)
3766 {
3767 if (check_eb_range(eb, start, len))
3768 return;
3769 return memset_extent_buffer(eb, 0, start, len);
3770 }
3771
3772 void copy_extent_buffer_full(const struct extent_buffer *dst,
3773 const struct extent_buffer *src)
3774 {
3775 const int unit_size = src->folio_size;
3776 unsigned long cur = 0;
3777
3778 ASSERT(dst->len == src->len);
3779
3780 while (cur < src->len) {
3781 unsigned long index = get_eb_folio_index(src, cur);
3782 unsigned long offset = get_eb_offset_in_folio(src, cur);
3783 unsigned long cur_len = min(src->len, unit_size - offset);
3784 void *addr = folio_address(src->folios[index]) + offset;
3785
3786 write_extent_buffer(dst, addr, cur, cur_len);
3787
3788 cur += cur_len;
3789 }
3790 }
3791
3792 void copy_extent_buffer(const struct extent_buffer *dst,
3793 const struct extent_buffer *src,
3794 unsigned long dst_offset, unsigned long src_offset,
3795 unsigned long len)
3796 {
3797 const int unit_size = dst->folio_size;
3798 u64 dst_len = dst->len;
3799 size_t cur;
3800 size_t offset;
3801 char *kaddr;
3802 unsigned long i = get_eb_folio_index(dst, dst_offset);
3803
3804 if (check_eb_range(dst, dst_offset, len) ||
3805 check_eb_range(src, src_offset, len))
3806 return;
3807
3808 WARN_ON(src->len != dst_len);
3809
3810 offset = get_eb_offset_in_folio(dst, dst_offset);
3811
3812 while (len > 0) {
3813 assert_eb_folio_uptodate(dst, i);
3814
3815 cur = min(len, (unsigned long)(unit_size - offset));
3816
3817 kaddr = folio_address(dst->folios[i]);
3818 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3819
3820 src_offset += cur;
3821 len -= cur;
3822 offset = 0;
3823 i++;
3824 }
3825 }
3826
3827 /*
3828 * Calculate the folio and offset of the byte containing the given bit number.
3829 *
3830 * @eb: the extent buffer
3831 * @start: offset of the bitmap item in the extent buffer
3832 * @nr: bit number
3833 * @folio_index: return index of the folio in the extent buffer that contains
3834 * the given bit number
3835 * @folio_offset: return offset into the folio given by folio_index
3836 *
3837 * This helper hides the ugliness of finding the byte in an extent buffer which
3838 * contains a given bit.
3839 */
3840 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
3841 unsigned long start, unsigned long nr,
3842 unsigned long *folio_index,
3843 size_t *folio_offset)
3844 {
3845 size_t byte_offset = BIT_BYTE(nr);
3846 size_t offset;
3847
3848 /*
3849 * The byte we want is the offset of the extent buffer + the offset of
3850 * the bitmap item in the extent buffer + the offset of the byte in the
3851 * bitmap item.
3852 */
3853 offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
3854
3855 *folio_index = offset >> eb->folio_shift;
3856 *folio_offset = offset_in_eb_folio(eb, offset);
3857 }
3858
3859 /*
3860 * Determine whether a bit in a bitmap item is set.
3861 *
3862 * @eb: the extent buffer
3863 * @start: offset of the bitmap item in the extent buffer
3864 * @nr: bit number to test
3865 */
3866 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
3867 unsigned long nr)
3868 {
3869 unsigned long i;
3870 size_t offset;
3871 u8 *kaddr;
3872
3873 eb_bitmap_offset(eb, start, nr, &i, &offset);
3874 assert_eb_folio_uptodate(eb, i);
3875 kaddr = folio_address(eb->folios[i]);
3876 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
3877 }
3878
3879 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
3880 {
3881 unsigned long index = get_eb_folio_index(eb, bytenr);
3882
3883 if (check_eb_range(eb, bytenr, 1))
3884 return NULL;
3885 return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
3886 }
3887
3888 /*
3889 * Set an area of a bitmap to 1.
3890 *
3891 * @eb: the extent buffer
3892 * @start: offset of the bitmap item in the extent buffer
3893 * @pos: bit number of the first bit
3894 * @len: number of bits to set
3895 */
3896 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
3897 unsigned long pos, unsigned long len)
3898 {
3899 unsigned int first_byte = start + BIT_BYTE(pos);
3900 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
3901 const bool same_byte = (first_byte == last_byte);
3902 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
3903 u8 *kaddr;
3904
3905 if (same_byte)
3906 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
3907
3908 /* Handle the first byte. */
3909 kaddr = extent_buffer_get_byte(eb, first_byte);
3910 *kaddr |= mask;
3911 if (same_byte)
3912 return;
3913
3914 /* Handle the byte aligned part. */
3915 ASSERT(first_byte + 1 <= last_byte);
3916 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
3917
3918 /* Handle the last byte. */
3919 kaddr = extent_buffer_get_byte(eb, last_byte);
3920 *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
3921 }
3922
3923
3924 /*
3925 * Clear an area of a bitmap.
3926 *
3927 * @eb: the extent buffer
3928 * @start: offset of the bitmap item in the extent buffer
3929 * @pos: bit number of the first bit
3930 * @len: number of bits to clear
3931 */
3932 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
3933 unsigned long start, unsigned long pos,
3934 unsigned long len)
3935 {
3936 unsigned int first_byte = start + BIT_BYTE(pos);
3937 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
3938 const bool same_byte = (first_byte == last_byte);
3939 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
3940 u8 *kaddr;
3941
3942 if (same_byte)
3943 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
3944
3945 /* Handle the first byte. */
3946 kaddr = extent_buffer_get_byte(eb, first_byte);
3947 *kaddr &= ~mask;
3948 if (same_byte)
3949 return;
3950
3951 /* Handle the byte aligned part. */
3952 ASSERT(first_byte + 1 <= last_byte);
3953 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
3954
3955 /* Handle the last byte. */
3956 kaddr = extent_buffer_get_byte(eb, last_byte);
3957 *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
3958 }
3959
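/*
 * Illustrative sketch only: a round trip through the bitmap helpers, in the
 * style of the free space tree.  The helper name is hypothetical and
 * @bitmap_start is an assumed offset of a bitmap item inside @eb; the bit
 * numbers are arbitrary.
 */
static void __maybe_unused demo_eb_bitmap(const struct extent_buffer *eb,
					  unsigned long bitmap_start)
{
	/* Set bits [8, 24) ... */
	extent_buffer_bitmap_set(eb, bitmap_start, 8, 16);
	/* ... clear the first half again ... */
	extent_buffer_bitmap_clear(eb, bitmap_start, 8, 8);
	/* ... and bit 16 must still be set. */
	WARN_ON(!extent_buffer_test_bit(eb, bitmap_start, 16));
}
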
3960 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
3961 {
3962 unsigned long distance = (src > dst) ? src - dst : dst - src;
3963 return distance < len;
3964 }
3965
3966 void memcpy_extent_buffer(const struct extent_buffer *dst,
3967 unsigned long dst_offset, unsigned long src_offset,
3968 unsigned long len)
3969 {
3970 const int unit_size = dst->folio_size;
3971 unsigned long cur_off = 0;
3972
3973 if (check_eb_range(dst, dst_offset, len) ||
3974 check_eb_range(dst, src_offset, len))
3975 return;
3976
3977 if (dst->addr) {
3978 const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
3979
3980 if (use_memmove)
3981 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
3982 else
3983 memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
3984 return;
3985 }
3986
3987 while (cur_off < len) {
3988 unsigned long cur_src = cur_off + src_offset;
3989 unsigned long folio_index = get_eb_folio_index(dst, cur_src);
3990 unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
3991 unsigned long cur_len = min(src_offset + len - cur_src,
3992 unit_size - folio_off);
3993 void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
3994 const bool use_memmove = areas_overlap(src_offset + cur_off,
3995 dst_offset + cur_off, cur_len);
3996
3997 __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
3998 use_memmove);
3999 cur_off += cur_len;
4000 }
4001 }
4002
4003 void memmove_extent_buffer(const struct extent_buffer *dst,
4004 unsigned long dst_offset, unsigned long src_offset,
4005 unsigned long len)
4006 {
4007 unsigned long dst_end = dst_offset + len - 1;
4008 unsigned long src_end = src_offset + len - 1;
4009
4010 if (check_eb_range(dst, dst_offset, len) ||
4011 check_eb_range(dst, src_offset, len))
4012 return;
4013
4014 if (dst_offset < src_offset) {
4015 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4016 return;
4017 }
4018
4019 if (dst->addr) {
4020 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4021 return;
4022 }
4023
4024 while (len > 0) {
4025 unsigned long src_i;
4026 size_t cur;
4027 size_t dst_off_in_folio;
4028 size_t src_off_in_folio;
4029 void *src_addr;
4030 bool use_memmove;
4031
4032 src_i = get_eb_folio_index(dst, src_end);
4033
4034 dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4035 src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4036
4037 cur = min_t(unsigned long, len, src_off_in_folio + 1);
4038 cur = min(cur, dst_off_in_folio + 1);
4039
4040 src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4041 cur + 1;
4042 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4043 cur);
4044
4045 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4046 use_memmove);
4047
4048 dst_end -= cur;
4049 src_end -= cur;
4050 len -= cur;
4051 }
4052 }
4053
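/*
 * Illustrative sketch only: moving data around inside a single eb.  As with
 * the C library routines, memmove_extent_buffer() is the safe choice when
 * the source and destination ranges may overlap.  The helper name, offsets
 * and lengths are hypothetical.
 */
static void __maybe_unused demo_shift_ranges(const struct extent_buffer *leaf)
{
	/* Non-overlapping copy: duplicate 64 bytes from offset 4096 to 0. */
	memcpy_extent_buffer(leaf, 0, 4096, 64);
	/* Overlapping move: slide 256 bytes at offset 128 down to offset 96. */
	memmove_extent_buffer(leaf, 96, 128, 256);
}
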
4054 #define GANG_LOOKUP_SIZE 16
4055 static struct extent_buffer *get_next_extent_buffer(
4056 const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr)
4057 {
4058 struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4059 struct extent_buffer *found = NULL;
4060 u64 folio_start = folio_pos(folio);
4061 u64 cur = folio_start;
4062
4063 ASSERT(in_range(bytenr, folio_start, PAGE_SIZE));
4064 lockdep_assert_held(&fs_info->buffer_lock);
4065
4066 while (cur < folio_start + PAGE_SIZE) {
4067 int ret;
4068 int i;
4069
4070 ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4071 (void **)gang, cur >> fs_info->sectorsize_bits,
4072 min_t(unsigned int, GANG_LOOKUP_SIZE,
4073 PAGE_SIZE / fs_info->nodesize));
4074 if (ret == 0)
4075 goto out;
4076 for (i = 0; i < ret; i++) {
4077 /* Already beyond page end */
4078 if (gang[i]->start >= folio_start + PAGE_SIZE)
4079 goto out;
4080 /* Found one */
4081 if (gang[i]->start >= bytenr) {
4082 found = gang[i];
4083 goto out;
4084 }
4085 }
4086 cur = gang[ret - 1]->start + gang[ret - 1]->len;
4087 }
4088 out:
4089 return found;
4090 }
4091
4092 static int try_release_subpage_extent_buffer(struct folio *folio)
4093 {
4094 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
4095 u64 cur = folio_pos(folio);
4096 const u64 end = cur + PAGE_SIZE;
4097 int ret;
4098
4099 while (cur < end) {
4100 struct extent_buffer *eb = NULL;
4101
4102 /*
4103 		 * Unlike try_release_extent_buffer(), which uses folio private
4104 		 * to grab the buffer, in the subpage case we rely on the radix
4105 		 * tree, thus we need to ensure radix tree consistency.
4106 		 *
4107 		 * We also want an atomic snapshot of the radix tree, thus we go
4108 		 * with the spinlock rather than RCU.
4109 */
4110 spin_lock(&fs_info->buffer_lock);
4111 eb = get_next_extent_buffer(fs_info, folio, cur);
4112 if (!eb) {
4113 /* No more eb in the page range after or at cur */
4114 spin_unlock(&fs_info->buffer_lock);
4115 break;
4116 }
4117 cur = eb->start + eb->len;
4118
4119 /*
4120 * The same as try_release_extent_buffer(), to ensure the eb
4121 * won't disappear out from under us.
4122 */
4123 spin_lock(&eb->refs_lock);
4124 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4125 spin_unlock(&eb->refs_lock);
4126 spin_unlock(&fs_info->buffer_lock);
4127 break;
4128 }
4129 spin_unlock(&fs_info->buffer_lock);
4130
4131 /*
4132 		 * If tree ref isn't set then we know the ref on this eb is a
4133 		 * real ref, so just return; this eb will likely be freed soon
4134 		 * anyway.
4135 */
4136 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4137 spin_unlock(&eb->refs_lock);
4138 break;
4139 }
4140
4141 /*
4142 * Here we don't care about the return value, we will always
4143 * check the folio private at the end. And
4144 * release_extent_buffer() will release the refs_lock.
4145 */
4146 release_extent_buffer(eb);
4147 }
4148 /*
4149 	 * Finally, check whether folio private has been cleared: if all ebs in
4150 	 * the page have been released, folio private should be cleared by now.
4151 */
4152 spin_lock(&folio->mapping->i_private_lock);
4153 if (!folio_test_private(folio))
4154 ret = 1;
4155 else
4156 ret = 0;
4157 spin_unlock(&folio->mapping->i_private_lock);
4158 return ret;
4159
4160 }
4161
4162 int try_release_extent_buffer(struct folio *folio)
4163 {
4164 struct extent_buffer *eb;
4165
4166 if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
4167 return try_release_subpage_extent_buffer(folio);
4168
4169 /*
4170 * We need to make sure nobody is changing folio private, as we rely on
4171 * folio private as the pointer to extent buffer.
4172 */
4173 spin_lock(&folio->mapping->i_private_lock);
4174 if (!folio_test_private(folio)) {
4175 spin_unlock(&folio->mapping->i_private_lock);
4176 return 1;
4177 }
4178
4179 eb = folio_get_private(folio);
4180 BUG_ON(!eb);
4181
4182 /*
4183 * This is a little awful but should be ok, we need to make sure that
4184 * the eb doesn't disappear out from under us while we're looking at
4185 * this page.
4186 */
4187 spin_lock(&eb->refs_lock);
4188 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4189 spin_unlock(&eb->refs_lock);
4190 spin_unlock(&folio->mapping->i_private_lock);
4191 return 0;
4192 }
4193 spin_unlock(&folio->mapping->i_private_lock);
4194
4195 /*
4196 	 * If tree ref isn't set then we know the ref on this eb is a real ref,
4197 	 * so just return; this page will likely be freed soon anyway.
4198 */
4199 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4200 spin_unlock(&eb->refs_lock);
4201 return 0;
4202 }
4203
4204 return release_extent_buffer(eb);
4205 }
4206
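/*
 * Illustrative sketch only: how an address_space release_folio callback might
 * funnel into try_release_extent_buffer().  The helper name is hypothetical
 * and the real btree callback lives elsewhere in btrfs; this only shows the
 * return-value convention (non-zero means folio private was released and the
 * folio may be freed).
 */
static bool __maybe_unused demo_btree_release_folio(struct folio *folio,
						    gfp_t gfp_flags)
{
	return try_release_extent_buffer(folio) != 0;
}
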
4207 /*
4208 * Attempt to readahead a child block.
4209 *
4210 * @fs_info: the fs_info
4211 * @bytenr: bytenr to read
4212 * @owner_root: objectid of the root that owns this eb
4213 * @gen: generation for the uptodate check, can be 0
4214 * @level: level for the eb
4215 *
4216 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
4217 * normal uptodate check of the eb, without checking the generation. If we have
4218 * to read the block we will not block on anything.
4219 */
4220 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4221 u64 bytenr, u64 owner_root, u64 gen, int level)
4222 {
4223 struct btrfs_tree_parent_check check = {
4224 .has_first_key = 0,
4225 .level = level,
4226 .transid = gen
4227 };
4228 struct extent_buffer *eb;
4229 int ret;
4230
4231 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4232 if (IS_ERR(eb))
4233 return;
4234
4235 if (btrfs_buffer_uptodate(eb, gen, 1)) {
4236 free_extent_buffer(eb);
4237 return;
4238 }
4239
4240 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4241 if (ret < 0)
4242 free_extent_buffer_stale(eb);
4243 else
4244 free_extent_buffer(eb);
4245 }
4246
4247 /*
4248 * Readahead a node's child block.
4249 *
4250 * @node: parent node we're reading from
4251 * @slot: slot in the parent node for the child we want to read
4252 *
4253  * A helper for btrfs_readahead_tree_block(); we simply read the bytenr pointed
4254  * to by the slot in the node provided.
4255 */
4256 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4257 {
4258 btrfs_readahead_tree_block(node->fs_info,
4259 btrfs_node_blockptr(node, slot),
4260 btrfs_header_owner(node),
4261 btrfs_node_ptr_generation(node, slot),
4262 btrfs_header_level(node) - 1);
4263 }
4264