// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/fsverity.h>
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "locking.h"
#include "backref.h"
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
#include "block-group.h"
#include "compression.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "file.h"
#include "dev-replace.h"
#include "super.h"
#include "transaction.h"

static struct kmem_cache *extent_buffer_cache;

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	unsigned long flags;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	list_add(&eb->leak_list, &fs_info->allocated_ebs);
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	unsigned long flags;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	list_del(&eb->leak_list);
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
	 */
	if (!fs_info->allocated_ebs.next)
		return;

	WARN_ON(!list_empty(&fs_info->allocated_ebs));
	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	while (!list_empty(&fs_info->allocated_ebs)) {
		eb = list_first_entry(&fs_info->allocated_ebs,
				      struct extent_buffer, leak_list);
		pr_err(
	"BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
		       btrfs_header_owner(eb));
		list_del(&eb->leak_list);
		WARN_ON_ONCE(1);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
#else
#define btrfs_leak_debug_add_eb(eb)			do {} while (0)
#define btrfs_leak_debug_del_eb(eb)			do {} while (0)
#endif

/*
 * Structure to record info about the bio being assembled, and other info like
 * how many bytes are there before stripe/ordered extent boundary.
 */
struct btrfs_bio_ctrl {
	struct btrfs_bio *bbio;
	enum btrfs_compression_type compress_type;
	u32 len_to_oe_boundary;
	blk_opf_t opf;
	btrfs_bio_end_io_t end_io_func;
	struct writeback_control *wbc;

	/*
	 * The sectors of the page which are going to be submitted by
	 * extent_writepage_io().
	 * This is to avoid touching ranges covered by compression/inline.
	 */
	unsigned long submit_bitmap;
};
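
/*
 * Worked example for @submit_bitmap (a sketch, assuming a 16K page size and
 * a 4K sectorsize): bits 0-3 then cover the sectors at folio_pos(folio) +
 * 0K/4K/8K/12K.  A sector handed off to compression or an inline extent gets
 * its bit cleared, so extent_writepage_io() later skips it.
 */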

static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
{
	struct btrfs_bio *bbio = bio_ctrl->bbio;

	if (!bbio)
		return;

	/* Caller should ensure the bio has at least some range added */
	ASSERT(bbio->bio.bi_iter.bi_size);

	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
		btrfs_submit_compressed_read(bbio);
	else
		btrfs_submit_bbio(bbio, 0);

	/* The bbio is owned by the end_io handler now */
	bio_ctrl->bbio = NULL;
}

/*
 * Submit or fail the current bio in the bio_ctrl structure.
 */
static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
{
	struct btrfs_bio *bbio = bio_ctrl->bbio;

	if (!bbio)
		return;

	if (ret) {
		ASSERT(ret < 0);
		btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
		/* The bio is owned by the end_io handler now */
		bio_ctrl->bbio = NULL;
	} else {
		submit_one_bio(bio_ctrl);
	}
}

int __init extent_buffer_init_cachep(void)
{
	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
						sizeof(struct extent_buffer), 0, 0,
						NULL);
	if (!extent_buffer_cache)
		return -ENOMEM;

	return 0;
}

void __cold extent_buffer_free_cachep(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_buffer_cache);
}

static void process_one_folio(struct btrfs_fs_info *fs_info,
			      struct folio *folio, const struct folio *locked_folio,
			      unsigned long page_ops, u64 start, u64 end)
{
	u32 len;

	ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
	len = end + 1 - start;

	if (page_ops & PAGE_SET_ORDERED)
		btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
	if (page_ops & PAGE_START_WRITEBACK) {
		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
		btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
	}
	if (page_ops & PAGE_END_WRITEBACK)
		btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);

	if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}

static void __process_folios_contig(struct address_space *mapping,
				    const struct folio *locked_folio, u64 start,
				    u64 end, unsigned long page_ops)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
	pgoff_t start_index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	pgoff_t index = start_index;
	struct folio_batch fbatch;
	int i;

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		int found_folios;

		found_folios = filemap_get_folios_contig(mapping, &index,
							 end_index, &fbatch);
		for (i = 0; i < found_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			process_one_folio(fs_info, folio, locked_folio,
					  page_ops, start, end);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static noinline void __unlock_for_delalloc(const struct inode *inode,
					   const struct folio *locked_folio,
					   u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;

	ASSERT(locked_folio);
	if (index == locked_folio->index && end_index == index)
		return;

	__process_folios_contig(inode->i_mapping, locked_folio, start, end,
				PAGE_UNLOCK);
}

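/*
 * Lock all folios in the range [@start, @end] except @locked_folio, marking
 * each one writer-locked in the subpage bitmap.
 *
 * Return 0 on success.
 * Return -EAGAIN if any folio in the range is missing or no longer dirty; in
 * that case every folio locked so far is unlocked again before returning.
 */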
static noinline int lock_delalloc_folios(struct inode *inode,
					 const struct folio *locked_folio,
					 u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start_index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	pgoff_t index = start_index;
	u64 processed_end = start;
	struct folio_batch fbatch;

	if (index == locked_folio->index && index == end_index)
		return 0;

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		unsigned int found_folios, i;

		found_folios = filemap_get_folios_contig(mapping, &index,
							 end_index, &fbatch);
		if (found_folios == 0)
			goto out;

		for (i = 0; i < found_folios; i++) {
			struct folio *folio = fbatch.folios[i];
			u64 range_start;
			u32 range_len;

			if (folio == locked_folio)
				continue;

			folio_lock(folio);
			if (!folio_test_dirty(folio) || folio->mapping != mapping) {
				folio_unlock(folio);
				goto out;
			}
			range_start = max_t(u64, folio_pos(folio), start);
			range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
					  end + 1) - range_start;
			btrfs_folio_set_writer_lock(fs_info, folio, range_start, range_len);

			processed_end = range_start + range_len - 1;
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	return 0;
out:
	folio_batch_release(&fbatch);
	if (processed_end > start)
		__unlock_for_delalloc(inode, locked_folio, start,
				      processed_end);
	return -EAGAIN;
}

/*
 * Find and lock a contiguous range of bytes in the file marked as delalloc,
 * no more than @max_bytes.
 *
 * @start:	The original start bytenr to search.
 *		Will store the extent range start bytenr.
 * @end:	The original end bytenr of the search range.
 *		Will store the extent range end bytenr.
 *
 * Return true if we find a delalloc range which starts inside the original
 * range, and @start/@end will store the delalloc range start/end.
 *
 * Return false if we can't find any delalloc range which starts inside the
 * original range, and @start/@end will be the non-delalloc range start/end.
 */
EXPORT_FOR_TESTS
noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
						 struct folio *locked_folio,
						 u64 *start, u64 *end)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	const u64 orig_start = *start;
	const u64 orig_end = *end;
	/* The sanity tests may not set a valid fs_info. */
	u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
	u64 delalloc_start;
	u64 delalloc_end;
	bool found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

	/* Caller should pass a valid @end to indicate the search range end */
	ASSERT(orig_end > orig_start);

	/* The range should at least cover part of the folio */
	ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
		 orig_end <= folio_pos(locked_folio)));
again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
					  max_bytes, &cached_state);
	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
		*start = delalloc_start;

		/* @delalloc_end can be -1, never go beyond @orig_end */
		*end = min(delalloc_end, orig_end);
		free_extent_state(cached_state);
		return false;
	}

	/*
	 * start comes from the offset of locked_folio.  We have to lock
	 * folios in order, so we can't process delalloc bytes before
	 * locked_folio.
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * Make sure to limit the number of folios we try to lock down.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes)
		delalloc_end = delalloc_start + max_bytes - 1;

	/* step two, lock all the folios after the folio that has start */
	ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
				   delalloc_end);
	ASSERT(!ret || ret == -EAGAIN);
	if (ret == -EAGAIN) {
		/*
		 * Some of the folios are gone, let's avoid looping by
		 * shortening the size of the delalloc range we're searching.
		 */
		free_extent_state(cached_state);
		cached_state = NULL;
		if (!loops) {
			max_bytes = PAGE_SIZE;
			loops = 1;
			goto again;
		} else {
			found = false;
			goto out_failed;
		}
	}

	/* step three, lock the state bits for the whole range */
	lock_extent(tree, delalloc_start, delalloc_end, &cached_state);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, cached_state);

	unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
	if (!ret) {
		__unlock_for_delalloc(inode, locked_folio, delalloc_start,
				      delalloc_end);
		cond_resched();
		goto again;
	}
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
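
/*
 * Typical caller pattern, as a sketch (writepage_delalloc() below is the
 * real user): walk a folio and run every delalloc range found inside it.
 *
 *	u64 dstart = folio_pos(folio);
 *	u64 dend;
 *
 *	while (dstart < page_end) {
 *		dend = page_end;
 *		if (!find_lock_delalloc_range(inode, folio, &dstart, &dend)) {
 *			dstart = dend + 1;
 *			continue;
 *		}
 *		// [dstart, dend] is locked delalloc here, run/submit it.
 *		dstart = dend + 1;
 *	}
 */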

void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
				  const struct folio *locked_folio,
				  struct extent_state **cached,
				  u32 clear_bits, unsigned long page_ops)
{
	clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);

	__process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
				end, page_ops);
}

static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
{
	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);

	if (!fsverity_active(folio->mapping->host) ||
	    btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
	    start >= i_size_read(folio->mapping->host))
		return true;
	return fsverity_verify_folio(folio);
}

static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
{
	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);

	ASSERT(folio_pos(folio) <= start &&
	       start + len <= folio_pos(folio) + PAGE_SIZE);

	if (uptodate && btrfs_verify_folio(folio, start, len))
		btrfs_folio_set_uptodate(fs_info, folio, start, len);
	else
		btrfs_folio_clear_uptodate(fs_info, folio, start, len);

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		folio_unlock(folio);
	else
		btrfs_subpage_end_reader(fs_info, folio, start, len);
}

/*
 * After a write IO is done, we need to:
 *
 * - clear the uptodate bits on error
 * - clear the writeback bits in the extent tree for the range
 * - folio_end_writeback() if there is no more pending io for the folio
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bbio_data_write(struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct bio *bio = &bbio->bio;
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;
	const u32 sectorsize = fs_info->sectorsize;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		u64 start = folio_pos(folio) + fi.offset;
		u32 len = fi.length;

		/* Only order 0 (single page) folios are allowed for data. */
		ASSERT(folio_order(folio) == 0);

		/* Our read/write should always be sector aligned. */
		if (!IS_ALIGNED(fi.offset, sectorsize))
			btrfs_err(fs_info,
		"partial page write in btrfs with offset %zu and length %zu",
				  fi.offset, fi.length);
		else if (!IS_ALIGNED(fi.length, sectorsize))
			btrfs_info(fs_info,
		"incomplete page write with offset %zu and length %zu",
				   fi.offset, fi.length);

		btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
					    !error);
		if (error)
			mapping_set_error(folio->mapping, error);
		btrfs_folio_clear_writeback(fs_info, folio, start, len);
	}

	bio_put(bio);
}

static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
{
	ASSERT(folio_test_locked(folio));
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio));
	btrfs_subpage_start_reader(fs_info, folio, folio_pos(folio), PAGE_SIZE);
}

/*
 * After a data read IO is done, we need to:
 *
 * - clear the uptodate bits on error
 * - set the uptodate bits if things worked
 * - set the folio up to date if all extents in the tree are uptodate
 * - clear the lock bit in the extent tree
 * - unlock the folio if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bbio_data_read(struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct bio *bio = &bbio->bio;
	struct folio_iter fi;
	const u32 sectorsize = fs_info->sectorsize;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_folio_all(fi, &bbio->bio) {
		bool uptodate = !bio->bi_status;
		struct folio *folio = fi.folio;
		struct inode *inode = folio->mapping->host;
		u64 start;
		u64 end;
		u32 len;

		/* For now only order 0 folios are supported for data. */
		ASSERT(folio_order(folio) == 0);
		btrfs_debug(fs_info,
			    "%s: bi_sector=%llu, err=%d, mirror=%u",
			    __func__, bio->bi_iter.bi_sector, bio->bi_status,
			    bbio->mirror_num);

		/*
		 * We always issue full-sector reads, but if some block in a
		 * folio fails to read, blk_update_request() will advance
		 * bv_offset and adjust bv_len to compensate.  Print a warning
		 * for unaligned offsets, and an error if they don't add up to
		 * a full sector.
		 */
		if (!IS_ALIGNED(fi.offset, sectorsize))
			btrfs_err(fs_info,
		"partial page read in btrfs with offset %zu and length %zu",
				  fi.offset, fi.length);
		else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
			btrfs_info(fs_info,
		"incomplete page read with offset %zu and length %zu",
				   fi.offset, fi.length);

		start = folio_pos(folio) + fi.offset;
		end = start + fi.length - 1;
		len = fi.length;

		if (likely(uptodate)) {
			loff_t i_size = i_size_read(inode);
			pgoff_t end_index = i_size >> folio_shift(folio);

			/*
			 * Zero out the remaining part if this range straddles
			 * i_size.
			 *
			 * Here we should only zero the range inside the folio,
			 * not touch anything else.
			 *
			 * NOTE: i_size is exclusive while end is inclusive.
			 */
			if (folio_index(folio) == end_index && i_size <= end) {
				u32 zero_start = max(offset_in_folio(folio, i_size),
						     offset_in_folio(folio, start));
				u32 zero_len = offset_in_folio(folio, end) + 1 -
					       zero_start;

				folio_zero_range(folio, zero_start, zero_len);
			}
		}

		/* Update page status and unlock. */
		end_folio_read(folio, uptodate, start, len);
	}
	bio_put(bio);
}

/*
 * Populate every free slot in a provided array with folios, using GFP_NOFS.
 *
 * @nr_folios:   number of folios to allocate
 * @folio_array: the array to fill with folios; any existing non-NULL entries in
 *		 the array will be skipped
 *
 * Return: 0        if all folios were able to be allocated;
 *         -ENOMEM  otherwise, the partially allocated folios would be freed and
 *                  the array slots zeroed
 */
int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
{
	for (int i = 0; i < nr_folios; i++) {
		if (folio_array[i])
			continue;
		folio_array[i] = folio_alloc(GFP_NOFS, 0);
		if (!folio_array[i])
			goto error;
	}
	return 0;
error:
	for (int i = 0; i < nr_folios; i++) {
		if (folio_array[i])
			folio_put(folio_array[i]);
	}
	return -ENOMEM;
}
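
/*
 * Usage sketch (hypothetical caller): the array may be partially populated
 * up front, only the NULL slots are filled.
 *
 *	struct folio *folios[16] = { 0 };
 *
 *	if (btrfs_alloc_folio_array(ARRAY_SIZE(folios), folios))
 *		return -ENOMEM;
 */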

/*
 * Populate every free slot in a provided array with pages, using GFP_NOFS.
 *
 * @nr_pages:   number of pages to allocate
 * @page_array: the array to fill with pages; any existing non-null entries in
 *		the array will be skipped
 * @nofail:	whether using __GFP_NOFAIL flag
 *
 * Return: 0        if all pages were able to be allocated;
 *         -ENOMEM  otherwise, the partially allocated pages would be freed and
 *                  the array slots zeroed
 */
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
			   bool nofail)
{
	const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
	unsigned int allocated;

	for (allocated = 0; allocated < nr_pages;) {
		unsigned int last = allocated;

		allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
		if (unlikely(allocated == last)) {
			/* No progress, fail and do cleanup. */
			for (int i = 0; i < allocated; i++) {
				__free_page(page_array[i]);
				page_array[i] = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}
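
/*
 * Note: alloc_pages_bulk_array() only fills NULL slots and may make partial
 * progress, so the loop above keeps calling it until the array is full or a
 * pass makes no progress at all (treated as -ENOMEM; with @nofail set,
 * __GFP_NOFAIL guarantees forward progress instead).
 */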

/*
 * Populate needed folios for the extent buffer.
 *
 * For now, the folios populated are always in order 0 (aka, single page).
 */
static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
{
	struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
	int num_pages = num_extent_pages(eb);
	int ret;

	ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
	if (ret < 0)
		return ret;

	for (int i = 0; i < num_pages; i++)
		eb->folios[i] = page_folio(page_array[i]);
	eb->folio_size = PAGE_SIZE;
	eb->folio_shift = PAGE_SHIFT;
	return 0;
}

static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
				struct folio *folio, u64 disk_bytenr,
				unsigned int pg_offset)
{
	struct bio *bio = &bio_ctrl->bbio->bio;
	struct bio_vec *bvec = bio_last_bvec_all(bio);
	const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
	struct folio *bv_folio = page_folio(bvec->bv_page);

	if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
		/*
		 * For compression, all IO should have its logical bytenr set
		 * to the starting bytenr of the compressed extent.
		 */
		return bio->bi_iter.bi_sector == sector;
	}

	/*
	 * The contig check requires the following conditions to be met:
	 *
	 * 1) The folios belong to the same inode
	 *    This is implied by the call chain.
	 *
	 * 2) The range has adjacent logical bytenr
	 *
	 * 3) The range has adjacent file offset
	 *    This is required for the usage of btrfs_bio->file_offset.
	 */
	return bio_end_sector(bio) == sector &&
	       folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len ==
	       folio_pos(folio) + pg_offset;
}
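
/*
 * Worked example (a sketch): if the current bio ends at disk bytenr 1M
 * (bio_end_sector() == 2048) and its last bvec ends at file offset 64K, a
 * new sector at disk bytenr 1M for file offset 64K is contiguous.  The same
 * disk bytenr for file offset 128K is not, because condition 3) fails and
 * btrfs_bio->file_offset could then no longer describe the whole bio.
 */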

static void alloc_new_bio(struct btrfs_inode *inode,
			  struct btrfs_bio_ctrl *bio_ctrl,
			  u64 disk_bytenr, u64 file_offset)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_bio *bbio;

	bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
			       bio_ctrl->end_io_func, NULL);
	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bbio->inode = inode;
	bbio->file_offset = file_offset;
	bio_ctrl->bbio = bbio;
	bio_ctrl->len_to_oe_boundary = U32_MAX;

	/* Limit data write bios to the ordered boundary. */
	if (bio_ctrl->wbc) {
		struct btrfs_ordered_extent *ordered;

		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (ordered) {
			bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
					ordered->file_offset +
					ordered->disk_num_bytes - file_offset);
			bbio->ordered = ordered;
		}

		/*
		 * Pick the last added device to support cgroup writeback.  For
		 * multi-device file systems this means blk-cgroup policies have
		 * to always be set on the last added/replaced device.
		 * This is a bit odd but has been like that for a long time.
		 */
		bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
		wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
	}
}

/*
 * @disk_bytenr: logical bytenr where the write will be
 * @folio:	 folio to add to the bio
 * @size:	 portion of the folio that we want to write to
 * @pg_offset:	 offset of the new bio or to check whether we are adding
 *		 a contiguous folio to the previous one
 *
 * This will either add the folio into the existing @bio_ctrl->bbio, or
 * allocate a new one in @bio_ctrl->bbio.
 * The mirror number for this IO should already be initialized in
 * @bio_ctrl->mirror_num.
 */
static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
				u64 disk_bytenr, struct folio *folio,
				size_t size, unsigned long pg_offset)
{
	struct btrfs_inode *inode = folio_to_inode(folio);

	ASSERT(pg_offset + size <= PAGE_SIZE);
	ASSERT(bio_ctrl->end_io_func);

	if (bio_ctrl->bbio &&
	    !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
		submit_one_bio(bio_ctrl);

	do {
		u32 len = size;

		/* Allocate new bio if needed */
		if (!bio_ctrl->bbio) {
			alloc_new_bio(inode, bio_ctrl, disk_bytenr,
				      folio_pos(folio) + pg_offset);
		}

		/* Cap to the current ordered extent boundary if there is one. */
		if (len > bio_ctrl->len_to_oe_boundary) {
			ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
			ASSERT(is_data_inode(inode));
			len = bio_ctrl->len_to_oe_boundary;
		}

		if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
			/* bio full: move on to a new one */
			submit_one_bio(bio_ctrl);
			continue;
		}

		if (bio_ctrl->wbc)
			wbc_account_cgroup_owner(bio_ctrl->wbc, &folio->page,
						 len);

		size -= len;
		pg_offset += len;
		disk_bytenr += len;

		/*
		 * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
		 * sector aligned.  alloc_new_bio() then sets it to the end of
		 * our ordered extent for writes into zoned devices.
		 *
		 * When len_to_oe_boundary is tracking an ordered extent, we
		 * trust the ordered extent code to align things properly, and
		 * the check above to cap our write to the ordered extent
		 * boundary is correct.
		 *
		 * When len_to_oe_boundary is U32_MAX, the cap above would
		 * result in a 4095 byte IO for the last folio right before
		 * we hit the bio limit of UINT_MAX.  bio_add_folio() has all
		 * the checks required to make sure we don't overflow the bio,
		 * and we should just ignore len_to_oe_boundary completely
		 * unless we're using it to track an ordered extent.
		 *
		 * It's pretty hard to make a bio sized U32_MAX, but it can
		 * happen when the page cache is able to feed us contiguous
		 * folios for large extents.
		 */
		if (bio_ctrl->len_to_oe_boundary != U32_MAX)
			bio_ctrl->len_to_oe_boundary -= len;

		/* Ordered extent boundary: move on to a new bio. */
		if (bio_ctrl->len_to_oe_boundary == 0)
			submit_one_bio(bio_ctrl);
	} while (size);
}
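
/*
 * Example of the split logic above (a sketch): with len_to_oe_boundary at
 * 8K, a 12K write is added as an 8K chunk first, which drops
 * len_to_oe_boundary to zero and submits the bio at the ordered extent
 * boundary; the remaining 4K is then added to a freshly allocated bio on
 * the next loop iteration.
 */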

static int attach_extent_buffer_folio(struct extent_buffer *eb,
				      struct folio *folio,
				      struct btrfs_subpage *prealloc)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;

	/*
	 * If the page is mapped to btree inode, we should hold the private
	 * lock to prevent race.
	 * For cloned or dummy extent buffers, their pages are not mapped and
	 * will not race with any other ebs.
	 */
	if (folio->mapping)
		lockdep_assert_held(&folio->mapping->i_private_lock);

	if (fs_info->nodesize >= PAGE_SIZE) {
		if (!folio_test_private(folio))
			folio_attach_private(folio, eb);
		else
			WARN_ON(folio_get_private(folio) != eb);
		return 0;
	}

	/* Already mapped, just free prealloc */
	if (folio_test_private(folio)) {
		btrfs_free_subpage(prealloc);
		return 0;
	}

	if (prealloc)
		/* Has preallocated memory for subpage */
		folio_attach_private(folio, prealloc);
	else
		/* Do new allocation to attach subpage */
		ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
	return ret;
}

int set_page_extent_mapped(struct page *page)
{
	return set_folio_extent_mapped(page_folio(page));
}

int set_folio_extent_mapped(struct folio *folio)
{
	struct btrfs_fs_info *fs_info;

	ASSERT(folio->mapping);

	if (folio_test_private(folio))
		return 0;

	fs_info = folio_to_fs_info(folio);

	if (btrfs_is_subpage(fs_info, folio->mapping))
		return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);

	folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
	return 0;
}

void clear_folio_extent_mapped(struct folio *folio)
{
	struct btrfs_fs_info *fs_info;

	ASSERT(folio->mapping);

	if (!folio_test_private(folio))
		return;

	fs_info = folio_to_fs_info(folio);
	if (btrfs_is_subpage(fs_info, folio->mapping))
		return btrfs_detach_subpage(fs_info, folio);

	folio_detach_private(folio);
}

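/*
 * Look up the extent map covering [@start, @start + @len), preferring the
 * one cached in @em_cached.  On a cache miss the ordered range is flushed
 * first and the freshly looked up map becomes the new cached entry.
 */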
static struct extent_map *__get_extent_map(struct inode *inode,
					   struct folio *folio, u64 start,
					   u64 len, struct extent_map **em_cached)
{
	struct extent_map *em;
	struct extent_state *cached_state = NULL;

	ASSERT(em_cached);

	if (*em_cached) {
		em = *em_cached;
		if (extent_map_in_tree(em) && start >= em->start &&
		    start < extent_map_end(em)) {
			refcount_inc(&em->refs);
			return em;
		}

		free_extent_map(em);
		*em_cached = NULL;
	}

	btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state);
	em = btrfs_get_extent(BTRFS_I(inode), folio, start, len);
	if (!IS_ERR(em)) {
		BUG_ON(*em_cached);
		refcount_inc(&em->refs);
		*em_cached = em;
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state);

	return em;
}

/*
 * Basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers).
 * XXX JDM: This needs looking at to ensure proper page locking
 * Return 0 on success, otherwise return error.
 */
static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
			     struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
{
	struct inode *inode = folio->mapping->host;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	u64 start = folio_pos(folio);
	const u64 end = start + PAGE_SIZE - 1;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	struct extent_map *em;
	int ret = 0;
	size_t pg_offset = 0;
	size_t iosize;
	size_t blocksize = fs_info->sectorsize;

	ret = set_folio_extent_mapped(folio);
	if (ret < 0) {
		folio_unlock(folio);
		return ret;
	}

	if (folio->index == last_byte >> folio_shift(folio)) {
		size_t zero_offset = offset_in_folio(folio, last_byte);

		if (zero_offset) {
			iosize = folio_size(folio) - zero_offset;
			folio_zero_range(folio, zero_offset, iosize);
		}
	}
	bio_ctrl->end_io_func = end_bbio_data_read;
	begin_folio_read(fs_info, folio);
	while (cur <= end) {
		enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
		bool force_bio_submit = false;
		u64 disk_bytenr;

		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
		if (cur >= last_byte) {
			iosize = folio_size(folio) - pg_offset;
			folio_zero_range(folio, pg_offset, iosize);
			end_folio_read(folio, true, cur, iosize);
			break;
		}
		em = __get_extent_map(inode, folio, cur, end - cur + 1,
				      em_cached);
		if (IS_ERR(em)) {
			end_folio_read(folio, false, cur, end + 1 - cur);
			return PTR_ERR(em);
		}
		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

		compress_type = extent_map_compression(em);

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		iosize = ALIGN(iosize, blocksize);
		if (compress_type != BTRFS_COMPRESS_NONE)
			disk_bytenr = em->disk_bytenr;
		else
			disk_bytenr = extent_map_block_start(em) + extent_offset;
		block_start = extent_map_block_start(em);
		if (em->flags & EXTENT_FLAG_PREALLOC)
			block_start = EXTENT_MAP_HOLE;

		/*
		 * If we have a file range that points to a compressed extent
		 * and it's followed by a consecutive file range that points
		 * to the same compressed extent (possibly with a different
		 * offset and/or length, so it either points to the whole extent
		 * or only part of it), we must make sure we do not submit a
		 * single bio to populate the folios for the 2 ranges because
		 * this makes the compressed extent read zero out the folios
		 * belonging to the 2nd range.  Imagine the following scenario:
		 *
		 *  File layout
		 *  [0 - 8K]                     [8K - 24K]
		 *    |                               |
		 *    |                               |
		 * points to extent X,         points to extent X,
		 * offset 4K, length of 8K     offset 0, length 16K
		 *
		 * [extent X, compressed length = 4K uncompressed length = 16K]
		 *
		 * If the bio to read the compressed extent covers both ranges,
		 * it will decompress extent X into the folios belonging to the
		 * first range and then it will stop, zeroing out the remaining
		 * folios that belong to the other range that points to extent X.
		 * So here we make sure we submit 2 bios, one for the first
		 * range and another one for the second range.  Both will target
		 * the same physical extent from disk, but we can't currently
		 * make the compressed bio endio callback populate the folios
		 * for both ranges because each compressed bio is tightly
		 * coupled with a single extent map, and each range can have
		 * an extent map with a different offset value relative to the
		 * uncompressed data of our extent and different lengths.  This
		 * is a corner case so we prioritize correctness over
		 * non-optimal behavior (submitting 2 bios for the same extent).
		 */
		if (compress_type != BTRFS_COMPRESS_NONE &&
		    prev_em_start && *prev_em_start != (u64)-1 &&
		    *prev_em_start != em->start)
			force_bio_submit = true;

		if (prev_em_start)
			*prev_em_start = em->start;

		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			folio_zero_range(folio, pg_offset, iosize);

			end_folio_read(folio, true, cur, iosize);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the folio */
		if (block_start == EXTENT_MAP_INLINE) {
			end_folio_read(folio, true, cur, iosize);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}

		if (bio_ctrl->compress_type != compress_type) {
			submit_one_bio(bio_ctrl);
			bio_ctrl->compress_type = compress_type;
		}

		if (force_bio_submit)
			submit_one_bio(bio_ctrl);
		submit_extent_folio(bio_ctrl, disk_bytenr, folio, iosize,
				    pg_offset);
		cur = cur + iosize;
		pg_offset += iosize;
	}

	return 0;
}

int btrfs_read_folio(struct file *file, struct folio *folio)
{
	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
	struct extent_map *em_cached = NULL;
	int ret;

	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
	free_extent_map(em_cached);

	/*
	 * If btrfs_do_readpage() failed we will want to submit the assembled
	 * bio to do the cleanup.
	 */
	submit_one_bio(&bio_ctrl);
	return ret;
}

/*
 * Helper for extent_writepage(), doing all of the delayed allocation setup.
 *
 * This returns 1 if btrfs_run_delalloc_range() did all the work required
 * to write the page (copy into inline extent).  In this case the IO has
 * been started and the page is already unlocked.
 *
 * This returns 0 if all went well (page still locked)
 * This returns < 0 if there were errors (page still locked)
 */
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
						 struct folio *folio,
						 struct btrfs_bio_ctrl *bio_ctrl)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
	struct writeback_control *wbc = bio_ctrl->wbc;
	const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
	const u64 page_start = folio_pos(folio);
	const u64 page_end = page_start + folio_size(folio) - 1;
	/*
	 * Save the last found delalloc end.  As the delalloc end can go beyond
	 * the page boundary, we cannot rely on the subpage bitmap to locate
	 * the last delalloc end.
	 */
	u64 last_delalloc_end = 0;
	u64 delalloc_start = page_start;
	u64 delalloc_end = page_end;
	u64 delalloc_to_write = 0;
	int ret = 0;

	/* Save the dirty bitmap as our submission bitmap will be a subset of it. */
	if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
		ASSERT(fs_info->sectors_per_page > 1);
		btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
	} else {
		bio_ctrl->submit_bitmap = 1;
	}

	/* Lock all (subpage) delalloc ranges inside the folio first. */
	while (delalloc_start < page_end) {
		delalloc_end = page_end;
		if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
					      &delalloc_start, &delalloc_end)) {
			delalloc_start = delalloc_end + 1;
			continue;
		}
		btrfs_folio_set_writer_lock(fs_info, folio, delalloc_start,
					    min(delalloc_end, page_end) + 1 -
					    delalloc_start);
		last_delalloc_end = delalloc_end;
		delalloc_start = delalloc_end + 1;
	}
	delalloc_start = page_start;

	if (!last_delalloc_end)
		goto out;

	/* Run the delalloc ranges for the above locked ranges. */
	while (delalloc_start < page_end) {
		u64 found_start;
		u32 found_len;
		bool found;

		if (!is_subpage) {
			/*
			 * For the non-subpage case, the found delalloc range
			 * must cover this folio and there must be only one
			 * locked delalloc range.
			 */
			found_start = page_start;
			found_len = last_delalloc_end + 1 - found_start;
			found = true;
		} else {
			found = btrfs_subpage_find_writer_locked(fs_info, folio,
					delalloc_start, &found_start, &found_len);
		}
		if (!found)
			break;
		/*
		 * If the subpage range covers the last sector, the delalloc
		 * range may end beyond the folio boundary; use the saved
		 * delalloc_end instead.
		 */
		if (found_start + found_len >= page_end)
			found_len = last_delalloc_end + 1 - found_start;

		if (ret >= 0) {
			/* No errors hit so far, run the current delalloc range. */
			ret = btrfs_run_delalloc_range(inode, folio,
						       found_start,
						       found_start + found_len - 1,
						       wbc);
		} else {
			/*
			 * We've hit an error during a previous delalloc range,
			 * have to clean up the remaining locked ranges.
			 */
			unlock_extent(&inode->io_tree, found_start,
				      found_start + found_len - 1, NULL);
			__unlock_for_delalloc(&inode->vfs_inode, folio,
					      found_start,
					      found_start + found_len - 1);
		}

		/*
		 * Some ranges are going to be submitted asynchronously
		 * (compression or inline).  These ranges have their own
		 * control over when to unlock the folios.  We should not
		 * touch them anymore, so clear the range from the submission
		 * bitmap.
		 */
		if (ret > 0) {
			unsigned int start_bit = (found_start - page_start) >>
						 fs_info->sectorsize_bits;
			unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
						page_start) >> fs_info->sectorsize_bits;
			bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
		}
		/*
		 * The above btrfs_run_delalloc_range() may have unlocked the
		 * folio, thus for the last range we cannot touch the folio
		 * anymore.
		 */
		if (found_start + found_len >= last_delalloc_end + 1)
			break;

		delalloc_start = found_start + found_len;
	}
	if (ret < 0)
		return ret;
out:
	if (last_delalloc_end)
		delalloc_end = last_delalloc_end;
	else
		delalloc_end = page_end;
	/*
	 * delalloc_end is already one less than the total length, so
	 * we don't subtract one from PAGE_SIZE.
	 */
	delalloc_to_write +=
		DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);

	/*
	 * If all ranges are submitted asynchronously, we just need to account
	 * for them here.
	 */
	if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
		wbc->nr_to_write -= delalloc_to_write;
		return 1;
	}

	if (wbc->nr_to_write < delalloc_to_write) {
		int thresh = 8192;

		if (delalloc_to_write < thresh * 2)
			thresh = delalloc_to_write;
		wbc->nr_to_write = min_t(u64, delalloc_to_write,
					 thresh);
	}

	return 0;
}
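
/*
 * Worked example for the bitmap_clear() above (a sketch, assuming 4K
 * sectors on a 16K page): an inline/compressed range covering sectors 1-2
 * gives start_bit == 1 and end_bit == 3, so bits 1 and 2 are cleared and
 * extent_writepage_io() will only submit sectors 0 and 3.
 */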

/*
 * Return 0 if we have submitted or queued the sector for submission.
 * Return <0 for critical errors.
 *
 * Caller should make sure filepos < i_size and handle the filepos >= i_size
 * case.
 */
static int submit_one_sector(struct btrfs_inode *inode,
			     struct folio *folio,
			     u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
			     loff_t i_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map *em;
	u64 block_start;
	u64 disk_bytenr;
	u64 extent_offset;
	u64 em_end;
	const u32 sectorsize = fs_info->sectorsize;

	ASSERT(IS_ALIGNED(filepos, sectorsize));

	/* @filepos >= i_size case should be handled by the caller. */
	ASSERT(filepos < i_size);

	em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
	if (IS_ERR(em))
		return PTR_ERR_OR_ZERO(em);

	extent_offset = filepos - em->start;
	em_end = extent_map_end(em);
	ASSERT(filepos <= em_end);
	ASSERT(IS_ALIGNED(em->start, sectorsize));
	ASSERT(IS_ALIGNED(em->len, sectorsize));

	block_start = extent_map_block_start(em);
	disk_bytenr = extent_map_block_start(em) + extent_offset;

	ASSERT(!extent_map_is_compressed(em));
	ASSERT(block_start != EXTENT_MAP_HOLE);
	ASSERT(block_start != EXTENT_MAP_INLINE);

	free_extent_map(em);
	em = NULL;

	/*
	 * Although the PageDirty bit is cleared before entering this
	 * function, the subpage dirty bit is not.  So clear the subpage dirty
	 * bit here so that next time we won't submit a folio for a range
	 * already written to disk.
	 */
	btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
	btrfs_set_range_writeback(inode, filepos, filepos + sectorsize - 1);
	/*
	 * The above call should set the writeback flag for the whole folio,
	 * even for just a single subpage sector.
	 * As long as the folio is properly locked and the range is correct,
	 * we should always get the folio with the writeback flag set.
	 */
	ASSERT(folio_test_writeback(folio));

	submit_extent_folio(bio_ctrl, disk_bytenr, folio,
			    sectorsize, filepos - folio_pos(folio));
	return 0;
}

/*
 * Helper for extent_writepage().  This calls the writepage start hooks,
 * and does the loop to map the page into extents and bios.
 *
 * We return 1 if the IO is started and the page is unlocked,
 * 0 if all went well (page still locked)
 * < 0 if there were errors (page still locked)
 */
static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
						  struct folio *folio,
						  u64 start, u32 len,
						  struct btrfs_bio_ctrl *bio_ctrl,
						  loff_t i_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long range_bitmap = 0;
	bool submitted_io = false;
	const u64 folio_start = folio_pos(folio);
	u64 cur;
	int bit;
	int ret = 0;

	ASSERT(start >= folio_start &&
	       start + len <= folio_start + folio_size(folio));

	ret = btrfs_writepage_cow_fixup(folio);
	if (ret) {
		/* Fixup worker will requeue */
		folio_redirty_for_writepage(bio_ctrl->wbc, folio);
		folio_unlock(folio);
		return 1;
	}

	for (cur = start; cur < start + len; cur += fs_info->sectorsize)
		set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
	bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
		   fs_info->sectors_per_page);

	bio_ctrl->end_io_func = end_bbio_data_write;

	for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
		cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);

		if (cur >= i_size) {
			btrfs_mark_ordered_io_finished(inode, folio, cur,
						       start + len - cur, true);
			/*
			 * This range is beyond i_size, thus we don't need to
			 * bother writing back.
			 * But we still need to clear the dirty subpage bit, or
			 * the next time the folio gets dirtied, we will try to
			 * write back the sectors with subpage dirty bits,
			 * causing writeback without an ordered extent.
			 */
			btrfs_folio_clear_dirty(fs_info, folio, cur,
						start + len - cur);
			break;
		}
		ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
		if (ret < 0)
			goto out;
		submitted_io = true;
	}

	btrfs_folio_assert_not_dirty(fs_info, folio, start, len);
out:
	/*
	 * If we didn't submit any sector (the whole range is beyond i_size),
	 * the folio dirty flag got cleared but PAGECACHE_TAG_DIRTY was not
	 * (it is only cleared by folio_start_writeback() if the folio is not
	 * dirty).
	 *
	 * Here we set writeback and then clear it for the range.  If the full
	 * folio is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
	 */
	if (!submitted_io) {
		btrfs_folio_set_writeback(fs_info, folio, start, len);
		btrfs_folio_clear_writeback(fs_info, folio, start, len);
	}
	return ret;
}

/*
 * The writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges.
 *
 * Return 0 if everything goes well.
 * Return <0 for error.
 */
static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
{
	struct inode *inode = folio->mapping->host;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	const u64 page_start = folio_pos(folio);
	int ret;
	size_t pg_offset;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_SHIFT;

	trace_extent_writepage(folio, inode, bio_ctrl->wbc);

	WARN_ON(!folio_test_locked(folio));

	pg_offset = offset_in_folio(folio, i_size);
	if (folio->index > end_index ||
	    (folio->index == end_index && !pg_offset)) {
		folio_invalidate(folio, 0, folio_size(folio));
		folio_unlock(folio);
		return 0;
	}

	if (folio->index == end_index)
		folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);

	/*
	 * Default to unlocking the whole folio.
	 * The proper bitmap can only be initialized by writepage_delalloc().
	 */
	bio_ctrl->submit_bitmap = (unsigned long)-1;
	ret = set_folio_extent_mapped(folio);
	if (ret < 0)
		goto done;

	ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl);
	if (ret == 1)
		return 0;
	if (ret)
		goto done;

	ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
				  PAGE_SIZE, bio_ctrl, i_size);
	if (ret == 1)
		return 0;

	bio_ctrl->wbc->nr_to_write--;

done:
	if (ret) {
		btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
					       page_start, PAGE_SIZE, !ret);
		mapping_set_error(folio->mapping, ret);
	}

	/*
	 * Only unlock ranges that are submitted, as there can be some
	 * asynchronously submitted ranges inside the folio.
	 */
	btrfs_folio_end_writer_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
	ASSERT(ret <= 0);
	return ret;
}

void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
{
	wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
		       TASK_UNINTERRUPTIBLE);
}

/*
 * Lock extent buffer status and pages for writeback.
 *
 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
 * extent buffer is not dirty)
 * Return %true if the extent buffer is submitted to bio.
 */
static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
							 struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	bool ret = false;

	btrfs_tree_lock(eb);
	while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
		btrfs_tree_unlock(eb);
		if (wbc->sync_mode != WB_SYNC_ALL)
			return false;
		wait_on_extent_buffer_writeback(eb);
		btrfs_tree_lock(eb);
	}

	/*
	 * We need to do this to prevent races in people who check if the eb is
	 * under IO since we can end up having no IO bits set for a short period
	 * of time.
	 */
	spin_lock(&eb->refs_lock);
	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
		spin_unlock(&eb->refs_lock);
		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 -eb->len,
					 fs_info->dirty_metadata_batch);
		ret = true;
	} else {
		spin_unlock(&eb->refs_lock);
	}
	btrfs_tree_unlock(eb);
	return ret;
}

static void set_btree_ioerr(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;

	set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);

	/*
	 * A read may stumble upon this buffer later, make sure that it gets an
	 * error and knows there was an error.
	 */
	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);

	/*
	 * We need to set the mapping with the io error as well because a write
	 * error will flip the file system readonly, and then syncfs() will
	 * return a 0 because we are readonly if we don't modify the err seq for
	 * the superblock.
	 */
	mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);

	/*
	 * If writeback for a btree extent that doesn't belong to a log tree
	 * failed, increment the counter transaction->eb_write_errors.
	 * We do this because while the transaction is running and before it's
	 * committing (when we call filemap_fdata[write|wait]_range against
	 * the btree inode), we might have
	 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
	 * returns an error or an error happens during writeback, when we're
	 * committing the transaction we wouldn't know about it, since the pages
	 * might no longer be dirty nor marked for writeback (if a subsequent
	 * modification to the extent buffer didn't happen before the
	 * transaction commit), which makes filemap_fdata[write|wait]_range not
	 * able to find the pages which contain errors at transaction
	 * commit time.  So if this happens we must abort the transaction,
	 * otherwise we commit a super block with btree roots that point to
	 * btree nodes/leaves whose content on disk is invalid - either garbage
	 * or the content of some node/leaf from a past generation that got
	 * cowed or deleted and is no longer valid.
	 *
	 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
	 * not be enough - we need to distinguish between log tree extents vs
	 * non-log tree extents, and the next filemap_fdatawait_range() call
	 * will catch and clear such errors in the mapping - and that call might
	 * be from a log sync and not from a transaction commit.  Also, checking
	 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
	 * not done and would not be reliable - the eb might have been released
	 * from memory and reading it back again means that flag would not be
	 * set (since it's a runtime flag, not persisted on disk).
	 *
	 * Using the flags below in the btree inode also makes us achieve the
	 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
	 * writeback for all dirty pages and before filemap_fdatawait_range()
	 * is called, the writeback for all dirty pages had already finished
	 * with errors - because we were not using AS_EIO/AS_ENOSPC,
	 * filemap_fdatawait_range() would return success, as it could not know
	 * that writeback errors happened (the pages were no longer tagged for
	 * writeback).
	 */
	switch (eb->log_index) {
	case -1:
		set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
		break;
	case 0:
		set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
		break;
	case 1:
		set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
		break;
	default:
		BUG(); /* unexpected, logic error */
	}
}

/*
 * The endio specific version which won't touch any unsafe spinlock in endio
 * context.
 */
static struct extent_buffer *find_extent_buffer_nolock(
		const struct btrfs_fs_info *fs_info, u64 start)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = radix_tree_lookup(&fs_info->buffer_radix,
			       start >> fs_info->sectorsize_bits);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		return eb;
	}
	rcu_read_unlock();
	return NULL;
}
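
/*
 * The atomic_inc_not_zero() above is what makes this safe without the usual
 * locking: a buffer whose refcount has already hit zero may be in the middle
 * of an RCU-delayed free, so it must not be revived here and the lookup
 * simply fails instead.
 */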

static void end_bbio_meta_write(struct btrfs_bio *bbio)
{
	struct extent_buffer *eb = bbio->private;
	struct btrfs_fs_info *fs_info = eb->fs_info;
	bool uptodate = !bbio->bio.bi_status;
	struct folio_iter fi;
	u32 bio_offset = 0;

	if (!uptodate)
		set_btree_ioerr(eb);

	bio_for_each_folio_all(fi, &bbio->bio) {
		u64 start = eb->start + bio_offset;
		struct folio *folio = fi.folio;
		u32 len = fi.length;

		btrfs_folio_clear_writeback(fs_info, folio, start, len);
		bio_offset += len;
	}

	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
	smp_mb__after_atomic();
	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);

	bio_put(&bbio->bio);
}
1652
prepare_eb_write(struct extent_buffer * eb)1653 static void prepare_eb_write(struct extent_buffer *eb)
1654 {
1655 u32 nritems;
1656 unsigned long start;
1657 unsigned long end;
1658
1659 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1660
1661 /* Fill the btree block beyond nritems with 0 to avoid stale content */
1662 nritems = btrfs_header_nritems(eb);
1663 if (btrfs_header_level(eb) > 0) {
1664 end = btrfs_node_key_ptr_offset(eb, nritems);
1665 memzero_extent_buffer(eb, end, eb->len - end);
1666 } else {
1667 /*
1668 * Leaf:
1669 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1670 */
1671 start = btrfs_item_nr_offset(eb, nritems);
1672 end = btrfs_item_nr_offset(eb, 0);
1673 if (nritems == 0)
1674 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1675 else
1676 end += btrfs_item_offset(eb, nritems - 1);
1677 memzero_extent_buffer(eb, start, end - start);
1678 }
1679 }
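
/*
 * Worked example for the leaf case above (numbers assumed for
 * illustration): with nritems == 2 and the last item's data offset
 * being 3800, the cleared gap is
 *
 *	start = btrfs_item_nr_offset(eb, 2);		end of item headers
 *	end   = btrfs_item_nr_offset(eb, 0) + 3800;	lowest item data byte
 *	memzero_extent_buffer(eb, start, end - start);
 *
 * i.e. everything between the last item header and the start of the
 * item data is zeroed, so no stale content leaks to disk.
 */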
1680
1681 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1682 struct writeback_control *wbc)
1683 {
1684 struct btrfs_fs_info *fs_info = eb->fs_info;
1685 struct btrfs_bio *bbio;
1686
1687 prepare_eb_write(eb);
1688
1689 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1690 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1691 eb->fs_info, end_bbio_meta_write, eb);
1692 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1693 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1694 wbc_init_bio(wbc, &bbio->bio);
1695 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1696 bbio->file_offset = eb->start;
1697 if (fs_info->nodesize < PAGE_SIZE) {
1698 struct folio *folio = eb->folios[0];
1699 bool ret;
1700
1701 folio_lock(folio);
1702 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1703 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1704 eb->len)) {
1705 folio_clear_dirty_for_io(folio);
1706 wbc->nr_to_write--;
1707 }
1708 ret = bio_add_folio(&bbio->bio, folio, eb->len,
1709 eb->start - folio_pos(folio));
1710 ASSERT(ret);
1711 wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1712 folio_unlock(folio);
1713 } else {
1714 int num_folios = num_extent_folios(eb);
1715
1716 for (int i = 0; i < num_folios; i++) {
1717 struct folio *folio = eb->folios[i];
1718 bool ret;
1719
1720 folio_lock(folio);
1721 folio_clear_dirty_for_io(folio);
1722 folio_start_writeback(folio);
1723 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1724 ASSERT(ret);
1725 wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1726 eb->folio_size);
1727 wbc->nr_to_write -= folio_nr_pages(folio);
1728 folio_unlock(folio);
1729 }
1730 }
1731 btrfs_submit_bbio(bbio, 0);
1732 }
1733
1734 /*
1735 * Submit one subpage btree page.
1736 *
1737 * The main differences from submit_eb_page() are:
1738 * - Page locking
1739 *   For subpage, we don't rely on page locking at all.
1740 *
1741 * - Flush write bio
1742 *   We only flush the bio if we may be unable to fit the current extent
1743 *   buffer into the current bio.
1744 *
1745 * Return >=0 for the number of submitted extent buffers.
1746 * Return <0 for fatal error.
1747 */
1748 static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
1749 {
1750 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1751 int submitted = 0;
1752 u64 folio_start = folio_pos(folio);
1753 int bit_start = 0;
1754 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1755
1756 /* Lock and write each dirty extent buffer in the range */
1757 while (bit_start < fs_info->sectors_per_page) {
1758 struct btrfs_subpage *subpage = folio_get_private(folio);
1759 struct extent_buffer *eb;
1760 unsigned long flags;
1761 u64 start;
1762
1763 /*
1764 * Take private lock to ensure the subpage won't be detached
1765 * in the meantime.
1766 */
1767 spin_lock(&folio->mapping->i_private_lock);
1768 if (!folio_test_private(folio)) {
1769 spin_unlock(&folio->mapping->i_private_lock);
1770 break;
1771 }
1772 spin_lock_irqsave(&subpage->lock, flags);
1773 if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
1774 subpage->bitmaps)) {
1775 spin_unlock_irqrestore(&subpage->lock, flags);
1776 spin_unlock(&folio->mapping->i_private_lock);
1777 bit_start++;
1778 continue;
1779 }
1780
1781 start = folio_start + bit_start * fs_info->sectorsize;
1782 bit_start += sectors_per_node;
1783
1784 /*
1785 * Here we just want to grab the eb without touching extra
1786 * spin locks, so call find_extent_buffer_nolock().
1787 */
1788 eb = find_extent_buffer_nolock(fs_info, start);
1789 spin_unlock_irqrestore(&subpage->lock, flags);
1790 spin_unlock(&folio->mapping->i_private_lock);
1791
1792 /*
1793 * The eb has already reached 0 refs thus find_extent_buffer_nolock()
1794 * doesn't return it. We don't need to write back such an eb
1795 * anyway.
1796 */
1797 if (!eb)
1798 continue;
1799
1800 if (lock_extent_buffer_for_io(eb, wbc)) {
1801 write_one_eb(eb, wbc);
1802 submitted++;
1803 }
1804 free_extent_buffer(eb);
1805 }
1806 return submitted;
1807 }
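
/*
 * Illustrative walk of the loop above under an assumed geometry: with a
 * 64K page, 4K sectorsize and 16K nodesize, sectors_per_page == 16 and
 * sectors_per_node == 4. A fully dirty folio is then probed as
 *
 *	bit_start = 0  -> eb at folio_start,        bit_start += 4
 *	bit_start = 4  -> eb at folio_start + 16K,  bit_start += 4
 *	...
 *
 * so at most four extent buffers are locked and written per folio.
 */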
1808
1809 /*
1810 * Submit all folios of one extent buffer.
1811 *
1812 * @folio: the folio of one extent buffer
1813 * @ctx:   to determine if we need to submit this folio; if the current
1814 *         folio belongs to the eb recorded in @ctx, we don't need to submit
1815 *
1816 * The caller should pass each folio in bytenr order, and here we use
1817 * @ctx->eb to determine if we have submitted the folios of one extent buffer.
1818 *
1819 * If we have, we just skip until we hit a new folio that doesn't belong to
1820 * the current @ctx->eb.
1821 *
1822 * If not, we submit all the folio(s) of the extent buffer.
1823 *
1824 * Return >0 if we have submitted the extent buffer successfully.
1825 * Return 0 if we don't need to submit the folio, as it's already submitted by
1826 * a previous call.
1827 * Return <0 for fatal error.
1828 */
1829 static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx)
1830 {
1831 struct writeback_control *wbc = ctx->wbc;
1832 struct address_space *mapping = folio->mapping;
1833 struct extent_buffer *eb;
1834 int ret;
1835
1836 if (!folio_test_private(folio))
1837 return 0;
1838
1839 if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
1840 return submit_eb_subpage(folio, wbc);
1841
1842 spin_lock(&mapping->i_private_lock);
1843 if (!folio_test_private(folio)) {
1844 spin_unlock(&mapping->i_private_lock);
1845 return 0;
1846 }
1847
1848 eb = folio_get_private(folio);
1849
1850 /*
1851 * Shouldn't happen and normally this would be a BUG_ON but no point
1852 * crashing the machine for something we can survive anyway.
1853 */
1854 if (WARN_ON(!eb)) {
1855 spin_unlock(&mapping->i_private_lock);
1856 return 0;
1857 }
1858
1859 if (eb == ctx->eb) {
1860 spin_unlock(&mapping->i_private_lock);
1861 return 0;
1862 }
1863 ret = atomic_inc_not_zero(&eb->refs);
1864 spin_unlock(&mapping->i_private_lock);
1865 if (!ret)
1866 return 0;
1867
1868 ctx->eb = eb;
1869
1870 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1871 if (ret) {
1872 if (ret == -EBUSY)
1873 ret = 0;
1874 free_extent_buffer(eb);
1875 return ret;
1876 }
1877
1878 if (!lock_extent_buffer_for_io(eb, wbc)) {
1879 free_extent_buffer(eb);
1880 return 0;
1881 }
1882 /* Implies write in zoned mode. */
1883 if (ctx->zoned_bg) {
1884 /* Mark the last eb in the block group. */
1885 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1886 ctx->zoned_bg->meta_write_pointer += eb->len;
1887 }
1888 write_one_eb(eb, wbc);
1889 free_extent_buffer(eb);
1890 return 1;
1891 }
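
/*
 * Sketch of the caller contract (hypothetical loop; see
 * btree_write_cache_pages() below for the real one). Folios must arrive
 * in bytenr order so @ctx->eb can deduplicate folios of the same eb:
 *
 *	ret = submit_eb_page(folio, &ctx);
 *	if (ret < 0)
 *		break;		(fatal error)
 *	if (ret == 0)
 *		continue;	(folio skipped or already covered by ctx->eb)
 *	(ret == 1: one more extent buffer submitted)
 */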
1892
1893 int btree_write_cache_pages(struct address_space *mapping,
1894 struct writeback_control *wbc)
1895 {
1896 struct btrfs_eb_write_context ctx = { .wbc = wbc };
1897 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1898 int ret = 0;
1899 int done = 0;
1900 int nr_to_write_done = 0;
1901 struct folio_batch fbatch;
1902 unsigned int nr_folios;
1903 pgoff_t index;
1904 pgoff_t end; /* Inclusive */
1905 int scanned = 0;
1906 xa_mark_t tag;
1907
1908 folio_batch_init(&fbatch);
1909 if (wbc->range_cyclic) {
1910 index = mapping->writeback_index; /* Start from prev offset */
1911 end = -1;
1912 /*
1913 * Starting from the beginning does not require cycling over
1914 * the range, so mark it as scanned.
1915 */
1916 scanned = (index == 0);
1917 } else {
1918 index = wbc->range_start >> PAGE_SHIFT;
1919 end = wbc->range_end >> PAGE_SHIFT;
1920 scanned = 1;
1921 }
1922 if (wbc->sync_mode == WB_SYNC_ALL)
1923 tag = PAGECACHE_TAG_TOWRITE;
1924 else
1925 tag = PAGECACHE_TAG_DIRTY;
1926 btrfs_zoned_meta_io_lock(fs_info);
1927 retry:
1928 if (wbc->sync_mode == WB_SYNC_ALL)
1929 tag_pages_for_writeback(mapping, index, end);
1930 while (!done && !nr_to_write_done && (index <= end) &&
1931 (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1932 tag, &fbatch))) {
1933 unsigned i;
1934
1935 for (i = 0; i < nr_folios; i++) {
1936 struct folio *folio = fbatch.folios[i];
1937
1938 ret = submit_eb_page(folio, &ctx);
1939 if (ret == 0)
1940 continue;
1941 if (ret < 0) {
1942 done = 1;
1943 break;
1944 }
1945
1946 /*
1947 * The filesystem may choose to bump up nr_to_write.
1948 * We have to make sure to honor the new nr_to_write
1949 * at any time.
1950 */
1951 nr_to_write_done = wbc->nr_to_write <= 0;
1952 }
1953 folio_batch_release(&fbatch);
1954 cond_resched();
1955 }
1956 if (!scanned && !done) {
1957 /*
1958 * We hit the last page and there is more work to be done: wrap
1959 * back to the start of the file
1960 */
1961 scanned = 1;
1962 index = 0;
1963 goto retry;
1964 }
1965 /*
1966 * If something went wrong, don't allow any metadata write bio to be
1967 * submitted.
1968 *
1969 * This prevents use-after-free if we had dirty pages that were
1970 * not cleaned up, which can still happen with fuzzed images.
1971 *
1972 * - Bad extent tree
1973 *   Allows existing tree blocks to be allocated for other trees.
1974 *
1975 * - Log tree operations
1976 *   Existing tree blocks get allocated to the log tree, which bumps
1977 *   their generation, then get cleaned during tree re-balance.
1978 *   Such a tree block will not be written back, since it's clean,
1979 *   thus it has no WRITTEN flag set.
1980 *   And after the log is written back, this tree block is not tracked
1981 *   by any dirty extent_io_tree.
1982 *
1983 * - Offending tree block gets re-dirtied by its original owner
1984 *   Since it has a bumped generation and no WRITTEN flag, it can be
1985 *   reused without COWing. This tree block will not be tracked by
1986 *   btrfs_transaction::dirty_pages.
1987 *
1988 * Now such a dirty tree block will not be cleaned by any dirty
1989 * extent io tree. Thus we don't want to submit such a wild eb
1990 * if the fs already has errors.
1991 *
1992 * We can get ret > 0 from submit_eb_page() indicating how many ebs
1993 * were submitted. Reset it to 0 to avoid false alerts for the caller.
1994 */
1995 if (ret > 0)
1996 ret = 0;
1997 if (!ret && BTRFS_FS_ERROR(fs_info))
1998 ret = -EROFS;
1999
2000 if (ctx.zoned_bg)
2001 btrfs_put_block_group(ctx.zoned_bg);
2002 btrfs_zoned_meta_io_unlock(fs_info);
2003 return ret;
2004 }
2005
2006 /*
2007 * Walk the list of dirty pages of the given address space and write all of them.
2008 *
2009 * @mapping: address space structure to write
2010 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2011 * @bio_ctrl: holds context for the write, namely the bio
2012 *
2013 * If a page is already under I/O, write_cache_pages() skips it, even
2014 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2015 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2016 * and msync() need to guarantee that all the data which was dirty at the time
2017 * the call was made get new I/O started against them. If wbc->sync_mode is
2018 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2019 * existing IO to complete.
2020 */
2021 static int extent_write_cache_pages(struct address_space *mapping,
2022 struct btrfs_bio_ctrl *bio_ctrl)
2023 {
2024 struct writeback_control *wbc = bio_ctrl->wbc;
2025 struct inode *inode = mapping->host;
2026 int ret = 0;
2027 int done = 0;
2028 int nr_to_write_done = 0;
2029 struct folio_batch fbatch;
2030 unsigned int nr_folios;
2031 pgoff_t index;
2032 pgoff_t end; /* Inclusive */
2033 pgoff_t done_index;
2034 int range_whole = 0;
2035 int scanned = 0;
2036 xa_mark_t tag;
2037
2038 /*
2039 * We have to hold onto the inode so that ordered extents can do their
2040 * work when the IO finishes. The alternative to this is failing to add
2041 * an ordered extent if the igrab() fails there and that is a huge pain
2042 * to deal with, so instead just hold onto the inode throughout the
2043 * writepages operation. If it fails here we are freeing up the inode
2044 * anyway and we'd rather not waste our time writing out stuff that is
2045 * going to be truncated anyway.
2046 */
2047 if (!igrab(inode))
2048 return 0;
2049
2050 folio_batch_init(&fbatch);
2051 if (wbc->range_cyclic) {
2052 index = mapping->writeback_index; /* Start from prev offset */
2053 end = -1;
2054 /*
2055 * Starting from the beginning does not require cycling over
2056 * the range, so mark it as scanned.
2057 */
2058 scanned = (index == 0);
2059 } else {
2060 index = wbc->range_start >> PAGE_SHIFT;
2061 end = wbc->range_end >> PAGE_SHIFT;
2062 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2063 range_whole = 1;
2064 scanned = 1;
2065 }
2066
2067 /*
2068 * We do the tagged writepage as long as the snapshot flush bit is set
2069 * and we are the first one who does the filemap_flush() on this inode.
2070 *
2071 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2072 * not race in and drop the bit.
2073 */
2074 if (range_whole && wbc->nr_to_write == LONG_MAX &&
2075 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2076 &BTRFS_I(inode)->runtime_flags))
2077 wbc->tagged_writepages = 1;
2078
2079 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2080 tag = PAGECACHE_TAG_TOWRITE;
2081 else
2082 tag = PAGECACHE_TAG_DIRTY;
2083 retry:
2084 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2085 tag_pages_for_writeback(mapping, index, end);
2086 done_index = index;
2087 while (!done && !nr_to_write_done && (index <= end) &&
2088 (nr_folios = filemap_get_folios_tag(mapping, &index,
2089 end, tag, &fbatch))) {
2090 unsigned i;
2091
2092 for (i = 0; i < nr_folios; i++) {
2093 struct folio *folio = fbatch.folios[i];
2094
2095 done_index = folio_next_index(folio);
2096 /*
2097 * At this point we hold neither the i_pages lock nor
2098 * the page lock: the page may be truncated or
2099 * invalidated (changing page->mapping to NULL),
2100 * or even swizzled back from swapper_space to
2101 * tmpfs file mapping
2102 */
2103 if (!folio_trylock(folio)) {
2104 submit_write_bio(bio_ctrl, 0);
2105 folio_lock(folio);
2106 }
2107
2108 if (unlikely(folio->mapping != mapping)) {
2109 folio_unlock(folio);
2110 continue;
2111 }
2112
2113 if (!folio_test_dirty(folio)) {
2114 /* Someone wrote it for us. */
2115 folio_unlock(folio);
2116 continue;
2117 }
2118
2119 if (wbc->sync_mode != WB_SYNC_NONE) {
2120 if (folio_test_writeback(folio))
2121 submit_write_bio(bio_ctrl, 0);
2122 folio_wait_writeback(folio);
2123 }
2124
2125 if (folio_test_writeback(folio) ||
2126 !folio_clear_dirty_for_io(folio)) {
2127 folio_unlock(folio);
2128 continue;
2129 }
2130
2131 ret = extent_writepage(folio, bio_ctrl);
2132 if (ret < 0) {
2133 done = 1;
2134 break;
2135 }
2136
2137 /*
2138 * The filesystem may choose to bump up nr_to_write.
2139 * We have to make sure to honor the new nr_to_write
2140 * at any time.
2141 */
2142 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2143 wbc->nr_to_write <= 0);
2144 }
2145 folio_batch_release(&fbatch);
2146 cond_resched();
2147 }
2148 if (!scanned && !done) {
2149 /*
2150 * We hit the last page and there is more work to be done: wrap
2151 * back to the start of the file
2152 */
2153 scanned = 1;
2154 index = 0;
2155
2156 /*
2157 * If we're looping we could run into a page that is locked by a
2158 * writer and that writer could be waiting on writeback for a
2159 * page in our current bio, and thus deadlock, so flush the
2160 * write bio here.
2161 */
2162 submit_write_bio(bio_ctrl, 0);
2163 goto retry;
2164 }
2165
2166 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2167 mapping->writeback_index = done_index;
2168
2169 btrfs_add_delayed_iput(BTRFS_I(inode));
2170 return ret;
2171 }
2172
2173 /*
2174 * Submit the pages in the range to the bio for call sites whose delalloc
2175 * range has already been run (i.e., an ordered extent has been inserted) and
2176 * all pages are still locked.
2177 */
2178 void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
2179 u64 start, u64 end, struct writeback_control *wbc,
2180 bool pages_dirty)
2181 {
2182 bool found_error = false;
2183 int ret = 0;
2184 struct address_space *mapping = inode->i_mapping;
2185 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2186 const u32 sectorsize = fs_info->sectorsize;
2187 loff_t i_size = i_size_read(inode);
2188 u64 cur = start;
2189 struct btrfs_bio_ctrl bio_ctrl = {
2190 .wbc = wbc,
2191 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2192 };
2193
2194 if (wbc->no_cgroup_owner)
2195 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2196
2197 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2198
2199 while (cur <= end) {
2200 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2201 u32 cur_len = cur_end + 1 - cur;
2202 struct folio *folio;
2203
2204 folio = __filemap_get_folio(mapping, cur >> PAGE_SHIFT, 0, 0);
2205
2206 /*
2207 * This shouldn't happen since the pages are pinned and locked;
2208 * this code is just a safety net and shouldn't actually run.
2209 */
2210 if (IS_ERR(folio)) {
2211 btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
2212 cur, cur_len, false);
2213 mapping_set_error(mapping, PTR_ERR(folio));
2214 cur = cur_end + 1;
2215 continue;
2216 }
2217
2218 ASSERT(folio_test_locked(folio));
2219 if (pages_dirty && folio != locked_folio)
2220 ASSERT(folio_test_dirty(folio));
2221
2222 /*
2223 * Set the submission bitmap to submit all sectors.
2224 * extent_writepage_io() will do the truncation correctly.
2225 */
2226 bio_ctrl.submit_bitmap = (unsigned long)-1;
2227 ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
2228 &bio_ctrl, i_size);
2229 if (ret == 1)
2230 goto next_page;
2231
2232 if (ret) {
2233 btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
2234 cur, cur_len, !ret);
2235 mapping_set_error(mapping, ret);
2236 }
2237 btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len);
2238 if (ret < 0)
2239 found_error = true;
2240 next_page:
2241 folio_put(folio);
2242 cur = cur_end + 1;
2243 }
2244
2245 submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2246 }
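
/*
 * Worked example of the iteration above (values assumed): with
 * start == 4096, end == 12287 and PAGE_SIZE == 4096 the loop runs twice:
 *
 *	cur = 4096: cur_end = 8191,  cur_len = 4096
 *	cur = 8192: cur_end = 12287, cur_len = 4096
 *
 * i.e. one folio per iteration, with cur_end clamped to @end on the
 * final one.
 */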
2247
2248 int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2249 {
2250 struct inode *inode = mapping->host;
2251 int ret = 0;
2252 struct btrfs_bio_ctrl bio_ctrl = {
2253 .wbc = wbc,
2254 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2255 };
2256
2257 /*
2258 * Allow only a single thread to do the reloc work in zoned mode to
2259 * protect the write pointer updates.
2260 */
2261 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2262 ret = extent_write_cache_pages(mapping, &bio_ctrl);
2263 submit_write_bio(&bio_ctrl, ret);
2264 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2265 return ret;
2266 }
2267
2268 void btrfs_readahead(struct readahead_control *rac)
2269 {
2270 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2271 struct folio *folio;
2272 struct extent_map *em_cached = NULL;
2273 u64 prev_em_start = (u64)-1;
2274
2275 while ((folio = readahead_folio(rac)) != NULL)
2276 btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
2277
2278 if (em_cached)
2279 free_extent_map(em_cached);
2280 submit_one_bio(&bio_ctrl);
2281 }
2282
2283 /*
2284 * Basic invalidate_folio code. This waits on any locked or writeback
2285 * ranges corresponding to the folio, and then deletes any extent state
2286 * records from the tree.
2287 */
2288 int extent_invalidate_folio(struct extent_io_tree *tree,
2289 struct folio *folio, size_t offset)
2290 {
2291 struct extent_state *cached_state = NULL;
2292 u64 start = folio_pos(folio);
2293 u64 end = start + folio_size(folio) - 1;
2294 size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2295
2296 /* This function is only called for the btree inode */
2297 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2298
2299 start += ALIGN(offset, blocksize);
2300 if (start > end)
2301 return 0;
2302
2303 lock_extent(tree, start, end, &cached_state);
2304 folio_wait_writeback(folio);
2305
2306 /*
2307 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2308 * so here we only need to unlock the extent range to free any
2309 * existing extent state.
2310 */
2311 unlock_extent(tree, start, end, &cached_state);
2312 return 0;
2313 }
2314
2315 /*
2316 * A helper for release_folio. This tests for areas of the page that
2317 * are locked or under IO and drops the related state bits if it is safe
2318 * to drop the page.
2319 */
2320 static bool try_release_extent_state(struct extent_io_tree *tree,
2321 struct folio *folio, gfp_t mask)
2322 {
2323 u64 start = folio_pos(folio);
2324 u64 end = start + PAGE_SIZE - 1;
2325 bool ret;
2326
2327 if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2328 ret = false;
2329 } else {
2330 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2331 EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2332 EXTENT_QGROUP_RESERVED);
2333 int ret2;
2334
2335 /*
2336 * At this point we can safely clear everything except the
2337 * locked bit, the nodatasum bit and the delalloc new bit.
2338 * The delalloc new bit will be cleared by ordered extent
2339 * completion.
2340 */
2341 ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2342
2343 /* If __clear_extent_bit() failed for ENOMEM reasons,
2344 * we can't allow the release to continue.
2345 */
2346 if (ret2 < 0)
2347 ret = false;
2348 else
2349 ret = true;
2350 }
2351 return ret;
2352 }
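
/*
 * Decision summary for the helper above (illustrative):
 *
 *	EXTENT_LOCKED present in the range -> keep the folio (return false)
 *	__clear_extent_bit() fails (ENOMEM) -> keep the folio (return false)
 *	otherwise                           -> state cleared, release (true)
 */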
2353
2354 /*
2355 * A helper for release_folio. As long as there are no locked extents
2356 * in the range corresponding to the page, both state records and extent
2357 * map records are removed.
2358 */
2359 bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
2360 {
2361 u64 start = folio_pos(folio);
2362 u64 end = start + PAGE_SIZE - 1;
2363 struct btrfs_inode *inode = folio_to_inode(folio);
2364 struct extent_io_tree *io_tree = &inode->io_tree;
2365
2366 while (start <= end) {
2367 const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2368 const u64 len = end - start + 1;
2369 struct extent_map_tree *extent_tree = &inode->extent_tree;
2370 struct extent_map *em;
2371
2372 write_lock(&extent_tree->lock);
2373 em = lookup_extent_mapping(extent_tree, start, len);
2374 if (!em) {
2375 write_unlock(&extent_tree->lock);
2376 break;
2377 }
2378 if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2379 write_unlock(&extent_tree->lock);
2380 free_extent_map(em);
2381 break;
2382 }
2383 if (test_range_bit_exists(io_tree, em->start,
2384 extent_map_end(em) - 1, EXTENT_LOCKED))
2385 goto next;
2386 /*
2387 * If it's not in the list of modified extents, used by a fast
2388 * fsync, we can remove it. If it's being logged we can safely
2389 * remove it since fsync took an extra reference on the em.
2390 */
2391 if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2392 goto remove_em;
2393 /*
2394 * If it's in the list of modified extents, remove it only if
2395 * its generation is older than the current one, in which case
2396 * we don't need it for a fast fsync. Otherwise don't remove it,
2397 * we could be racing with an ongoing fast fsync that could miss
2398 * the new extent.
2399 */
2400 if (em->generation >= cur_gen)
2401 goto next;
2402 remove_em:
2403 /*
2404 * We only remove extent maps that are not in the list of
2405 * modified extents or that are in the list but with a
2406 * generation lower than the current generation, so there is no
2407 * need to set the full fsync flag on the inode (it hurts the
2408 * fsync performance for workloads with a data size that exceeds
2409 * or is close to the system's memory).
2410 */
2411 remove_extent_mapping(inode, em);
2412 /* Once for the inode's extent map tree. */
2413 free_extent_map(em);
2414 next:
2415 start = extent_map_end(em);
2416 write_unlock(&extent_tree->lock);
2417
2418 /* Once for us, for the lookup_extent_mapping() reference. */
2419 free_extent_map(em);
2420
2421 if (need_resched()) {
2422 /*
2423 * If we need to resched but we can't block, just exit
2424 * and leave any remaining extent maps.
2425 */
2426 if (!gfpflags_allow_blocking(mask))
2427 break;
2428
2429 cond_resched();
2430 }
2431 }
2432 return try_release_extent_state(io_tree, folio, mask);
2433 }
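
/*
 * Illustrative timeline for the generation check above (values assumed):
 * with cur_gen == 100, an extent map on the modified list with
 * em->generation == 99 is safe to drop, since a fast fsync of the
 * current transaction no longer needs it:
 *
 *	em->generation == 99  (< cur_gen)  -> remove_extent_mapping()
 *	em->generation == 100 (>= cur_gen) -> keep it, an ongoing fast
 *					      fsync may still log it
 */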
2434
2435 static void __free_extent_buffer(struct extent_buffer *eb)
2436 {
2437 kmem_cache_free(extent_buffer_cache, eb);
2438 }
2439
2440 static int extent_buffer_under_io(const struct extent_buffer *eb)
2441 {
2442 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
2443 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2444 }
2445
2446 static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
2447 {
2448 struct btrfs_subpage *subpage;
2449
2450 lockdep_assert_held(&folio->mapping->i_private_lock);
2451
2452 if (folio_test_private(folio)) {
2453 subpage = folio_get_private(folio);
2454 if (atomic_read(&subpage->eb_refs))
2455 return true;
2456 /*
2457 * Even if there are no eb refs here, we may still have an
2458 * end_folio_read() call relying on page::private.
2459 */
2460 if (atomic_read(&subpage->readers))
2461 return true;
2462 }
2463 return false;
2464 }
2465
2466 static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
2467 {
2468 struct btrfs_fs_info *fs_info = eb->fs_info;
2469 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2470
2471 /*
2472 * For mapped eb, we're going to change the folio private, which should
2473 * be done under the i_private_lock.
2474 */
2475 if (mapped)
2476 spin_lock(&folio->mapping->i_private_lock);
2477
2478 if (!folio_test_private(folio)) {
2479 if (mapped)
2480 spin_unlock(&folio->mapping->i_private_lock);
2481 return;
2482 }
2483
2484 if (fs_info->nodesize >= PAGE_SIZE) {
2485 /*
2486 * We do this since we'll remove the pages after we've
2487 * removed the eb from the radix tree, so we could race
2488 * and have this folio now attached to a new eb. So
2489 * only clear the folio private if it's still connected
2490 * to this eb.
2491 */
2492 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
2493 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2494 BUG_ON(folio_test_dirty(folio));
2495 BUG_ON(folio_test_writeback(folio));
2496 /* We need to make sure we haven't been attached to a new eb. */
2497 folio_detach_private(folio);
2498 }
2499 if (mapped)
2500 spin_unlock(&folio->mapping->i_private_lock);
2501 return;
2502 }
2503
2504 /*
2505 * For subpage, we can have dummy eb with folio private attached. In
2506 * this case, we can directly detach the private as such a folio is only
2507 * attached to one dummy eb, no sharing.
2508 */
2509 if (!mapped) {
2510 btrfs_detach_subpage(fs_info, folio);
2511 return;
2512 }
2513
2514 btrfs_folio_dec_eb_refs(fs_info, folio);
2515
2516 /*
2517 * We can only detach the folio private if there are no other ebs in the
2518 * page range and no unfinished IO.
2519 */
2520 if (!folio_range_has_eb(fs_info, folio))
2521 btrfs_detach_subpage(fs_info, folio);
2522
2523 spin_unlock(&folio->mapping->i_private_lock);
2524 }
2525
2526 /* Release all pages attached to the extent buffer */
2527 static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
2528 {
2529 ASSERT(!extent_buffer_under_io(eb));
2530
2531 for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
2532 struct folio *folio = eb->folios[i];
2533
2534 if (!folio)
2535 continue;
2536
2537 detach_extent_buffer_folio(eb, folio);
2538
2539 /* One for when we allocated the folio. */
2540 folio_put(folio);
2541 }
2542 }
2543
2544 /*
2545 * Helper for releasing the extent buffer.
2546 */
2547 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
2548 {
2549 btrfs_release_extent_buffer_pages(eb);
2550 btrfs_leak_debug_del_eb(eb);
2551 __free_extent_buffer(eb);
2552 }
2553
2554 static struct extent_buffer *
2555 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
2556 unsigned long len)
2557 {
2558 struct extent_buffer *eb = NULL;
2559
2560 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS | __GFP_NOFAIL);
2561 eb->start = start;
2562 eb->len = len;
2563 eb->fs_info = fs_info;
2564 init_rwsem(&eb->lock);
2565
2566 btrfs_leak_debug_add_eb(eb);
2567
2568 spin_lock_init(&eb->refs_lock);
2569 atomic_set(&eb->refs, 1);
2570
2571 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
2572
2573 return eb;
2574 }
2575
2576 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
2577 {
2578 struct extent_buffer *new;
2579 int num_folios = num_extent_folios(src);
2580 int ret;
2581
2582 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
2583 if (new == NULL)
2584 return NULL;
2585
2586 /*
2587 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
2588 * btrfs_release_extent_buffer() has different behavior for
2589 * UNMAPPED subpage extent buffers.
2590 */
2591 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
2592
2593 ret = alloc_eb_folio_array(new, false);
2594 if (ret) {
2595 btrfs_release_extent_buffer(new);
2596 return NULL;
2597 }
2598
2599 for (int i = 0; i < num_folios; i++) {
2600 struct folio *folio = new->folios[i];
2601
2602 ret = attach_extent_buffer_folio(new, folio, NULL);
2603 if (ret < 0) {
2604 btrfs_release_extent_buffer(new);
2605 return NULL;
2606 }
2607 WARN_ON(folio_test_dirty(folio));
2608 }
2609 copy_extent_buffer_full(new, src);
2610 set_extent_buffer_uptodate(new);
2611
2612 return new;
2613 }
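
/*
 * Minimal usage sketch (hypothetical caller, error handling elided):
 * cloning yields a private UNMAPPED copy whose contents can be read
 * without holding the source eb's lock:
 *
 *	struct extent_buffer *copy;
 *
 *	copy = btrfs_clone_extent_buffer(eb);
 *	if (!copy)
 *		return -ENOMEM;
 *	... read-only access to the snapshot of eb's contents ...
 *	free_extent_buffer(copy);
 */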
2614
2615 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2616 u64 start, unsigned long len)
2617 {
2618 struct extent_buffer *eb;
2619 int num_folios = 0;
2620 int ret;
2621
2622 eb = __alloc_extent_buffer(fs_info, start, len);
2623 if (!eb)
2624 return NULL;
2625
2626 ret = alloc_eb_folio_array(eb, false);
2627 if (ret)
2628 goto err;
2629
2630 num_folios = num_extent_folios(eb);
2631 for (int i = 0; i < num_folios; i++) {
2632 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
2633 if (ret < 0)
2634 goto err;
2635 }
2636
2637 set_extent_buffer_uptodate(eb);
2638 btrfs_set_header_nritems(eb, 0);
2639 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2640
2641 return eb;
2642 err:
2643 for (int i = 0; i < num_folios; i++) {
2644 if (eb->folios[i]) {
2645 detach_extent_buffer_folio(eb, eb->folios[i]);
2646 folio_put(eb->folios[i]);
2647 }
2648 }
2649 __free_extent_buffer(eb);
2650 return NULL;
2651 }
2652
2653 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2654 u64 start)
2655 {
2656 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
2657 }
2658
2659 static void check_buffer_tree_ref(struct extent_buffer *eb)
2660 {
2661 int refs;
2662 /*
2663 * The TREE_REF bit is first set when the extent_buffer is added
2664 * to the radix tree. It is also set again, if it was unset, when a
2665 * new reference is created by find_extent_buffer.
2666 *
2667 * It is only cleared in two cases: freeing the last non-tree
2668 * reference to the extent_buffer when its STALE bit is set or
2669 * calling release_folio when the tree reference is the only reference.
2670 *
2671 * In both cases, care is taken to ensure that the extent_buffer's
2672 * pages are not under io. However, release_folio can be concurrently
2673 * called with creating new references, which is prone to race
2674 * conditions between the calls to check_buffer_tree_ref in those
2675 * codepaths and clearing TREE_REF in try_release_extent_buffer.
2676 *
2677 * The actual lifetime of the extent_buffer in the radix tree is
2678 * adequately protected by the refcount, but the TREE_REF bit and
2679 * its corresponding reference are not. To protect against this
2680 * class of races, we call check_buffer_tree_ref from the codepaths
2681 * which trigger io. Note that once io is initiated, TREE_REF can no
2682 * longer be cleared, so that is the moment at which any such race is
2683 * best fixed.
2684 */
2685 refs = atomic_read(&eb->refs);
2686 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2687 return;
2688
2689 spin_lock(&eb->refs_lock);
2690 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2691 atomic_inc(&eb->refs);
2692 spin_unlock(&eb->refs_lock);
2693 }
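
/*
 * One possible interleaving of the race described above (illustrative;
 * task roles are hypothetical):
 *
 *	reader                              releaser
 *	------------------------------      -----------------------------
 *	find_extent_buffer(): refs 1->2
 *	                                    try_release_extent_buffer():
 *	                                      clears TREE_REF, refs 2->1
 *	check_buffer_tree_ref():
 *	  TREE_REF unset, re-set it,
 *	  refs 1->2
 *
 * Re-taking TREE_REF before starting io is what keeps the buffer alive,
 * since TREE_REF can no longer be cleared once io is initiated.
 */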
2694
2695 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
2696 {
2697 int num_folios = num_extent_folios(eb);
2698
2699 check_buffer_tree_ref(eb);
2700
2701 for (int i = 0; i < num_folios; i++)
2702 folio_mark_accessed(eb->folios[i]);
2703 }
2704
2705 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
2706 u64 start)
2707 {
2708 struct extent_buffer *eb;
2709
2710 eb = find_extent_buffer_nolock(fs_info, start);
2711 if (!eb)
2712 return NULL;
2713 /*
2714 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
2715 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
2716 * another task running free_extent_buffer() might have seen that flag
2717 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
2718 * writeback flags not set) and it's still in the tree (flag
2719 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
2720 * decrementing the extent buffer's reference count twice. So here we
2721 * could race and increment the eb's reference count, clear its stale
2722 * flag, mark it as dirty and drop our reference before the other task
2723 * finishes executing free_extent_buffer, which would later result in
2724 * an attempt to free an extent buffer that is dirty.
2725 */
2726 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
2727 spin_lock(&eb->refs_lock);
2728 spin_unlock(&eb->refs_lock);
2729 }
2730 mark_extent_buffer_accessed(eb);
2731 return eb;
2732 }
2733
2734 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
2735 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
2736 u64 start)
2737 {
2738 struct extent_buffer *eb, *exists = NULL;
2739 int ret;
2740
2741 eb = find_extent_buffer(fs_info, start);
2742 if (eb)
2743 return eb;
2744 eb = alloc_dummy_extent_buffer(fs_info, start);
2745 if (!eb)
2746 return ERR_PTR(-ENOMEM);
2747 eb->fs_info = fs_info;
2748 again:
2749 ret = radix_tree_preload(GFP_NOFS);
2750 if (ret) {
2751 exists = ERR_PTR(ret);
2752 goto free_eb;
2753 }
2754 spin_lock(&fs_info->buffer_lock);
2755 ret = radix_tree_insert(&fs_info->buffer_radix,
2756 start >> fs_info->sectorsize_bits, eb);
2757 spin_unlock(&fs_info->buffer_lock);
2758 radix_tree_preload_end();
2759 if (ret == -EEXIST) {
2760 exists = find_extent_buffer(fs_info, start);
2761 if (exists)
2762 goto free_eb;
2763 else
2764 goto again;
2765 }
2766 check_buffer_tree_ref(eb);
2767 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
2768
2769 return eb;
2770 free_eb:
2771 btrfs_release_extent_buffer(eb);
2772 return exists;
2773 }
2774 #endif
2775
2776 static struct extent_buffer *grab_extent_buffer(
2777 struct btrfs_fs_info *fs_info, struct page *page)
2778 {
2779 struct folio *folio = page_folio(page);
2780 struct extent_buffer *exists;
2781
2782 lockdep_assert_held(&page->mapping->i_private_lock);
2783
2784 /*
2785 * For subpage case, we completely rely on radix tree to ensure we
2786 * don't try to insert two ebs for the same bytenr. So here we always
2787 * return NULL and just continue.
2788 */
2789 if (fs_info->nodesize < PAGE_SIZE)
2790 return NULL;
2791
2792 /* Page not yet attached to an extent buffer */
2793 if (!folio_test_private(folio))
2794 return NULL;
2795
2796 /*
2797 * We could have already allocated an eb for this page and attached one,
2798 * so let's see if we can get a ref on the existing eb, and if we can we
2799 * know it's good and we can just return that one, else we know we can
2800 * just overwrite folio private.
2801 */
2802 exists = folio_get_private(folio);
2803 if (atomic_inc_not_zero(&exists->refs))
2804 return exists;
2805
2806 WARN_ON(PageDirty(page));
2807 folio_detach_private(folio);
2808 return NULL;
2809 }
2810
2811 static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
2812 {
2813 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
2814 btrfs_err(fs_info, "bad tree block start %llu", start);
2815 return -EINVAL;
2816 }
2817
2818 if (fs_info->nodesize < PAGE_SIZE &&
2819 offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
2820 btrfs_err(fs_info,
2821 "tree block crosses page boundary, start %llu nodesize %u",
2822 start, fs_info->nodesize);
2823 return -EINVAL;
2824 }
2825 if (fs_info->nodesize >= PAGE_SIZE &&
2826 !PAGE_ALIGNED(start)) {
2827 btrfs_err(fs_info,
2828 "tree block is not page aligned, start %llu nodesize %u",
2829 start, fs_info->nodesize);
2830 return -EINVAL;
2831 }
2832 if (!IS_ALIGNED(start, fs_info->nodesize) &&
2833 !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
2834 btrfs_warn(fs_info,
2835 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
2836 start, fs_info->nodesize);
2837 }
2838 return 0;
2839 }
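
/*
 * Worked example (geometry assumed): with sectorsize == 4K,
 * PAGE_SIZE == 4K and nodesize == 16K, start == 20480 passes the
 * sectorsize and page-alignment checks, but 20480 % 16384 != 0, so the
 * one-time "not nodesize aligned" warning fires while the function
 * still returns 0.
 */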
2840
2841
2842 /*
2843 * Return 0 if eb->folios[i] is attached to btree inode successfully.
2844 * Return >0 if there is already another extent buffer for the range,
2845 * and @found_eb_ret would be updated.
2846 * Return -EAGAIN if the filemap has an existing folio but with different size
2847 * than @eb.
2848 * The caller needs to free the existing folios and retry using the same order.
2849 */
2850 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
2851 struct btrfs_subpage *prealloc,
2852 struct extent_buffer **found_eb_ret)
2853 {
2855 struct btrfs_fs_info *fs_info = eb->fs_info;
2856 struct address_space *mapping = fs_info->btree_inode->i_mapping;
2857 const unsigned long index = eb->start >> PAGE_SHIFT;
2858 struct folio *existing_folio = NULL;
2859 int ret;
2860
2861 ASSERT(found_eb_ret);
2862
2863 /* Caller should ensure the folio exists. */
2864 ASSERT(eb->folios[i]);
2865
2866 retry:
2867 ret = filemap_add_folio(mapping, eb->folios[i], index + i,
2868 GFP_NOFS | __GFP_NOFAIL);
2869 if (!ret)
2870 goto finish;
2871
2872 existing_folio = filemap_lock_folio(mapping, index + i);
2873 /* The page cache only exists for a very short time, just retry. */
2874 if (IS_ERR(existing_folio)) {
2875 existing_folio = NULL;
2876 goto retry;
2877 }
2878
2879 /* For now, we should only have single-page folios for btree inode. */
2880 ASSERT(folio_nr_pages(existing_folio) == 1);
2881
2882 if (folio_size(existing_folio) != eb->folio_size) {
2883 folio_unlock(existing_folio);
2884 folio_put(existing_folio);
2885 return -EAGAIN;
2886 }
2887
2888 finish:
2889 spin_lock(&mapping->i_private_lock);
2890 if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
2891 /* We're going to reuse the existing page, can drop our folio now. */
2892 __free_page(folio_page(eb->folios[i], 0));
2893 eb->folios[i] = existing_folio;
2894 } else if (existing_folio) {
2895 struct extent_buffer *existing_eb;
2896
2897 existing_eb = grab_extent_buffer(fs_info,
2898 folio_page(existing_folio, 0));
2899 if (existing_eb) {
2900 /* The extent buffer still exists, we can use it directly. */
2901 *found_eb_ret = existing_eb;
2902 spin_unlock(&mapping->i_private_lock);
2903 folio_unlock(existing_folio);
2904 folio_put(existing_folio);
2905 return 1;
2906 }
2907 /* The extent buffer no longer exists, we can reuse the folio. */
2908 __free_page(folio_page(eb->folios[i], 0));
2909 eb->folios[i] = existing_folio;
2910 }
2911 eb->folio_size = folio_size(eb->folios[i]);
2912 eb->folio_shift = folio_shift(eb->folios[i]);
2913 /* Should not fail, as we have preallocated the memory. */
2914 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
2915 ASSERT(!ret);
2916 /*
2917 * To inform that we have an extra eb under allocation, so that
2918 * detach_extent_buffer_folio() won't release the folio private when the
2919 * eb hasn't been inserted into the radix tree yet.
2920 *
2921 * The ref will be decreased when the eb releases the folio, in
2922 * detach_extent_buffer_folio(), thus it needs no special handling in
2923 * the error path.
2924 */
2925 btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
2926 spin_unlock(&mapping->i_private_lock);
2927 return 0;
2928 }
2929
2930 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
2931 u64 start, u64 owner_root, int level)
2932 {
2933 unsigned long len = fs_info->nodesize;
2934 int num_folios;
2935 int attached = 0;
2936 struct extent_buffer *eb;
2937 struct extent_buffer *existing_eb = NULL;
2938 struct btrfs_subpage *prealloc = NULL;
2939 u64 lockdep_owner = owner_root;
2940 bool page_contig = true;
2941 int uptodate = 1;
2942 int ret;
2943
2944 if (check_eb_alignment(fs_info, start))
2945 return ERR_PTR(-EINVAL);
2946
2947 #if BITS_PER_LONG == 32
2948 if (start >= MAX_LFS_FILESIZE) {
2949 btrfs_err_rl(fs_info,
2950 "extent buffer %llu is beyond 32bit page cache limit", start);
2951 btrfs_err_32bit_limit(fs_info);
2952 return ERR_PTR(-EOVERFLOW);
2953 }
2954 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
2955 btrfs_warn_32bit_limit(fs_info);
2956 #endif
2957
2958 eb = find_extent_buffer(fs_info, start);
2959 if (eb)
2960 return eb;
2961
2962 eb = __alloc_extent_buffer(fs_info, start, len);
2963 if (!eb)
2964 return ERR_PTR(-ENOMEM);
2965
2966 /*
2967 * The reloc trees are just snapshots, so we need them to appear to be
2968 * just like any other fs tree WRT lockdep.
2969 */
2970 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
2971 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
2972
2973 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
2974
2975 /*
2976 * Preallocate folio private for subpage case, so that we won't
2977 * allocate memory with i_private_lock or the page lock held.
2978 *
2979 * The memory will be freed by attach_extent_buffer_folio() or freed
2980 * manually if we exit earlier.
2981 */
2982 if (fs_info->nodesize < PAGE_SIZE) {
2983 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
2984 if (IS_ERR(prealloc)) {
2985 ret = PTR_ERR(prealloc);
2986 goto out;
2987 }
2988 }
2989
2990 reallocate:
2991 /* Allocate all pages first. */
2992 ret = alloc_eb_folio_array(eb, true);
2993 if (ret < 0) {
2994 btrfs_free_subpage(prealloc);
2995 goto out;
2996 }
2997
2998 num_folios = num_extent_folios(eb);
2999 /* Attach all pages to the filemap. */
3000 for (int i = 0; i < num_folios; i++) {
3001 struct folio *folio;
3002
3003 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
3004 if (ret > 0) {
3005 ASSERT(existing_eb);
3006 goto out;
3007 }
3008
3009 /*
3010 * TODO: Special handling for a corner case where the order of
3011 * folios mismatch between the new eb and filemap.
3012 *
3013 * This happens when:
3014 *
3015 * - the new eb is using higher order folio
3016 *
3017 * - the filemap is still using 0-order folios for the range
3018 *   This can happen at the previous eb allocation, when we didn't
3019 *   have a higher order folio available for the call.
3020 *
3021 * - the existing eb has already been freed
3022 *
3023 * In this case, we have to free the existing folios first, and
3024 * re-allocate using the same order.
3025 * Thankfully this is not going to happen yet, as we're still
3026 * using 0-order folios.
3027 */
3028 if (unlikely(ret == -EAGAIN)) {
3029 ASSERT(0);
3030 goto reallocate;
3031 }
3032 attached++;
3033
3034 /*
3035 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3036 * reliable, as we may choose to reuse the existing page cache
3037 * and free the allocated page.
3038 */
3039 folio = eb->folios[i];
3040 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3041
3042 /*
3043 * Check if the current page is physically contiguous with previous eb
3044 * page.
3045 * At this stage, either we allocated a large folio, thus @i
3046 * would only be 0, or we fall back to per-page allocation.
3047 */
3048 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3049 page_contig = false;
3050
3051 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3052 uptodate = 0;
3053
3054 /*
3055 * We can't unlock the pages just yet since the extent buffer
3056 * hasn't been properly inserted into the radix tree; this
3057 * opens a race with btree_release_folio, which can free a page
3058 * while we are still filling in all pages for the buffer and
3059 * we could crash.
3060 */
3061 }
3062 if (uptodate)
3063 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3064 /* All pages are physically contiguous, can skip cross page handling. */
3065 if (page_contig)
3066 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3067 again:
3068 ret = radix_tree_preload(GFP_NOFS);
3069 if (ret)
3070 goto out;
3071
3072 spin_lock(&fs_info->buffer_lock);
3073 ret = radix_tree_insert(&fs_info->buffer_radix,
3074 start >> fs_info->sectorsize_bits, eb);
3075 spin_unlock(&fs_info->buffer_lock);
3076 radix_tree_preload_end();
3077 if (ret == -EEXIST) {
3078 ret = 0;
3079 existing_eb = find_extent_buffer(fs_info, start);
3080 if (existing_eb)
3081 goto out;
3082 else
3083 goto again;
3084 }
3085 /* add one reference for the tree */
3086 check_buffer_tree_ref(eb);
3087 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3088
3089 /*
3090 * Now it's safe to unlock the pages because any calls to
3091 * btree_release_folio will correctly detect that a page belongs to a
3092 * live buffer and won't free them prematurely.
3093 */
3094 for (int i = 0; i < num_folios; i++)
3095 unlock_page(folio_page(eb->folios[i], 0));
3096 return eb;
3097
3098 out:
3099 WARN_ON(!atomic_dec_and_test(&eb->refs));
3100
3101 /*
3102 * Any attached folios need to be detached before we unlock them. This
3103 * is because when we insert our new folios into the mapping, we then
3104 * attach our eb to each folio. If we fail to insert a folio, we'll
3105 * look up the folio for that index and grab its eb. We do not want
3106 * that lookup to grab this eb, as we're getting ready to free it. So
3107 * we have to detach it first and then unlock it.
3108 *
3109 * We have to drop our reference and NULL it out here because in the
3110 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3111 * Below when we call btrfs_release_extent_buffer() we will call
3112 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3113 * case. If we left eb->folios[i] populated in the subpage case we'd
3114 * double put our reference and be super sad.
3115 */
3116 for (int i = 0; i < attached; i++) {
3117 ASSERT(eb->folios[i]);
3118 detach_extent_buffer_folio(eb, eb->folios[i]);
3119 unlock_page(folio_page(eb->folios[i], 0));
3120 folio_put(eb->folios[i]);
3121 eb->folios[i] = NULL;
3122 }
3123 /*
3124 * Now all pages of that extent buffer are unmapped; set the UNMAPPED
3125 * flag so it can be cleaned up without utilizing page->mapping.
3126 */
3127 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3128
3129 btrfs_release_extent_buffer(eb);
3130 if (ret < 0)
3131 return ERR_PTR(ret);
3132 ASSERT(existing_eb);
3133 return existing_eb;
3134 }
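
/*
 * Minimal usage sketch (hypothetical caller): on success the eb is in
 * the radix tree holding one tree ref plus the ref returned to us:
 *
 *	struct extent_buffer *eb;
 *
 *	eb = alloc_extent_buffer(fs_info, bytenr, owner_root, level);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);
 *	... use eb ...
 *	free_extent_buffer(eb);		(drops only our reference)
 */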
3135
3136 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3137 {
3138 struct extent_buffer *eb =
3139 container_of(head, struct extent_buffer, rcu_head);
3140
3141 __free_extent_buffer(eb);
3142 }
3143
3144 static int release_extent_buffer(struct extent_buffer *eb)
3145 __releases(&eb->refs_lock)
3146 {
3147 lockdep_assert_held(&eb->refs_lock);
3148
3149 WARN_ON(atomic_read(&eb->refs) == 0);
3150 if (atomic_dec_and_test(&eb->refs)) {
3151 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3152 struct btrfs_fs_info *fs_info = eb->fs_info;
3153
3154 spin_unlock(&eb->refs_lock);
3155
3156 spin_lock(&fs_info->buffer_lock);
3157 radix_tree_delete(&fs_info->buffer_radix,
3158 eb->start >> fs_info->sectorsize_bits);
3159 spin_unlock(&fs_info->buffer_lock);
3160 } else {
3161 spin_unlock(&eb->refs_lock);
3162 }
3163
3164 btrfs_leak_debug_del_eb(eb);
3165 /* Should be safe to release our pages at this point */
3166 btrfs_release_extent_buffer_pages(eb);
3167 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3168 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3169 __free_extent_buffer(eb);
3170 return 1;
3171 }
3172 #endif
3173 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3174 return 1;
3175 }
3176 spin_unlock(&eb->refs_lock);
3177
3178 return 0;
3179 }
3180
3181 void free_extent_buffer(struct extent_buffer *eb)
3182 {
3183 int refs;
3184 if (!eb)
3185 return;
3186
3187 refs = atomic_read(&eb->refs);
3188 while (1) {
3189 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3190 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3191 refs == 1))
3192 break;
3193 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3194 return;
3195 }
3196
3197 spin_lock(&eb->refs_lock);
3198 if (atomic_read(&eb->refs) == 2 &&
3199 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3200 !extent_buffer_under_io(eb) &&
3201 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3202 atomic_dec(&eb->refs);
3203
3204 /*
3205 * I know this is terrible, but it's temporary until we stop tracking
3206 * the uptodate bits and such for the extent buffers.
3207 */
3208 release_extent_buffer(eb);
3209 }
3210
3211 void free_extent_buffer_stale(struct extent_buffer *eb)
3212 {
3213 if (!eb)
3214 return;
3215
3216 spin_lock(&eb->refs_lock);
3217 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3218
3219 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3220 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3221 atomic_dec(&eb->refs);
3222 release_extent_buffer(eb);
3223 }
3224
3225 static void btree_clear_folio_dirty(struct folio *folio)
3226 {
3227 ASSERT(folio_test_dirty(folio));
3228 ASSERT(folio_test_locked(folio));
3229 folio_clear_dirty_for_io(folio);
3230 xa_lock_irq(&folio->mapping->i_pages);
3231 if (!folio_test_dirty(folio))
3232 __xa_clear_mark(&folio->mapping->i_pages,
3233 folio_index(folio), PAGECACHE_TAG_DIRTY);
3234 xa_unlock_irq(&folio->mapping->i_pages);
3235 }
3236
3237 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3238 {
3239 struct btrfs_fs_info *fs_info = eb->fs_info;
3240 struct folio *folio = eb->folios[0];
3241 bool last;
3242
3243 /* btree_clear_folio_dirty() needs the folio locked. */
3244 folio_lock(folio);
3245 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
3246 if (last)
3247 btree_clear_folio_dirty(folio);
3248 folio_unlock(folio);
3249 WARN_ON(atomic_read(&eb->refs) == 0);
3250 }
3251
3252 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3253 struct extent_buffer *eb)
3254 {
3255 struct btrfs_fs_info *fs_info = eb->fs_info;
3256 int num_folios;
3257
3258 btrfs_assert_tree_write_locked(eb);
3259
3260 if (trans && btrfs_header_generation(eb) != trans->transid)
3261 return;
3262
3263 /*
3264 * Instead of clearing the dirty flag off of the buffer, mark it as
3265 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3266 * write-ordering in zoned mode, without the need to later re-dirty
3267 * the extent_buffer.
3268 *
3269 * The actual zeroout of the buffer will happen later in
3270 * btree_csum_one_bio.
3271 */
3272 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3273 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3274 return;
3275 }
3276
3277 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3278 return;
3279
3280 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3281 fs_info->dirty_metadata_batch);
3282
3283 if (eb->fs_info->nodesize < PAGE_SIZE)
3284 return clear_subpage_extent_buffer_dirty(eb);
3285
3286 num_folios = num_extent_folios(eb);
3287 for (int i = 0; i < num_folios; i++) {
3288 struct folio *folio = eb->folios[i];
3289
3290 if (!folio_test_dirty(folio))
3291 continue;
3292 folio_lock(folio);
3293 btree_clear_folio_dirty(folio);
3294 folio_unlock(folio);
3295 }
3296 WARN_ON(atomic_read(&eb->refs) == 0);
3297 }
3298
3299 void set_extent_buffer_dirty(struct extent_buffer *eb)
3300 {
3301 int num_folios;
3302 bool was_dirty;
3303
3304 check_buffer_tree_ref(eb);
3305
3306 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3307
3308 num_folios = num_extent_folios(eb);
3309 WARN_ON(atomic_read(&eb->refs) == 0);
3310 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3311 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
3312
3313 if (!was_dirty) {
3314 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3315
3316 /*
3317 * For subpage case, we can have other extent buffers in the
3318 * same page, and in clear_subpage_extent_buffer_dirty() we
3319 * have to clear page dirty without subpage lock held.
3320 * This can cause a race where our page gets its dirty bit
3321 * cleared right after we just set it.
3322 *
3323 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
3324 * its page for other reasons, so we can use the page lock to
3325 * prevent the above race.
3326 */
3327 if (subpage)
3328 lock_page(folio_page(eb->folios[0], 0));
3329 for (int i = 0; i < num_folios; i++)
3330 btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
3331 eb->start, eb->len);
3332 if (subpage)
3333 unlock_page(folio_page(eb->folios[0], 0));
3334 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3335 eb->len,
3336 eb->fs_info->dirty_metadata_batch);
3337 }
3338 #ifdef CONFIG_BTRFS_DEBUG
3339 for (int i = 0; i < num_folios; i++)
3340 ASSERT(folio_test_dirty(eb->folios[i]));
3341 #endif
3342 }
3343
3344 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3345 {
3346 struct btrfs_fs_info *fs_info = eb->fs_info;
3347 int num_folios = num_extent_folios(eb);
3348
3349 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3350 for (int i = 0; i < num_folios; i++) {
3351 struct folio *folio = eb->folios[i];
3352
3353 if (!folio)
3354 continue;
3355
3356 /*
3357 * This is special handling for metadata subpage, as regular
3358 * btrfs_is_subpage() can not handle cloned/dummy metadata.
3359 */
3360 if (fs_info->nodesize >= PAGE_SIZE)
3361 folio_clear_uptodate(folio);
3362 else
3363 btrfs_subpage_clear_uptodate(fs_info, folio,
3364 eb->start, eb->len);
3365 }
3366 }

void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int num_folios = num_extent_folios(eb);

	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	for (int i = 0; i < num_folios; i++) {
		struct folio *folio = eb->folios[i];

		/*
		 * This is special handling for metadata subpage, as regular
		 * btrfs_is_subpage() cannot handle cloned/dummy metadata.
		 */
		if (fs_info->nodesize >= PAGE_SIZE)
			folio_mark_uptodate(folio);
		else
			btrfs_subpage_set_uptodate(fs_info, folio,
						   eb->start, eb->len);
	}
}

static void clear_extent_buffer_reading(struct extent_buffer *eb)
{
	clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
	smp_mb__after_atomic();
	wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
}
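
/*
 * The EXTENT_BUFFER_READING bit implements a simple hand-rolled lock: the
 * task that wins test_and_set_bit() submits the I/O, everyone else sleeps
 * on the bit until the end_io handler calls the clear above. A sketch of
 * the waiter side (this mirrors what read_extent_buffer_pages() below
 * does):
 *
 *	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
 *		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING,
 *			       TASK_UNINTERRUPTIBLE);
 */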

static void end_bbio_meta_read(struct btrfs_bio *bbio)
{
	struct extent_buffer *eb = bbio->private;
	struct btrfs_fs_info *fs_info = eb->fs_info;
	bool uptodate = !bbio->bio.bi_status;
	struct folio_iter fi;
	u32 bio_offset = 0;

	/*
	 * If the extent buffer is marked UPTODATE before the read operation
	 * completes, other calls to read_extent_buffer_pages() will return
	 * early without waiting for the read to finish, causing data races.
	 */
	WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));

	eb->read_mirror = bbio->mirror_num;

	if (uptodate &&
	    btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
		uptodate = false;

	if (uptodate) {
		set_extent_buffer_uptodate(eb);
	} else {
		clear_extent_buffer_uptodate(eb);
		set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	}

	bio_for_each_folio_all(fi, &bbio->bio) {
		struct folio *folio = fi.folio;
		u64 start = eb->start + bio_offset;
		u32 len = fi.length;

		if (uptodate)
			btrfs_folio_set_uptodate(fs_info, folio, start, len);
		else
			btrfs_folio_clear_uptodate(fs_info, folio, start, len);

		bio_offset += len;
	}

	clear_extent_buffer_reading(eb);
	free_extent_buffer(eb);

	bio_put(&bbio->bio);
}

int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
			     const struct btrfs_tree_parent_check *check)
{
	struct btrfs_bio *bbio;
	bool ret;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 0;

	/*
	 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
	 * operation, which could potentially still be in flight. In this
	 * case we simply want to return an error.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
		return -EIO;

	/* Someone else is already reading the buffer, just wait for it. */
	if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
		goto done;

	/*
	 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
	 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
	 * started and finished reading the same eb. In this case, UPTODATE
	 * will now be set, and we shouldn't read it in again.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
		clear_extent_buffer_reading(eb);
		return 0;
	}

	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = 0;
	check_buffer_tree_ref(eb);
	atomic_inc(&eb->refs);

	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
			       REQ_OP_READ | REQ_META, eb->fs_info,
			       end_bbio_meta_read, eb);
	bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
	bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
	bbio->file_offset = eb->start;
	memcpy(&bbio->parent_check, check, sizeof(*check));
	if (eb->fs_info->nodesize < PAGE_SIZE) {
		ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
				    eb->start - folio_pos(eb->folios[0]));
		ASSERT(ret);
	} else {
		int num_folios = num_extent_folios(eb);

		for (int i = 0; i < num_folios; i++) {
			struct folio *folio = eb->folios[i];

			ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
			ASSERT(ret);
		}
	}
	btrfs_submit_bbio(bbio, mirror_num);

done:
	if (wait == WAIT_COMPLETE) {
		wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
		if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
			return -EIO;
	}

	return 0;
}
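
/*
 * A minimal caller sketch (illustrative; this mirrors how the btree read
 * path fills in the parent check and waits for completion):
 *
 *	struct btrfs_tree_parent_check check = {
 *		.owner_root = btrfs_header_owner(parent),
 *		.transid = btrfs_node_ptr_generation(parent, slot),
 *		.level = level,
 *	};
 *	int ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, 0, &check);
 *
 * On success the eb is guaranteed uptodate; on -EIO the caller is expected
 * to drop its reference.
 */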

static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
			    unsigned long len)
{
	btrfs_warn(eb->fs_info,
		"access to eb bytenr %llu len %u out of range start %lu len %lu",
		   eb->start, eb->len, start, len);
	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));

	return true;
}

/*
 * Check if the [start, start + len) range is valid before reading/writing
 * the eb.
 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
 *
 * Callers should not touch the dst/src memory if this function returns an
 * error.
 */
static inline int check_eb_range(const struct extent_buffer *eb,
				 unsigned long start, unsigned long len)
{
	unsigned long offset;

	/* start, start + len should not go beyond eb->len nor overflow */
	if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
		return report_eb_range(eb, start, len);

	return false;
}
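
/*
 * Worked example of the overflow check above (assuming a 64-bit
 * unsigned long): for start == ULONG_MAX - 10 and len == 20,
 * check_add_overflow() reports the wrap-around even though the wrapped
 * sum (9) would pass a plain "start + len > eb->len" comparison, so the
 * bogus range is rejected instead of silently accessing the wrong bytes.
 */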

void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
			unsigned long start, unsigned long len)
{
	const int unit_size = eb->folio_size;
	size_t cur;
	size_t offset;
	char *dst = (char *)dstv;
	unsigned long i = get_eb_folio_index(eb, start);

	if (check_eb_range(eb, start, len)) {
		/*
		 * Invalid range hit, zero the destination so callers won't
		 * see random garbage from their uninitialized memory.
		 */
		memset(dstv, 0, len);
		return;
	}

	if (eb->addr) {
		memcpy(dstv, eb->addr + start, len);
		return;
	}

	offset = get_eb_offset_in_folio(eb, start);

	while (len > 0) {
		char *kaddr;

		cur = min(len, unit_size - offset);
		kaddr = folio_address(eb->folios[i]);
		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
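
/*
 * A typical read (illustrative sketch, assuming the
 * btrfs_node_key_ptr_offset() helper from the accessor headers): copy one
 * key out of a node into a stack buffer.
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	read_extent_buffer(eb, &disk_key,
 *			   btrfs_node_key_ptr_offset(eb, slot),
 *			   sizeof(disk_key));
 */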

int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
				       void __user *dstv,
				       unsigned long start, unsigned long len)
{
	const int unit_size = eb->folio_size;
	size_t cur;
	size_t offset;
	char __user *dst = (char __user *)dstv;
	unsigned long i = get_eb_folio_index(eb, start);
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);

	if (eb->addr) {
		if (copy_to_user_nofault(dstv, eb->addr + start, len))
			ret = -EFAULT;
		return ret;
	}

	offset = get_eb_offset_in_folio(eb, start);

	while (len > 0) {
		char *kaddr;

		cur = min(len, unit_size - offset);
		kaddr = folio_address(eb->folios[i]);
		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
			ret = -EFAULT;
			break;
		}

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}

	return ret;
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
{
	const int unit_size = eb->folio_size;
	size_t cur;
	size_t offset;
	char *kaddr;
	char *ptr = (char *)ptrv;
	unsigned long i = get_eb_folio_index(eb, start);
	int ret = 0;

	if (check_eb_range(eb, start, len))
		return -EINVAL;

	if (eb->addr)
		return memcmp(ptrv, eb->addr + start, len);

	offset = get_eb_offset_in_folio(eb, start);

	while (len > 0) {
		cur = min(len, unit_size - offset);
		kaddr = folio_address(eb->folios[i]);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

/*
 * Check that the extent buffer is uptodate.
 *
 * For the regular case (sector size == PAGE_SIZE), check if the folio at
 * index @i is uptodate.
 * For the subpage case, check if the range covered by the eb has
 * EXTENT_UPTODATE.
 */
static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct folio *folio = eb->folios[i];

	ASSERT(folio);

	/*
	 * If we are using the commit root we could potentially clear a page
	 * Uptodate while we're using the extent buffer that we've previously
	 * looked up. We don't want to complain in this case, as the page was
	 * valid before, we just didn't write it out. Instead we want to catch
	 * the case where we didn't actually read the block properly, which
	 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
	 */
	if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
		return;

	if (fs_info->nodesize < PAGE_SIZE) {
		folio = eb->folios[0];
		ASSERT(i == 0);
		if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
							 eb->start, eb->len)))
			btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
	} else {
		WARN_ON(!folio_test_uptodate(folio));
	}
}

static void __write_extent_buffer(const struct extent_buffer *eb,
				  const void *srcv, unsigned long start,
				  unsigned long len, bool use_memmove)
{
	const int unit_size = eb->folio_size;
	size_t cur;
	size_t offset;
	char *kaddr;
	const char *src = (const char *)srcv;
	unsigned long i = get_eb_folio_index(eb, start);
	/* For unmapped (dummy) ebs, no need to check their uptodate status. */
	const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);

	if (check_eb_range(eb, start, len))
		return;

	if (eb->addr) {
		if (use_memmove)
			memmove(eb->addr + start, srcv, len);
		else
			memcpy(eb->addr + start, srcv, len);
		return;
	}

	offset = get_eb_offset_in_folio(eb, start);

	while (len > 0) {
		if (check_uptodate)
			assert_eb_folio_uptodate(eb, i);

		cur = min(len, unit_size - offset);
		kaddr = folio_address(eb->folios[i]);
		if (use_memmove)
			memmove(kaddr + offset, src, cur);
		else
			memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	return __write_extent_buffer(eb, srcv, start, len, false);
}

static void memset_extent_buffer(const struct extent_buffer *eb, int c,
				 unsigned long start, unsigned long len)
{
	const int unit_size = eb->folio_size;
	unsigned long cur = start;

	if (eb->addr) {
		memset(eb->addr + start, c, len);
		return;
	}

	while (cur < start + len) {
		unsigned long index = get_eb_folio_index(eb, cur);
		unsigned int offset = get_eb_offset_in_folio(eb, cur);
		unsigned int cur_len = min(start + len - cur, unit_size - offset);

		assert_eb_folio_uptodate(eb, index);
		memset(folio_address(eb->folios[index]) + offset, c, cur_len);

		cur += cur_len;
	}
}

void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
			   unsigned long len)
{
	if (check_eb_range(eb, start, len))
		return;
	return memset_extent_buffer(eb, 0, start, len);
}

void copy_extent_buffer_full(const struct extent_buffer *dst,
			     const struct extent_buffer *src)
{
	const int unit_size = src->folio_size;
	unsigned long cur = 0;

	ASSERT(dst->len == src->len);

	while (cur < src->len) {
		unsigned long index = get_eb_folio_index(src, cur);
		unsigned long offset = get_eb_offset_in_folio(src, cur);
		unsigned long cur_len = min(src->len, unit_size - offset);
		void *addr = folio_address(src->folios[index]) + offset;

		write_extent_buffer(dst, addr, cur, cur_len);

		cur += cur_len;
	}
}

void copy_extent_buffer(const struct extent_buffer *dst,
			const struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	const int unit_size = dst->folio_size;
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	char *kaddr;
	unsigned long i = get_eb_folio_index(dst, dst_offset);

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(src, src_offset, len))
		return;

	WARN_ON(src->len != dst_len);

	offset = get_eb_offset_in_folio(dst, dst_offset);

	while (len > 0) {
		assert_eb_folio_uptodate(dst, i);

		cur = min(len, (unsigned long)(unit_size - offset));

		kaddr = folio_address(dst->folios[i]);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

/*
 * Calculate the folio and offset of the byte containing the given bit number.
 *
 * @eb:           the extent buffer
 * @start:        offset of the bitmap item in the extent buffer
 * @nr:           bit number
 * @folio_index:  return index of the folio in the extent buffer that contains
 *                the given bit number
 * @folio_offset: return offset into the folio given by folio_index
 *
 * This helper hides the ugliness of finding the byte in an extent buffer which
 * contains a given bit.
 */
static inline void eb_bitmap_offset(const struct extent_buffer *eb,
				    unsigned long start, unsigned long nr,
				    unsigned long *folio_index,
				    size_t *folio_offset)
{
	size_t byte_offset = BIT_BYTE(nr);
	size_t offset;

	/*
	 * The byte we want is the offset of the extent buffer + the offset of
	 * the bitmap item in the extent buffer + the offset of the byte in the
	 * bitmap item.
	 */
	offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;

	*folio_index = offset >> eb->folio_shift;
	*folio_offset = offset_in_eb_folio(eb, offset);
}
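
/*
 * Worked example (illustrative numbers): on a 16K-folio setup with a 4K
 * eb that starts 8192 bytes into its folio, a bitmap item at @start == 100
 * and @nr == 13 gives BIT_BYTE(13) == 1, so offset == 8192 + 100 + 1 ==
 * 8293, which maps to *folio_index == 8293 >> 14 == 0 and
 * *folio_offset == 8293.
 */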

/*
 * Determine whether a bit in a bitmap item is set.
 *
 * @eb:    the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr:    bit number to test
 */
int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	unsigned long i;
	size_t offset;
	u8 *kaddr;

	eb_bitmap_offset(eb, start, nr, &i, &offset);
	assert_eb_folio_uptodate(eb, i);
	kaddr = folio_address(eb->folios[i]);
	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}

static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
{
	unsigned long index = get_eb_folio_index(eb, bytenr);

	if (check_eb_range(eb, bytenr, 1))
		return NULL;
	return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
}

/*
 * Set an area of a bitmap to 1.
 *
 * @eb:    the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos:   bit number of the first bit
 * @len:   number of bits to set
 */
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	unsigned int first_byte = start + BIT_BYTE(pos);
	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
	const bool same_byte = (first_byte == last_byte);
	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
	u8 *kaddr;

	if (same_byte)
		mask &= BITMAP_LAST_BYTE_MASK(pos + len);

	/* Handle the first byte. */
	kaddr = extent_buffer_get_byte(eb, first_byte);
	*kaddr |= mask;
	if (same_byte)
		return;

	/* Handle the byte aligned part. */
	ASSERT(first_byte + 1 <= last_byte);
	memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);

	/* Handle the last byte. */
	kaddr = extent_buffer_get_byte(eb, last_byte);
	*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
}
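
/*
 * Worked example of the byte masks (illustrative, assuming the
 * BITMAP_*_BYTE_MASK() helpers defined earlier in this file): setting
 * @pos == 3, @len == 3 stays within one byte, so the mask is
 * BITMAP_FIRST_BYTE_MASK(3) & BITMAP_LAST_BYTE_MASK(6) ==
 * 0xf8 & 0x3f == 0x38, i.e. exactly bits 3-5. For longer runs only the
 * partial first and last bytes need masking; the middle is a plain
 * memset of 0xff.
 */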

/*
 * Clear an area of a bitmap.
 *
 * @eb:    the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos:   bit number of the first bit
 * @len:   number of bits to clear
 */
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
				unsigned long start, unsigned long pos,
				unsigned long len)
{
	unsigned int first_byte = start + BIT_BYTE(pos);
	unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
	const bool same_byte = (first_byte == last_byte);
	u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
	u8 *kaddr;

	if (same_byte)
		mask &= BITMAP_LAST_BYTE_MASK(pos + len);

	/* Handle the first byte. */
	kaddr = extent_buffer_get_byte(eb, first_byte);
	*kaddr &= ~mask;
	if (same_byte)
		return;

	/* Handle the byte aligned part. */
	ASSERT(first_byte + 1 <= last_byte);
	memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);

	/* Handle the last byte. */
	kaddr = extent_buffer_get_byte(eb, last_byte);
	*kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
}

static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}
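
/*
 * Worked example: for src == 100, dst == 140 and len == 50 the distance
 * is 40 < 50, so the ranges [100, 150) and [140, 190) overlap and the
 * copy must use memmove() semantics instead of memcpy().
 */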

void memcpy_extent_buffer(const struct extent_buffer *dst,
			  unsigned long dst_offset, unsigned long src_offset,
			  unsigned long len)
{
	const int unit_size = dst->folio_size;
	unsigned long cur_off = 0;

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(dst, src_offset, len))
		return;

	if (dst->addr) {
		const bool use_memmove = areas_overlap(src_offset, dst_offset, len);

		if (use_memmove)
			memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
		else
			memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
		return;
	}

	while (cur_off < len) {
		unsigned long cur_src = cur_off + src_offset;
		unsigned long folio_index = get_eb_folio_index(dst, cur_src);
		unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
		unsigned long cur_len = min(src_offset + len - cur_src,
					    unit_size - folio_off);
		void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
		const bool use_memmove = areas_overlap(src_offset + cur_off,
						       dst_offset + cur_off, cur_len);

		__write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
				      use_memmove);
		cur_off += cur_len;
	}
}

void memmove_extent_buffer(const struct extent_buffer *dst,
			   unsigned long dst_offset, unsigned long src_offset,
			   unsigned long len)
{
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;

	if (check_eb_range(dst, dst_offset, len) ||
	    check_eb_range(dst, src_offset, len))
		return;

	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}

	if (dst->addr) {
		memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
		return;
	}

	while (len > 0) {
		unsigned long src_i;
		size_t cur;
		size_t dst_off_in_folio;
		size_t src_off_in_folio;
		void *src_addr;
		bool use_memmove;

		src_i = get_eb_folio_index(dst, src_end);

		dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
		src_off_in_folio = get_eb_offset_in_folio(dst, src_end);

		cur = min_t(unsigned long, len, src_off_in_folio + 1);
		cur = min(cur, dst_off_in_folio + 1);

		src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
			   cur + 1;
		use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
					    cur);

		__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
				      use_memmove);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
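
/*
 * A sketch of why the loop above walks backwards (illustrative;
 * slot_offset, item_size and nritems are hypothetical names): for an
 * overlapping move with dst_offset > src_offset, e.g. shifting items to
 * make room for an insert,
 *
 *	memmove_extent_buffer(eb, slot_offset + item_size, slot_offset,
 *			      nritems * item_size);
 *
 * copying from the front would overwrite source bytes before they are
 * read, so the copy starts at the last byte of both ranges and moves
 * toward the front, folio by folio.
 */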

#define GANG_LOOKUP_SIZE	16
static struct extent_buffer *get_next_extent_buffer(
		const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr)
{
	struct extent_buffer *gang[GANG_LOOKUP_SIZE];
	struct extent_buffer *found = NULL;
	u64 folio_start = folio_pos(folio);
	u64 cur = folio_start;

	ASSERT(in_range(bytenr, folio_start, PAGE_SIZE));
	lockdep_assert_held(&fs_info->buffer_lock);

	while (cur < folio_start + PAGE_SIZE) {
		int ret;
		int i;

		ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
				(void **)gang, cur >> fs_info->sectorsize_bits,
				min_t(unsigned int, GANG_LOOKUP_SIZE,
				      PAGE_SIZE / fs_info->nodesize));
		if (ret == 0)
			goto out;
		for (i = 0; i < ret; i++) {
			/* Already beyond page end */
			if (gang[i]->start >= folio_start + PAGE_SIZE)
				goto out;
			/* Found one */
			if (gang[i]->start >= bytenr) {
				found = gang[i];
				goto out;
			}
		}
		cur = gang[ret - 1]->start + gang[ret - 1]->len;
	}
out:
	return found;
}

static int try_release_subpage_extent_buffer(struct folio *folio)
{
	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
	u64 cur = folio_pos(folio);
	const u64 end = cur + PAGE_SIZE;
	int ret;

	while (cur < end) {
		struct extent_buffer *eb = NULL;

		/*
		 * Unlike try_release_extent_buffer() which uses folio private
		 * to grab the buffer, for the subpage case we rely on the
		 * radix tree, thus we need to ensure radix tree consistency.
		 *
		 * We also want an atomic snapshot of the radix tree, thus we
		 * go with a spinlock rather than RCU.
		 */
		spin_lock(&fs_info->buffer_lock);
		eb = get_next_extent_buffer(fs_info, folio, cur);
		if (!eb) {
			/* No more ebs in the page range at or after cur */
			spin_unlock(&fs_info->buffer_lock);
			break;
		}
		cur = eb->start + eb->len;

		/*
		 * The same as try_release_extent_buffer(), to ensure the eb
		 * won't disappear out from under us.
		 */
		spin_lock(&eb->refs_lock);
		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
			spin_unlock(&eb->refs_lock);
			spin_unlock(&fs_info->buffer_lock);
			break;
		}
		spin_unlock(&fs_info->buffer_lock);

		/*
		 * If the tree ref isn't set then we know the ref on this eb
		 * is a real ref, so just return, this eb will likely be
		 * freed soon anyway.
		 */
		if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
			spin_unlock(&eb->refs_lock);
			break;
		}

		/*
		 * Here we don't care about the return value, we will always
		 * check the folio private at the end. And
		 * release_extent_buffer() will release the refs_lock.
		 */
		release_extent_buffer(eb);
	}
	/*
	 * Finally, check whether the folio private has been cleared: if we
	 * have released all ebs in the page, it should be cleared by now.
	 */
	spin_lock(&folio->mapping->i_private_lock);
	if (!folio_test_private(folio))
		ret = 1;
	else
		ret = 0;
	spin_unlock(&folio->mapping->i_private_lock);
	return ret;
}

int try_release_extent_buffer(struct folio *folio)
{
	struct extent_buffer *eb;

	if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
		return try_release_subpage_extent_buffer(folio);

	/*
	 * We need to make sure nobody is changing folio private, as we rely on
	 * folio private as the pointer to extent buffer.
	 */
	spin_lock(&folio->mapping->i_private_lock);
	if (!folio_test_private(folio)) {
		spin_unlock(&folio->mapping->i_private_lock);
		return 1;
	}

	eb = folio_get_private(folio);
	BUG_ON(!eb);

	/*
	 * This is a little awful but should be ok, we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&folio->mapping->i_private_lock);
		return 0;
	}
	spin_unlock(&folio->mapping->i_private_lock);

	/*
	 * If tree ref isn't set then we know the ref on this eb is a real ref,
	 * so just return, this page will likely be freed soon anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb);
}

/*
 * Attempt to readahead a child block.
 *
 * @fs_info:    the fs_info
 * @bytenr:     bytenr to read
 * @owner_root: objectid of the root that owns this eb
 * @gen:        generation for the uptodate check, can be 0
 * @level:      level for the eb
 *
 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
 * normal uptodate check of the eb, without checking the generation. If we have
 * to read the block we will not block on anything.
 */
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
				u64 bytenr, u64 owner_root, u64 gen, int level)
{
	struct btrfs_tree_parent_check check = {
		.has_first_key = 0,
		.level = level,
		.transid = gen
	};
	struct extent_buffer *eb;
	int ret;

	eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
	if (IS_ERR(eb))
		return;

	if (btrfs_buffer_uptodate(eb, gen, 1)) {
		free_extent_buffer(eb);
		return;
	}

	ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
	if (ret < 0)
		free_extent_buffer_stale(eb);
	else
		free_extent_buffer(eb);
}

/*
 * Readahead a node's child block.
 *
 * @node: parent node we're reading from
 * @slot: slot in the parent node for the child we want to read
 *
 * A helper for btrfs_readahead_tree_block(): we simply read the bytenr
 * pointed at by the given slot in the node provided.
 */
void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
{
	btrfs_readahead_tree_block(node->fs_info,
				   btrfs_node_blockptr(node, slot),
				   btrfs_header_owner(node),
				   btrfs_node_ptr_generation(node, slot),
				   btrfs_header_level(node) - 1);
}
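
/*
 * A typical readahead pattern (illustrative sketch): while walking a
 * node, kick off non-blocking reads for all children before descending.
 *
 *	for (int slot = 0; slot < btrfs_header_nritems(node); slot++)
 *		btrfs_readahead_node_child(node, slot);
 */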