Lines matching the full identifier eb (extent buffer), apparently from fs/btrfs/extent_io.c in the Linux kernel. Each entry shows the original source line number, the matching line, and the enclosing function; the trailing "argument" / "local" notes how eb is bound at that line.

41 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)  in btrfs_leak_debug_add_eb()  argument
43 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_add_eb()
47 list_add(&eb->leak_list, &fs_info->allocated_ebs); in btrfs_leak_debug_add_eb()
51 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb) in btrfs_leak_debug_del_eb() argument
53 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_del_eb()
57 list_del(&eb->leak_list); in btrfs_leak_debug_del_eb()
63 struct extent_buffer *eb; in btrfs_extent_buffer_leak_debug_check() local
76 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
80 eb->start, eb->len, refcount_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
81 btrfs_header_owner(eb)); in btrfs_extent_buffer_leak_debug_check()
82 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
84 kmem_cache_free(extent_buffer_cache, eb); in btrfs_extent_buffer_leak_debug_check()
89 #define btrfs_leak_debug_add_eb(eb) do {} while (0) argument
90 #define btrfs_leak_debug_del_eb(eb) do {} while (0) argument
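The fragments above (source lines 41-90) outline the debug-build leak tracking: every extent buffer is linked into fs_info->allocated_ebs on allocation and unlinked on free, and anything still on the list at unmount is reported as a leak. A minimal reassembly of the two helpers; the CONFIG_BTRFS_DEBUG guard and the fs_info->eb_leak_lock spinlock are assumptions about the lines the listing elides:

	#ifdef CONFIG_BTRFS_DEBUG
	static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
	{
		struct btrfs_fs_info *fs_info = eb->fs_info;
		unsigned long flags;

		/* Lock name is an assumption; the listing skips these lines. */
		spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
		list_add(&eb->leak_list, &fs_info->allocated_ebs);
		spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
	}

	static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
	{
		struct btrfs_fs_info *fs_info = eb->fs_info;
		unsigned long flags;

		spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
		list_del(&eb->leak_list);
		spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
	}
	#else
	/* Non-debug builds compile the hooks away entirely. */
	#define btrfs_leak_debug_add_eb(eb)	do {} while (0)
	#define btrfs_leak_debug_del_eb(eb)	do {} while (0)
	#endif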
695 static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail) in alloc_eb_folio_array() argument
698 int num_pages = num_extent_pages(eb); in alloc_eb_folio_array()
706 eb->folios[i] = page_folio(page_array[i]); in alloc_eb_folio_array()
707 eb->folio_size = PAGE_SIZE; in alloc_eb_folio_array()
708 eb->folio_shift = PAGE_SHIFT; in alloc_eb_folio_array()
865 static int attach_extent_buffer_folio(struct extent_buffer *eb, in attach_extent_buffer_folio() argument
869 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_extent_buffer_folio()
883 folio_attach_private(folio, eb); in attach_extent_buffer_folio()
885 WARN_ON(folio_get_private(folio) != eb); in attach_extent_buffer_folio()
1902 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb, in lock_extent_buffer_for_io() argument
1905 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
1908 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1909 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
1910 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
1913 wait_on_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
1914 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1918 * We need to do this to prevent races with people who check if the eb is in lock_extent_buffer_for_io()
1922 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
1923 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
1924 XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); in lock_extent_buffer_for_io()
1927 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
1928 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1937 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
1939 -eb->len, in lock_extent_buffer_for_io()
1943 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1945 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
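Source lines 1902-1945 show the writeback handshake: take the tree lock, wait out any in-flight writeback, then atomically convert DIRTY into WRITEBACK under refs_lock while updating the xarray marks and the dirty-bytes counter. A condensed sketch of how the visible fragments fit together; the sync_mode bail-out and the xas mark flipping are assumptions about the elided lines:

	static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
								  struct writeback_control *wbc)
	{
		struct btrfs_fs_info *fs_info = eb->fs_info;
		bool ret = false;

		btrfs_tree_lock(eb);
		while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
			btrfs_tree_unlock(eb);
			if (wbc->sync_mode != WB_SYNC_ALL)	/* assumed */
				return false;
			wait_on_extent_buffer_writeback(eb);
			btrfs_tree_lock(eb);
		}

		/*
		 * Take refs_lock so anyone who checks whether the eb is under
		 * IO sees DIRTY and WRITEBACK change atomically.
		 */
		spin_lock(&eb->refs_lock);
		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
			XA_STATE(xas, &fs_info->buffer_tree,
				 eb->start >> fs_info->nodesize_bits);
			unsigned long flags;

			set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
			spin_unlock(&eb->refs_lock);

			/* Assumed: move the xarray mark from DIRTY to WRITEBACK. */
			xas_lock_irqsave(&xas, flags);
			xas_load(&xas);
			xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
			xas_unlock_irqrestore(&xas, flags);

			btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -eb->len,
						 fs_info->dirty_metadata_batch);
			ret = true;
		} else {
			spin_unlock(&eb->refs_lock);
		}
		btrfs_tree_unlock(eb);
		return ret;
	}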
1949 static void set_btree_ioerr(struct extent_buffer *eb) in set_btree_ioerr() argument
1951 struct btrfs_fs_info *fs_info = eb->fs_info; in set_btree_ioerr()
1953 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in set_btree_ioerr()
1959 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
1967 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO); in set_btree_ioerr()
1993 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is in set_btree_ioerr()
1994 * not done and would not be reliable - the eb might have been released in set_btree_ioerr()
2007 switch (eb->log_index) { in set_btree_ioerr()
2022 static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark) in buffer_tree_set_mark() argument
2024 struct btrfs_fs_info *fs_info = eb->fs_info; in buffer_tree_set_mark()
2025 XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); in buffer_tree_set_mark()
2034 static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark) in buffer_tree_clear_mark() argument
2036 struct btrfs_fs_info *fs_info = eb->fs_info; in buffer_tree_clear_mark()
2037 XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); in buffer_tree_clear_mark()
2051 void *eb; in buffer_tree_tag_for_writeback() local
2054 xas_for_each_marked(&xas, eb, end, PAGECACHE_TAG_DIRTY) { in buffer_tree_tag_for_writeback()
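Lines 2022-2054 maintain PAGECACHE_TAG_* marks on the buffer_tree xarray through an XA_STATE cursor, mirroring what the page cache does for inode mappings. A sketch of the two mark helpers; the xas locking and the xas_load() walk are assumptions about the elided middle lines:

	static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark)
	{
		struct btrfs_fs_info *fs_info = eb->fs_info;
		XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
		unsigned long flags;

		xas_lock_irqsave(&xas, flags);
		xas_load(&xas);		/* walk to the slot so the mark lands on it */
		xas_set_mark(&xas, mark);
		xas_unlock_irqrestore(&xas, flags);
	}

	static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark)
	{
		struct btrfs_fs_info *fs_info = eb->fs_info;
		XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits);
		unsigned long flags;

		xas_lock_irqsave(&xas, flags);
		xas_load(&xas);
		xas_clear_mark(&xas, mark);
		xas_unlock_irqrestore(&xas, flags);
	}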
2072 static inline bool eb_batch_add(struct eb_batch *batch, struct extent_buffer *eb) in eb_batch_add() argument
2074 batch->ebs[batch->nr++] = eb; in eb_batch_add()
2101 struct extent_buffer *eb; in find_get_eb() local
2104 eb = xas_find_marked(xas, max, mark); in find_get_eb()
2106 if (xas_retry(xas, eb)) in find_get_eb()
2109 if (!eb) in find_get_eb()
2112 if (!refcount_inc_not_zero(&eb->refs)) { in find_get_eb()
2117 if (unlikely(eb != xas_reload(xas))) { in find_get_eb()
2118 free_extent_buffer(eb); in find_get_eb()
2123 return eb; in find_get_eb()
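find_get_eb() (lines 2101-2123) is the xarray analogue of the page cache's find_get_entry(): find the next marked entry, skip retry markers, take a reference only if the refcount is not already zero, and re-validate the slot afterwards. A sketch under the assumption that the caller (buffer_tree_get_ebs_tag(), lines 2132-2137) holds rcu_read_lock() around the loop:

	static struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned long max,
						 xa_mark_t mark)
	{
		struct extent_buffer *eb;

	retry:
		eb = xas_find_marked(xas, max, mark);
		if (xas_retry(xas, eb))
			goto retry;
		if (!eb)
			return NULL;

		/* A zero refcount means the eb is already on its way out. */
		if (!refcount_inc_not_zero(&eb->refs)) {
			xas_reset(xas);		/* assumed recovery step */
			goto retry;
		}

		/* The slot may have been reused while we took the ref; re-check. */
		if (unlikely(eb != xas_reload(xas))) {
			free_extent_buffer(eb);
			xas_reset(xas);
			goto retry;
		}

		return eb;
	}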
2132 struct extent_buffer *eb; in buffer_tree_get_ebs_tag() local
2135 while ((eb = find_get_eb(&xas, end, tag)) != NULL) { in buffer_tree_get_ebs_tag()
2136 if (!eb_batch_add(batch, eb)) { in buffer_tree_get_ebs_tag()
2137 *start = ((eb->start + eb->len) >> fs_info->nodesize_bits); in buffer_tree_get_ebs_tag()
2158 struct extent_buffer *eb; in find_extent_buffer_nolock() local
2162 eb = xa_load(&fs_info->buffer_tree, index); in find_extent_buffer_nolock()
2163 if (eb && !refcount_inc_not_zero(&eb->refs)) in find_extent_buffer_nolock()
2164 eb = NULL; in find_extent_buffer_nolock()
2166 return eb; in find_extent_buffer_nolock()
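The lockless lookup on lines 2158-2166 pairs xa_load() with refcount_inc_not_zero() under RCU, so a buffer that already reached zero references is never handed out. Reassembled, with the index shift borrowed from the XA_STATE fragments above:

	static struct extent_buffer *find_extent_buffer_nolock(struct btrfs_fs_info *fs_info,
								u64 start)
	{
		struct extent_buffer *eb;
		const unsigned long index = start >> fs_info->nodesize_bits; /* shift assumed */

		rcu_read_lock();
		eb = xa_load(&fs_info->buffer_tree, index);
		if (eb && !refcount_inc_not_zero(&eb->refs))
			eb = NULL;	/* lost the race against the final put */
		rcu_read_unlock();
		return eb;
	}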
2171 struct extent_buffer *eb = bbio->private; in end_bbio_meta_write() local
2175 set_btree_ioerr(eb); in end_bbio_meta_write()
2178 btrfs_meta_folio_clear_writeback(fi.folio, eb); in end_bbio_meta_write()
2181 buffer_tree_clear_mark(eb, PAGECACHE_TAG_WRITEBACK); in end_bbio_meta_write()
2182 clear_and_wake_up_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in end_bbio_meta_write()
2186 static void prepare_eb_write(struct extent_buffer *eb) in prepare_eb_write() argument
2192 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in prepare_eb_write()
2195 nritems = btrfs_header_nritems(eb); in prepare_eb_write()
2196 if (btrfs_header_level(eb) > 0) { in prepare_eb_write()
2197 end = btrfs_node_key_ptr_offset(eb, nritems); in prepare_eb_write()
2198 memzero_extent_buffer(eb, end, eb->len - end); in prepare_eb_write()
2204 start = btrfs_item_nr_offset(eb, nritems); in prepare_eb_write()
2205 end = btrfs_item_nr_offset(eb, 0); in prepare_eb_write()
2207 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info); in prepare_eb_write()
2209 end += btrfs_item_offset(eb, nritems - 1); in prepare_eb_write()
2210 memzero_extent_buffer(eb, start, end - start); in prepare_eb_write()
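prepare_eb_write() (lines 2186-2210) zeroes the unused regions of the block before it hits disk, so stale kernel memory never leaks to storage: for a node everything past the last key pointer, for a leaf the gap between the item headers and the start of item data. Reassembled; the nritems == 0 branch condition is inferred from the two alternative "end +=" fragments:

	static void prepare_eb_write(struct extent_buffer *eb)
	{
		u32 nritems;
		unsigned long start;
		unsigned long end;

		clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);

		nritems = btrfs_header_nritems(eb);
		if (btrfs_header_level(eb) > 0) {
			/* Node: zero from the end of the key pointer array. */
			end = btrfs_node_key_ptr_offset(eb, nritems);
			memzero_extent_buffer(eb, end, eb->len - end);
		} else {
			/* Leaf: zero the hole between item headers and item data. */
			start = btrfs_item_nr_offset(eb, nritems);
			end = btrfs_item_nr_offset(eb, 0);
			if (nritems == 0)
				end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
			else
				end += btrfs_item_offset(eb, nritems - 1);
			memzero_extent_buffer(eb, start, end - start);
		}
	}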
2214 static noinline_for_stack void write_one_eb(struct extent_buffer *eb, in write_one_eb() argument
2217 struct btrfs_fs_info *fs_info = eb->fs_info; in write_one_eb()
2220 prepare_eb_write(eb); in write_one_eb()
2224 BTRFS_I(fs_info->btree_inode), eb->start, in write_one_eb()
2225 end_bbio_meta_write, eb); in write_one_eb()
2226 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in write_one_eb()
2229 for (int i = 0; i < num_extent_folios(eb); i++) { in write_one_eb()
2230 struct folio *folio = eb->folios[i]; in write_one_eb()
2231 u64 range_start = max_t(u64, eb->start, folio_pos(folio)); in write_one_eb()
2233 eb->start + eb->len) - range_start; in write_one_eb()
2236 btrfs_meta_folio_clear_dirty(folio, eb); in write_one_eb()
2237 btrfs_meta_folio_set_writeback(folio, eb); in write_one_eb()
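The loop in write_one_eb() (lines 2229-2237) clips each folio to the eb's byte range, since with large folios one folio can span more than one eb and vice versa. The elided tail of the loop presumably adds that range to the bio and submits it; a sketch of that shape, where bio_add_folio_nofail(), wbc_account_cgroup_owner() and btrfs_submit_bbio() are assumptions about the missing lines:

	for (int i = 0; i < num_extent_folios(eb); i++) {
		struct folio *folio = eb->folios[i];
		u64 range_start = max_t(u64, eb->start, folio_pos(folio));
		u32 range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
				      eb->start + eb->len) - range_start;

		folio_lock(folio);
		btrfs_meta_folio_clear_dirty(folio, eb);
		btrfs_meta_folio_set_writeback(folio, eb);
		bio_add_folio_nofail(&bbio->bio, folio, range_len,
				     offset_in_folio(folio, range_start));
		wbc_account_cgroup_owner(wbc, folio, range_len);
		folio_unlock(folio);
	}
	btrfs_submit_bbio(bbio, 0);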
2257 * Wait for all eb writeback in the given range to finish.
2274 struct extent_buffer *eb; in btrfs_btree_wait_writeback_range() local
2282 while ((eb = eb_batch_next(&batch)) != NULL) in btrfs_btree_wait_writeback_range()
2283 wait_on_extent_buffer_writeback(eb); in btrfs_btree_wait_writeback_range()
2330 struct extent_buffer *eb; in btree_write_cache_pages() local
2332 while ((eb = eb_batch_next(&batch)) != NULL) { in btree_write_cache_pages()
2333 ctx.eb = eb; in btree_write_cache_pages()
2335 ret = btrfs_check_meta_write_pointer(eb->fs_info, &ctx); in btree_write_cache_pages()
2347 if (!lock_extent_buffer_for_io(eb, wbc)) in btree_write_cache_pages()
2352 /* Mark the last eb in the block group. */ in btree_write_cache_pages()
2353 btrfs_schedule_zone_finish_bg(ctx.zoned_bg, eb); in btree_write_cache_pages()
2354 ctx.zoned_bg->meta_write_pointer += eb->len; in btree_write_cache_pages()
2356 write_one_eb(eb, wbc); in btree_write_cache_pages()
2395 * extent io tree. Thus we don't want to submit such a wild eb in btree_write_cache_pages()
2878 static int extent_buffer_under_io(const struct extent_buffer *eb) in extent_buffer_under_io() argument
2880 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
2881 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
2898 static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio) in detach_extent_buffer_folio() argument
2900 struct btrfs_fs_info *fs_info = eb->fs_info; in detach_extent_buffer_folio()
2902 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in detach_extent_buffer_folio()
2905 * For mapped eb, we're going to change the folio private, which should in detach_extent_buffer_folio()
2920 * the eb from the xarray, so we could race and have this page in detach_extent_buffer_folio()
2921 * now attached to the new eb. So only clear folio if it's in detach_extent_buffer_folio()
2922 * still connected to this eb. in detach_extent_buffer_folio()
2924 if (folio_test_private(folio) && folio_get_private(folio) == eb) { in detach_extent_buffer_folio()
2925 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in detach_extent_buffer_folio()
2928 /* We need to make sure we haven't been attached to a new eb. */ in detach_extent_buffer_folio()
2937 * For subpage, we can have dummy eb with folio private attached. In in detach_extent_buffer_folio()
2939 * attached to one dummy eb, no sharing. in detach_extent_buffer_folio()
2959 static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb) in btrfs_release_extent_buffer_folios() argument
2961 ASSERT(!extent_buffer_under_io(eb)); in btrfs_release_extent_buffer_folios()
2964 struct folio *folio = eb->folios[i]; in btrfs_release_extent_buffer_folios()
2969 detach_extent_buffer_folio(eb, folio); in btrfs_release_extent_buffer_folios()
2976 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) in btrfs_release_extent_buffer() argument
2978 btrfs_release_extent_buffer_folios(eb); in btrfs_release_extent_buffer()
2979 btrfs_leak_debug_del_eb(eb); in btrfs_release_extent_buffer()
2980 kmem_cache_free(extent_buffer_cache, eb); in btrfs_release_extent_buffer()
2986 struct extent_buffer *eb = NULL; in __alloc_extent_buffer() local
2988 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); in __alloc_extent_buffer()
2989 eb->start = start; in __alloc_extent_buffer()
2990 eb->len = fs_info->nodesize; in __alloc_extent_buffer()
2991 eb->fs_info = fs_info; in __alloc_extent_buffer()
2992 init_rwsem(&eb->lock); in __alloc_extent_buffer()
2994 btrfs_leak_debug_add_eb(eb); in __alloc_extent_buffer()
2996 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
2997 refcount_set(&eb->refs, 1); in __alloc_extent_buffer()
2999 ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE); in __alloc_extent_buffer()
3001 return eb; in __alloc_extent_buffer()
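__alloc_extent_buffer() (lines 2986-3001) is almost fully visible above; reassembled for readability. __GFP_NOFAIL means the allocation cannot return NULL, which is why no error check follows:

	static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info,
							   u64 start)
	{
		struct extent_buffer *eb;

		eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS | __GFP_NOFAIL);
		eb->start = start;
		eb->len = fs_info->nodesize;
		eb->fs_info = fs_info;
		init_rwsem(&eb->lock);

		btrfs_leak_debug_add_eb(eb);

		spin_lock_init(&eb->refs_lock);
		refcount_set(&eb->refs, 1);	/* the caller's reference */

		ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE);
		return eb;
	}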
3005 * For use in eb allocation error cleanup paths, as btrfs_release_extent_buffer()
3009 static void cleanup_extent_buffer_folios(struct extent_buffer *eb) in cleanup_extent_buffer_folios() argument
3011 const int num_folios = num_extent_folios(eb); in cleanup_extent_buffer_folios()
3013 /* We cannot use num_extent_folios() as the loop bound because eb->folios changes. */ in cleanup_extent_buffer_folios()
3015 ASSERT(eb->folios[i]); in cleanup_extent_buffer_folios()
3016 detach_extent_buffer_folio(eb, eb->folios[i]); in cleanup_extent_buffer_folios()
3017 folio_put(eb->folios[i]); in cleanup_extent_buffer_folios()
3018 eb->folios[i] = NULL; in cleanup_extent_buffer_folios()
3073 struct extent_buffer *eb; in alloc_dummy_extent_buffer() local
3076 eb = __alloc_extent_buffer(fs_info, start); in alloc_dummy_extent_buffer()
3077 if (!eb) in alloc_dummy_extent_buffer()
3080 ret = alloc_eb_folio_array(eb, false); in alloc_dummy_extent_buffer()
3084 for (int i = 0; i < num_extent_folios(eb); i++) { in alloc_dummy_extent_buffer()
3085 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL); in alloc_dummy_extent_buffer()
3089 for (int i = 0; i < num_extent_folios(eb); i++) in alloc_dummy_extent_buffer()
3090 folio_put(eb->folios[i]); in alloc_dummy_extent_buffer()
3092 set_extent_buffer_uptodate(eb); in alloc_dummy_extent_buffer()
3093 btrfs_set_header_nritems(eb, 0); in alloc_dummy_extent_buffer()
3094 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in alloc_dummy_extent_buffer()
3096 return eb; in alloc_dummy_extent_buffer()
3099 cleanup_extent_buffer_folios(eb); in alloc_dummy_extent_buffer()
3101 btrfs_release_extent_buffer(eb); in alloc_dummy_extent_buffer()
3105 static void check_buffer_tree_ref(struct extent_buffer *eb) in check_buffer_tree_ref() argument
3130 refs = refcount_read(&eb->refs); in check_buffer_tree_ref()
3131 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
3134 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
3135 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
3136 refcount_inc(&eb->refs); in check_buffer_tree_ref()
3137 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
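check_buffer_tree_ref() (lines 3105-3137) gives the xarray its own reference, tracked by the EXTENT_BUFFER_TREE_REF bit, so the tree's ref is added at most once. The fast path avoids refs_lock when the bit is already set and at least one other reference exists:

	static void check_buffer_tree_ref(struct extent_buffer *eb)
	{
		int refs;

		refs = refcount_read(&eb->refs);
		if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
			return;

		spin_lock(&eb->refs_lock);
		/* Only the first setter of TREE_REF contributes the extra ref. */
		if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
			refcount_inc(&eb->refs);
		spin_unlock(&eb->refs_lock);
	}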
3140 static void mark_extent_buffer_accessed(struct extent_buffer *eb) in mark_extent_buffer_accessed() argument
3142 check_buffer_tree_ref(eb); in mark_extent_buffer_accessed()
3144 for (int i = 0; i < num_extent_folios(eb); i++) in mark_extent_buffer_accessed()
3145 folio_mark_accessed(eb->folios[i]); in mark_extent_buffer_accessed()
3151 struct extent_buffer *eb; in find_extent_buffer() local
3153 eb = find_extent_buffer_nolock(fs_info, start); in find_extent_buffer()
3154 if (!eb) in find_extent_buffer()
3157 * Lock our eb's refs_lock to avoid races with free_extent_buffer(). in find_extent_buffer()
3158 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and in find_extent_buffer()
3160 * set, eb->refs == 2, that the buffer isn't under IO (dirty and in find_extent_buffer()
3164 * could race and increment the eb's reference count, clear its stale in find_extent_buffer()
3169 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
3170 spin_lock(&eb->refs_lock); in find_extent_buffer()
3171 spin_unlock(&eb->refs_lock); in find_extent_buffer()
3173 mark_extent_buffer_accessed(eb); in find_extent_buffer()
3174 return eb; in find_extent_buffer()
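find_extent_buffer() (lines 3151-3174) builds on the lockless lookup and then defuses the STALE race described in the comment fragments: briefly taking and releasing refs_lock orders this lookup against a concurrent free_extent_buffer() that may be dropping the tree reference. Reassembled:

	static struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
							u64 start)
	{
		struct extent_buffer *eb;

		eb = find_extent_buffer_nolock(fs_info, start);
		if (!eb)
			return NULL;

		/*
		 * Lock/unlock of refs_lock acts as a barrier against a racing
		 * free_extent_buffer(), so the reference we just took on a
		 * STALE buffer stays valid.
		 */
		if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
			spin_lock(&eb->refs_lock);
			spin_unlock(&eb->refs_lock);
		}
		mark_extent_buffer_accessed(eb);
		return eb;
	}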
3181 struct extent_buffer *eb, *exists = NULL; in alloc_test_extent_buffer() local
3184 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
3185 if (eb) in alloc_test_extent_buffer()
3186 return eb; in alloc_test_extent_buffer()
3187 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
3188 if (!eb) in alloc_test_extent_buffer()
3190 eb->fs_info = fs_info; in alloc_test_extent_buffer()
3194 NULL, eb, GFP_NOFS); in alloc_test_extent_buffer()
3198 btrfs_release_extent_buffer(eb); in alloc_test_extent_buffer()
3208 btrfs_release_extent_buffer(eb); in alloc_test_extent_buffer()
3212 check_buffer_tree_ref(eb); in alloc_test_extent_buffer()
3214 return eb; in alloc_test_extent_buffer()
3241 * We could have already allocated an eb for this folio and attached one in grab_extent_buffer()
3242 * so let's see if we can get a ref on the existing eb, and if we can we in grab_extent_buffer()
3256 * Validate alignment constraints of eb at logical address @start.
3289 * Return 0 if eb->folios[i] is attached to btree inode successfully.
3293 * than @eb.
3296 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, in attach_eb_folio_to_filemap() argument
3301 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_eb_folio_to_filemap()
3303 const pgoff_t index = eb->start >> PAGE_SHIFT; in attach_eb_folio_to_filemap()
3310 ASSERT(eb->folios[i]); in attach_eb_folio_to_filemap()
3314 ret = filemap_add_folio(mapping, eb->folios[i], index + i, in attach_eb_folio_to_filemap()
3327 if (folio_size(existing_folio) != eb->folio_size) { in attach_eb_folio_to_filemap()
3337 __free_page(folio_page(eb->folios[i], 0)); in attach_eb_folio_to_filemap()
3338 eb->folios[i] = existing_folio; in attach_eb_folio_to_filemap()
3352 __free_page(folio_page(eb->folios[i], 0)); in attach_eb_folio_to_filemap()
3353 eb->folios[i] = existing_folio; in attach_eb_folio_to_filemap()
3355 eb->folio_size = folio_size(eb->folios[i]); in attach_eb_folio_to_filemap()
3356 eb->folio_shift = folio_shift(eb->folios[i]); in attach_eb_folio_to_filemap()
3358 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc); in attach_eb_folio_to_filemap()
3361 * To indicate that we have an extra eb under allocation, so that in attach_eb_folio_to_filemap()
3363 * eb hasn't been inserted into the xarray yet. in attach_eb_folio_to_filemap()
3365 * The ref will be decreased when the eb releases the page, in in attach_eb_folio_to_filemap()
3369 btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]); in attach_eb_folio_to_filemap()
3378 struct extent_buffer *eb; in alloc_extent_buffer() local
3400 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
3401 if (eb) in alloc_extent_buffer()
3402 return eb; in alloc_extent_buffer()
3404 eb = __alloc_extent_buffer(fs_info, start); in alloc_extent_buffer()
3405 if (!eb) in alloc_extent_buffer()
3415 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level); in alloc_extent_buffer()
3434 ret = alloc_eb_folio_array(eb, true); in alloc_extent_buffer()
3441 for (int i = 0; i < num_extent_folios(eb); i++) { in alloc_extent_buffer()
3444 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb); in alloc_extent_buffer()
3452 * folios mismatch between the new eb and filemap. in alloc_extent_buffer()
3456 * - the new eb is using higher order folio in alloc_extent_buffer()
3459 * This can happen at the previous eb allocation, and we don't in alloc_extent_buffer()
3462 * - the existing eb has already been freed in alloc_extent_buffer()
3470 DEBUG_WARN("folio order mismatch between new eb and filemap"); in alloc_extent_buffer()
3476 * Only after attach_eb_folio_to_filemap(), eb->folios[] is in alloc_extent_buffer()
3480 folio = eb->folios[i]; in alloc_extent_buffer()
3481 WARN_ON(btrfs_meta_folio_test_dirty(folio, eb)); in alloc_extent_buffer()
3484 * Check if the current page is physically contiguous with previous eb in alloc_extent_buffer()
3489 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0)) in alloc_extent_buffer()
3492 if (!btrfs_meta_folio_test_uptodate(folio, eb)) in alloc_extent_buffer()
3503 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
3506 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start); in alloc_extent_buffer()
3510 start >> fs_info->nodesize_bits, NULL, eb, in alloc_extent_buffer()
3528 check_buffer_tree_ref(eb); in alloc_extent_buffer()
3535 for (int i = 0; i < num_extent_folios(eb); i++) { in alloc_extent_buffer()
3536 folio_unlock(eb->folios[i]); in alloc_extent_buffer()
3542 folio_put(eb->folios[i]); in alloc_extent_buffer()
3544 return eb; in alloc_extent_buffer()
3547 WARN_ON(!refcount_dec_and_test(&eb->refs)); in alloc_extent_buffer()
3552 * then attaching our eb to that folio. If we fail to insert our folio in alloc_extent_buffer()
3553 * we'll lookup the folio for that index, and grab that EB. We do not in alloc_extent_buffer()
3554 * want that to grab this eb, as we're getting ready to free it. So we in alloc_extent_buffer()
3559 for (int i = 0; i < num_extent_pages(eb); i++) { in alloc_extent_buffer()
3560 struct folio *folio = eb->folios[i]; in alloc_extent_buffer()
3564 detach_extent_buffer_folio(eb, folio); in alloc_extent_buffer()
3571 eb->folios[i] = NULL; in alloc_extent_buffer()
3573 btrfs_release_extent_buffer(eb); in alloc_extent_buffer()
3582 struct extent_buffer *eb = in btrfs_release_extent_buffer_rcu() local
3585 kmem_cache_free(extent_buffer_cache, eb); in btrfs_release_extent_buffer_rcu()
3588 static int release_extent_buffer(struct extent_buffer *eb) in release_extent_buffer() argument
3589 __releases(&eb->refs_lock) in release_extent_buffer()
3591 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
3593 if (refcount_dec_and_test(&eb->refs)) { in release_extent_buffer()
3594 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
3596 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3603 * this eb is actually in the tree or not, we could be cleaning in release_extent_buffer()
3604 * up an eb that we allocated but never inserted into the tree. in release_extent_buffer()
3613 eb->start >> fs_info->nodesize_bits, eb, NULL, in release_extent_buffer()
3616 btrfs_leak_debug_del_eb(eb); in release_extent_buffer()
3618 btrfs_release_extent_buffer_folios(eb); in release_extent_buffer()
3620 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
3621 kmem_cache_free(extent_buffer_cache, eb); in release_extent_buffer()
3625 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
3628 spin_unlock(&eb->refs_lock); in release_extent_buffer()
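release_extent_buffer() (lines 3588-3628) handles the final reference: remove the eb from buffer_tree only if the slot still points at it (the eb may never have been inserted), then free either immediately for unmapped (test) buffers or via RCU so lockless readers can still safely inspect the struct. A sketch; the xa_cmpxchg_irq() call shape around the visible argument fragment is an assumption:

	static int release_extent_buffer(struct extent_buffer *eb)
		__releases(&eb->refs_lock)
	{
		lockdep_assert_held(&eb->refs_lock);

		if (refcount_dec_and_test(&eb->refs)) {
			struct btrfs_fs_info *fs_info = eb->fs_info;

			spin_unlock(&eb->refs_lock);

			/* Erase the slot only if it still maps to this eb. */
			xa_cmpxchg_irq(&fs_info->buffer_tree,
				       eb->start >> fs_info->nodesize_bits, eb, NULL,
				       GFP_ATOMIC);

			btrfs_leak_debug_del_eb(eb);
			btrfs_release_extent_buffer_folios(eb);

			if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
				kmem_cache_free(extent_buffer_cache, eb);
				return 1;
			}
			/* Defer the free so RCU readers never see freed memory. */
			call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
			return 1;
		}

		spin_unlock(&eb->refs_lock);
		return 0;
	}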
3633 void free_extent_buffer(struct extent_buffer *eb) in free_extent_buffer() argument
3636 if (!eb) in free_extent_buffer()
3639 refs = refcount_read(&eb->refs); in free_extent_buffer()
3641 if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) { in free_extent_buffer()
3648 /* Optimization to avoid locking eb->refs_lock. */ in free_extent_buffer()
3649 if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1)) in free_extent_buffer()
3653 spin_lock(&eb->refs_lock); in free_extent_buffer()
3654 if (refcount_read(&eb->refs) == 2 && in free_extent_buffer()
3655 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
3656 !extent_buffer_under_io(eb) && in free_extent_buffer()
3657 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
3658 refcount_dec(&eb->refs); in free_extent_buffer()
3664 release_extent_buffer(eb); in free_extent_buffer()
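free_extent_buffer() (lines 3633-3664) first tries a lockless decrement with atomic_try_cmpxchg() and only falls back to refs_lock near the last references, where it may also drop the tree ref of a STALE buffer. A sketch; the exact refcount thresholds in the loop are assumptions about the elided lines:

	void free_extent_buffer(struct extent_buffer *eb)
	{
		int refs;

		if (!eb)
			return;

		refs = refcount_read(&eb->refs);
		while (1) {
			/*
			 * Assumed bounds: mapped ebs keep the tree ref, so stop
			 * short-circuiting once only a few references remain.
			 */
			if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) {
				if (refs == 1)
					break;
			} else if (refs <= 3) {
				break;
			}

			/* Optimization to avoid locking eb->refs_lock. */
			if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
				return;
		}

		spin_lock(&eb->refs_lock);
		if (refcount_read(&eb->refs) == 2 &&
		    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
		    !extent_buffer_under_io(eb) &&
		    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
			refcount_dec(&eb->refs);

		release_extent_buffer(eb);	/* drops refs_lock */
	}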
3667 void free_extent_buffer_stale(struct extent_buffer *eb) in free_extent_buffer_stale() argument
3669 if (!eb) in free_extent_buffer_stale()
3672 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
3673 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
3675 if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
3676 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
3677 refcount_dec(&eb->refs); in free_extent_buffer_stale()
3678 release_extent_buffer(eb); in free_extent_buffer_stale()
3693 struct extent_buffer *eb) in btrfs_clear_buffer_dirty() argument
3695 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_clear_buffer_dirty()
3697 btrfs_assert_tree_write_locked(eb); in btrfs_clear_buffer_dirty()
3699 if (trans && btrfs_header_generation(eb) != trans->transid) in btrfs_clear_buffer_dirty()
3711 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in btrfs_clear_buffer_dirty()
3712 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags); in btrfs_clear_buffer_dirty()
3716 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) in btrfs_clear_buffer_dirty()
3719 buffer_tree_clear_mark(eb, PAGECACHE_TAG_DIRTY); in btrfs_clear_buffer_dirty()
3720 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, in btrfs_clear_buffer_dirty()
3723 for (int i = 0; i < num_extent_folios(eb); i++) { in btrfs_clear_buffer_dirty()
3724 struct folio *folio = eb->folios[i]; in btrfs_clear_buffer_dirty()
3730 last = btrfs_meta_folio_clear_and_test_dirty(folio, eb); in btrfs_clear_buffer_dirty()
3735 WARN_ON(refcount_read(&eb->refs) == 0); in btrfs_clear_buffer_dirty()
3738 void set_extent_buffer_dirty(struct extent_buffer *eb) in set_extent_buffer_dirty() argument
3742 check_buffer_tree_ref(eb); in set_extent_buffer_dirty()
3744 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
3746 WARN_ON(refcount_read(&eb->refs) == 0); in set_extent_buffer_dirty()
3747 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
3748 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)); in set_extent_buffer_dirty()
3751 bool subpage = btrfs_meta_is_subpage(eb->fs_info); in set_extent_buffer_dirty()
3765 folio_lock(eb->folios[0]); in set_extent_buffer_dirty()
3766 for (int i = 0; i < num_extent_folios(eb); i++) in set_extent_buffer_dirty()
3767 btrfs_meta_folio_set_dirty(eb->folios[i], eb); in set_extent_buffer_dirty()
3768 buffer_tree_set_mark(eb, PAGECACHE_TAG_DIRTY); in set_extent_buffer_dirty()
3770 folio_unlock(eb->folios[0]); in set_extent_buffer_dirty()
3771 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes, in set_extent_buffer_dirty()
3772 eb->len, in set_extent_buffer_dirty()
3773 eb->fs_info->dirty_metadata_batch); in set_extent_buffer_dirty()
3776 for (int i = 0; i < num_extent_folios(eb); i++) in set_extent_buffer_dirty()
3777 ASSERT(folio_test_dirty(eb->folios[i])); in set_extent_buffer_dirty()
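set_extent_buffer_dirty() (lines 3738-3777) only pays for folio and xarray bookkeeping on the clean-to-dirty transition; on subpage filesystems several ebs can share eb->folios[0], so that folio is locked while the dirty bits are set. Reassembled, with the if (!was_dirty) structure inferred from the fragments:

	void set_extent_buffer_dirty(struct extent_buffer *eb)
	{
		bool was_dirty;

		check_buffer_tree_ref(eb);

		was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);

		WARN_ON(refcount_read(&eb->refs) == 0);
		WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
		WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));

		if (!was_dirty) {
			bool subpage = btrfs_meta_is_subpage(eb->fs_info);

			/* Serialize ebs sharing folio[0] on subpage setups. */
			if (subpage)
				folio_lock(eb->folios[0]);
			for (int i = 0; i < num_extent_folios(eb); i++)
				btrfs_meta_folio_set_dirty(eb->folios[i], eb);
			buffer_tree_set_mark(eb, PAGECACHE_TAG_DIRTY);
			if (subpage)
				folio_unlock(eb->folios[0]);
			percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
						 eb->len,
						 eb->fs_info->dirty_metadata_batch);
		}
	#ifdef CONFIG_BTRFS_DEBUG
		for (int i = 0; i < num_extent_folios(eb); i++)
			ASSERT(folio_test_dirty(eb->folios[i]));
	#endif
	}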
3781 void clear_extent_buffer_uptodate(struct extent_buffer *eb) in clear_extent_buffer_uptodate() argument
3784 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
3785 for (int i = 0; i < num_extent_folios(eb); i++) { in clear_extent_buffer_uptodate()
3786 struct folio *folio = eb->folios[i]; in clear_extent_buffer_uptodate()
3791 btrfs_meta_folio_clear_uptodate(folio, eb); in clear_extent_buffer_uptodate()
3795 void set_extent_buffer_uptodate(struct extent_buffer *eb) in set_extent_buffer_uptodate() argument
3798 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
3799 for (int i = 0; i < num_extent_folios(eb); i++) in set_extent_buffer_uptodate()
3800 btrfs_meta_folio_set_uptodate(eb->folios[i], eb); in set_extent_buffer_uptodate()
3803 static void clear_extent_buffer_reading(struct extent_buffer *eb) in clear_extent_buffer_reading() argument
3805 clear_and_wake_up_bit(EXTENT_BUFFER_READING, &eb->bflags); in clear_extent_buffer_reading()
3810 struct extent_buffer *eb = bbio->private; in end_bbio_meta_read() local
3818 WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)); in end_bbio_meta_read()
3820 eb->read_mirror = bbio->mirror_num; in end_bbio_meta_read()
3823 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0) in end_bbio_meta_read()
3827 set_extent_buffer_uptodate(eb); in end_bbio_meta_read()
3829 clear_extent_buffer_uptodate(eb); in end_bbio_meta_read()
3831 clear_extent_buffer_reading(eb); in end_bbio_meta_read()
3832 free_extent_buffer(eb); in end_bbio_meta_read()
3837 int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num, in read_extent_buffer_pages_nowait() argument
3840 struct btrfs_fs_info *fs_info = eb->fs_info; in read_extent_buffer_pages_nowait()
3843 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages_nowait()
3851 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) in read_extent_buffer_pages_nowait()
3855 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags)) in read_extent_buffer_pages_nowait()
3861 * started and finished reading the same eb. In this case, UPTODATE in read_extent_buffer_pages_nowait()
3864 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) { in read_extent_buffer_pages_nowait()
3865 clear_extent_buffer_reading(eb); in read_extent_buffer_pages_nowait()
3869 eb->read_mirror = 0; in read_extent_buffer_pages_nowait()
3870 check_buffer_tree_ref(eb); in read_extent_buffer_pages_nowait()
3871 refcount_inc(&eb->refs); in read_extent_buffer_pages_nowait()
3875 eb->start, end_bbio_meta_read, eb); in read_extent_buffer_pages_nowait()
3876 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in read_extent_buffer_pages_nowait()
3878 for (int i = 0; i < num_extent_folios(eb); i++) { in read_extent_buffer_pages_nowait()
3879 struct folio *folio = eb->folios[i]; in read_extent_buffer_pages_nowait()
3880 u64 range_start = max_t(u64, eb->start, folio_pos(folio)); in read_extent_buffer_pages_nowait()
3882 eb->start + eb->len) - range_start; in read_extent_buffer_pages_nowait()
3891 int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, in read_extent_buffer_pages() argument
3896 ret = read_extent_buffer_pages_nowait(eb, mirror_num, check); in read_extent_buffer_pages()
3900 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE); in read_extent_buffer_pages()
3901 if (unlikely(!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) in read_extent_buffer_pages()
3906 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, in report_eb_range() argument
3909 btrfs_warn(eb->fs_info, in report_eb_range()
3910 "access to eb bytenr %llu len %u out of range start %lu len %lu", in report_eb_range()
3911 eb->start, eb->len, start, len); in report_eb_range()
3919 * the eb.
3920 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3924 static inline int check_eb_range(const struct extent_buffer *eb, in check_eb_range() argument
3929 /* start, start + len should not go beyond eb->len nor overflow */ in check_eb_range()
3930 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
3931 return report_eb_range(eb, start, len); in check_eb_range()
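check_eb_range() (lines 3924-3931) rejects both out-of-bounds and integer-overflow cases with a single check_add_overflow(), then lets report_eb_range() warn and signal failure. Reassembled:

	static inline int check_eb_range(const struct extent_buffer *eb,
					 unsigned long start, unsigned long len)
	{
		unsigned long offset;

		/* start + len must neither overflow nor run past eb->len. */
		if (unlikely(check_add_overflow(start, len, &offset) ||
			     offset > eb->len))
			return report_eb_range(eb, start, len);

		return 0;
	}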
3936 void read_extent_buffer(const struct extent_buffer *eb, void *dstv, in read_extent_buffer() argument
3939 const int unit_size = eb->folio_size; in read_extent_buffer()
3943 unsigned long i = get_eb_folio_index(eb, start); in read_extent_buffer()
3945 if (check_eb_range(eb, start, len)) { in read_extent_buffer()
3954 if (eb->addr) { in read_extent_buffer()
3955 memcpy(dstv, eb->addr + start, len); in read_extent_buffer()
3959 offset = get_eb_offset_in_folio(eb, start); in read_extent_buffer()
3965 kaddr = folio_address(eb->folios[i]); in read_extent_buffer()
3975 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, in read_extent_buffer_to_user_nofault() argument
3979 const int unit_size = eb->folio_size; in read_extent_buffer_to_user_nofault()
3983 unsigned long i = get_eb_folio_index(eb, start); in read_extent_buffer_to_user_nofault()
3986 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
3987 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
3989 if (eb->addr) { in read_extent_buffer_to_user_nofault()
3990 if (copy_to_user_nofault(dstv, eb->addr + start, len)) in read_extent_buffer_to_user_nofault()
3995 offset = get_eb_offset_in_folio(eb, start); in read_extent_buffer_to_user_nofault()
4001 kaddr = folio_address(eb->folios[i]); in read_extent_buffer_to_user_nofault()
4016 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, in memcmp_extent_buffer() argument
4019 const int unit_size = eb->folio_size; in memcmp_extent_buffer()
4024 unsigned long i = get_eb_folio_index(eb, start); in memcmp_extent_buffer()
4027 if (check_eb_range(eb, start, len)) in memcmp_extent_buffer()
4030 if (eb->addr) in memcmp_extent_buffer()
4031 return memcmp(ptrv, eb->addr + start, len); in memcmp_extent_buffer()
4033 offset = get_eb_offset_in_folio(eb, start); in memcmp_extent_buffer()
4037 kaddr = folio_address(eb->folios[i]); in memcmp_extent_buffer()
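The accessor family (read_extent_buffer(), memcmp_extent_buffer(), and friends) shares one pattern: if eb->addr is set the backing pages are virtually contiguous and one plain memcpy/memcmp suffices, otherwise the range is walked folio by folio in unit_size chunks. A reassembled memcmp_extent_buffer() showing the pattern; the -EINVAL return for a bad range is an assumption:

	int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
				 unsigned long start, unsigned long len)
	{
		const int unit_size = eb->folio_size;
		size_t offset;
		char *kaddr;
		const char *ptr = ptrv;
		unsigned long i = get_eb_folio_index(eb, start);
		int ret = 0;

		if (check_eb_range(eb, start, len))
			return -EINVAL;

		/* Contiguous mapping: one memcmp covers the whole range. */
		if (eb->addr)
			return memcmp(ptrv, eb->addr + start, len);

		/* Otherwise compare up to one folio's worth per iteration. */
		offset = get_eb_offset_in_folio(eb, start);
		while (len > 0) {
			size_t cur = min(len, (size_t)(unit_size - offset));

			kaddr = folio_address(eb->folios[i]);
			ret = memcmp(ptr, kaddr + offset, cur);
			if (ret)
				break;

			ptr += cur;
			len -= cur;
			offset = 0;
			i++;
		}
		return ret;
	}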
4054 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4056 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i) in assert_eb_folio_uptodate() argument
4058 struct btrfs_fs_info *fs_info = eb->fs_info; in assert_eb_folio_uptodate()
4059 struct folio *folio = eb->folios[i]; in assert_eb_folio_uptodate()
4071 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in assert_eb_folio_uptodate()
4075 folio = eb->folios[0]; in assert_eb_folio_uptodate()
4078 eb->start, eb->len))) in assert_eb_folio_uptodate()
4079 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len); in assert_eb_folio_uptodate()
4085 static void __write_extent_buffer(const struct extent_buffer *eb, in __write_extent_buffer() argument
4089 const int unit_size = eb->folio_size; in __write_extent_buffer()
4094 unsigned long i = get_eb_folio_index(eb, start); in __write_extent_buffer()
4096 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __write_extent_buffer()
4098 if (check_eb_range(eb, start, len)) in __write_extent_buffer()
4101 if (eb->addr) { in __write_extent_buffer()
4103 memmove(eb->addr + start, srcv, len); in __write_extent_buffer()
4105 memcpy(eb->addr + start, srcv, len); in __write_extent_buffer()
4109 offset = get_eb_offset_in_folio(eb, start); in __write_extent_buffer()
4113 assert_eb_folio_uptodate(eb, i); in __write_extent_buffer()
4116 kaddr = folio_address(eb->folios[i]); in __write_extent_buffer()
4129 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, in write_extent_buffer() argument
4132 return __write_extent_buffer(eb, srcv, start, len, false); in write_extent_buffer()
4135 static void memset_extent_buffer(const struct extent_buffer *eb, int c, in memset_extent_buffer() argument
4138 const int unit_size = eb->folio_size; in memset_extent_buffer()
4141 if (eb->addr) { in memset_extent_buffer()
4142 memset(eb->addr + start, c, len); in memset_extent_buffer()
4147 unsigned long index = get_eb_folio_index(eb, cur); in memset_extent_buffer()
4148 unsigned int offset = get_eb_offset_in_folio(eb, cur); in memset_extent_buffer()
4151 assert_eb_folio_uptodate(eb, index); in memset_extent_buffer()
4152 memset(folio_address(eb->folios[index]) + offset, c, cur_len); in memset_extent_buffer()
4158 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, in memzero_extent_buffer() argument
4161 if (check_eb_range(eb, start, len)) in memzero_extent_buffer()
4163 return memset_extent_buffer(eb, 0, start, len); in memzero_extent_buffer()
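memset_extent_buffer() (lines 4135-4152) is the write-side instance of the same walk, clamping each step to what remains of the current folio; memzero_extent_buffer() is just the range-checked c == 0 wrapper. Reassembled:

	static void memset_extent_buffer(const struct extent_buffer *eb, int c,
					 unsigned long start, unsigned long len)
	{
		const int unit_size = eb->folio_size;
		unsigned long cur = start;

		if (eb->addr) {
			memset(eb->addr + start, c, len);
			return;
		}

		while (cur < start + len) {
			unsigned long index = get_eb_folio_index(eb, cur);
			unsigned int offset = get_eb_offset_in_folio(eb, cur);
			unsigned int cur_len = min(start + len - cur,
						   (unsigned long)(unit_size - offset));

			assert_eb_folio_uptodate(eb, index);
			memset(folio_address(eb->folios[index]) + offset, c, cur_len);
			cur += cur_len;
		}
	}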
4224 * @eb: the extent buffer
4234 static inline void eb_bitmap_offset(const struct extent_buffer *eb, in eb_bitmap_offset() argument
4247 offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset; in eb_bitmap_offset()
4249 *folio_index = offset >> eb->folio_shift; in eb_bitmap_offset()
4250 *folio_offset = offset_in_eb_folio(eb, offset); in eb_bitmap_offset()
4256 * @eb: the extent buffer
4260 bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, in extent_buffer_test_bit() argument
4267 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
4268 assert_eb_folio_uptodate(eb, i); in extent_buffer_test_bit()
4269 kaddr = folio_address(eb->folios[i]); in extent_buffer_test_bit()
4273 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr) in extent_buffer_get_byte() argument
4275 unsigned long index = get_eb_folio_index(eb, bytenr); in extent_buffer_get_byte()
4277 if (check_eb_range(eb, bytenr, 1)) in extent_buffer_get_byte()
4279 return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr); in extent_buffer_get_byte()
4285 * @eb: the extent buffer
4290 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, in extent_buffer_bitmap_set() argument
4303 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_set()
4310 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_set()
4313 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_set()
4321 * @eb: the extent buffer
4326 void extent_buffer_bitmap_clear(const struct extent_buffer *eb, in extent_buffer_bitmap_clear() argument
4340 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_clear()
4347 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_clear()
4350 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_clear()
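The bitmap helpers (lines 4290-4350) split a bit range into a partial first byte, whole middle bytes handled by memset_extent_buffer(), and a partial last byte. A reassembled extent_buffer_bitmap_set(); BIT_BYTE() and the BITMAP_*_BYTE_MASK() macros are assumed helpers from the same file:

	void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
				      unsigned long pos, unsigned long len)
	{
		unsigned int first_byte = start + BIT_BYTE(pos);
		unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
		const bool same_byte = (first_byte == last_byte);
		u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
		u8 *kaddr;

		if (same_byte)
			mask &= BITMAP_LAST_BYTE_MASK(pos + len);

		/* Partial bits in the first byte. */
		kaddr = extent_buffer_get_byte(eb, first_byte);
		*kaddr |= mask;
		if (same_byte)
			return;

		/* Whole bytes in the middle go through one memset. */
		memset_extent_buffer(eb, 0xff, first_byte + 1,
				     last_byte - first_byte - 1);

		/* Partial bits in the last byte. */
		kaddr = extent_buffer_get_byte(eb, last_byte);
		*kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
	}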
4451 struct extent_buffer *eb; in try_release_subpage_extent_buffer() local
4458 xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) { in try_release_subpage_extent_buffer()
4460 * The same as try_release_extent_buffer(), to ensure the eb in try_release_subpage_extent_buffer()
4463 spin_lock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4466 if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_subpage_extent_buffer()
4467 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4473 * If tree ref isn't set then we know the ref on this eb is a in try_release_subpage_extent_buffer()
4474 * real ref, so just return, this eb will likely be freed soon in try_release_subpage_extent_buffer()
4477 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_subpage_extent_buffer()
4478 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4487 release_extent_buffer(eb); in try_release_subpage_extent_buffer()
4507 struct extent_buffer *eb; in try_release_extent_buffer() local
4522 eb = folio_get_private(folio); in try_release_extent_buffer()
4523 BUG_ON(!eb); in try_release_extent_buffer()
4527 * the eb doesn't disappear out from under us while we're looking at in try_release_extent_buffer()
4530 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
4531 if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
4532 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4539 * If tree ref isn't set then we know the ref on this eb is a real ref, in try_release_extent_buffer()
4542 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
4543 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4547 return release_extent_buffer(eb); in try_release_extent_buffer()
4555 * @owner_root: objectid of the root that owns this eb
4557 * @level: level for the eb
4560 * normal uptodate check of the eb, without checking the generation. If we have
4570 struct extent_buffer *eb; in btrfs_readahead_tree_block() local
4573 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level); in btrfs_readahead_tree_block()
4574 if (IS_ERR(eb)) in btrfs_readahead_tree_block()
4577 if (btrfs_buffer_uptodate(eb, gen, true)) { in btrfs_readahead_tree_block()
4578 free_extent_buffer(eb); in btrfs_readahead_tree_block()
4582 ret = read_extent_buffer_pages_nowait(eb, 0, &check); in btrfs_readahead_tree_block()
4584 free_extent_buffer_stale(eb); in btrfs_readahead_tree_block()
4586 free_extent_buffer(eb); in btrfs_readahead_tree_block()
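btrfs_readahead_tree_block() (lines 4570-4586) shows the refcounting contract from the caller's side: create or find the block, skip it if already up to date for the wanted generation, otherwise fire an asynchronous read and drop our reference (stale on submission error). A sketch; the btrfs_tree_parent_check initialization is an assumption based on the parameters:

	void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 owner_root, u64 gen, int level)
	{
		struct btrfs_tree_parent_check check = {
			.level = level,
			.transid = gen,		/* assumed field usage */
		};
		struct extent_buffer *eb;
		int ret;

		eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
		if (IS_ERR(eb))
			return;

		/* Already read with a new enough generation: nothing to do. */
		if (btrfs_buffer_uptodate(eb, gen, true)) {
			free_extent_buffer(eb);
			return;
		}

		/* Fire-and-forget: the read completion drops its own reference. */
		ret = read_extent_buffer_pages_nowait(eb, 0, &check);
		if (ret < 0)
			free_extent_buffer_stale(eb);
		else
			free_extent_buffer(eb);
	}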