// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page would ensure
 *   all nodesizes fit inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and the kernel have avoided this for a while, thus only
 *   ancient filesystems could have such a problem.  For such a case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning that reading one tree block only triggers the read for the
 *   needed range; other unrelated ranges in the same page are not touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *	Page offset
 *	0	  16K	      32K	 48K        64K
 *	|/////////|	      |///////////|
 *	    \- Tree block A	   \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the
 *   extra granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on
 *   page locking anymore, or we will have greatly reduced concurrency or
 *   even deadlocks (hold one tree lock while trying to lock another tree
 *   lock in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking
 *   only.  This means a slightly higher tree locking latency.
 */

bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping.  And if page->mapping->host is a data inode, it's subpage,
	 * as we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!mapping || !mapping->host || is_data_inode(mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, where we only take the
	 * subpage routine if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}

void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->locked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}
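
/*
 * Worked example of the layout built above (just an illustration, assuming
 * the common 64K page / 4K sectorsize configuration, so nr_bits = 16):
 *
 *	bits  0..15   uptodate
 *	bits 16..31   dirty
 *	bits 32..47   writeback
 *	bits 48..63   ordered
 *	bits 64..79   checked
 *	bits 80..95   locked
 *
 * total_nr_bits would be 96; btrfs_alloc_subpage() rounds that up to whole
 * unsigned longs via BITS_TO_LONGS() when sizing btrfs_subpage::bitmaps.
 */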

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio has no private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of the current subpage.
 *
 * This is important for eb allocation, to prevent a race with the last eb
 * freeing of the same page.
 * With eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})
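
/*
 * For example (sticking with the 64K page / 4K sectorsize illustration
 * above), a range starting at folio_pos(folio) + 8K has
 * offset_in_page() == 8K, so the in-page sector index is 8K >> 12 = 2;
 * for the "dirty" bitmap the macro would return
 * dirty_offset + 2 = 16 + 2 = 18.
 */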

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * Even though it's just for reading the page, no one should have
	 * locked the subpage range.
	 */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	atomic_add(nbits, &subpage->readers);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);
	is_data = is_data_inode(folio->mapping->host);

	spin_lock_irqsave(&subpage->lock, flags);

	/* The range should have already been locked. */
	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
	ASSERT(atomic_read(&subpage->readers) >= nbits);

	bitmap_clear(subpage->bitmaps, start_bit, nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * And please don't replace @last with an atomic_sub_and_test() call
	 * inside the if () condition, as we want the atomic_sub_and_test()
	 * to always be executed.
	 */
	if (is_data && last)
		folio_unlock(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
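
/*
 * Reader lock usage sketch (a hypothetical caller, not a new API; one
 * sector read out of a locked data folio):
 *
 *	btrfs_subpage_start_reader(fs_info, folio, cur, fs_info->sectorsize);
 *	// ... submit the read for [cur, cur + sectorsize) ...
 *	// then, when that read completes:
 *	btrfs_subpage_end_reader(fs_info, folio, cur, fs_info->sectorsize);
 *
 * Each start/end pair covers exactly the same range; for data folios the
 * end call that drops subpage::readers to zero also unlocks the folio.
 */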

static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range.  In that case, just set @len to 0, as the
	 * subpage helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}
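
/*
 * Clamp example (illustrative numbers only): for a folio at offset 64K
 * with PAGE_SIZE == 64K and an input range of start = 60K, len = 12K,
 * the result is start = 64K, len = 8K, i.e. only the part of the range
 * that overlaps this folio.  A folio fully past the range ends up with
 * len == 0.
 */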

static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	int ret;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	ASSERT(atomic_read(&subpage->readers) == 0);
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
					      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * Such a @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0.  Handle it in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		spin_unlock_irqrestore(&subpage->lock, flags);
		return true;
	}

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	/* The target range should have been locked. */
	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
	bitmap_clear(subpage->bitmaps, start_bit, nbits);
	last = atomic_sub_and_test(nbits, &subpage->writers);
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

/*
 * Lock a folio for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and the writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
 */
int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_lock(folio);
		return 0;
	}
	folio_lock(folio);
	if (!folio_test_private(folio) || !folio_get_private(folio)) {
		folio_unlock(folio);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	btrfs_subpage_start_writer(fs_info, folio, start, len);
	return 0;
}

void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
		folio_unlock(folio);
}
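
/*
 * Writer lock usage sketch (a hypothetical delalloc caller; the range is
 * clamped to the folio internally, so the same @start/@len can be passed
 * for every folio in the range):
 *
 *	ret = btrfs_folio_start_writer_lock(fs_info, folio, start, len);
 *	if (ret == -EAGAIN) {
 *		// folio lost its private part (e.g. invalidated), regrab
 *	}
 *	// ... mark the range for writeback ...
 *	btrfs_folio_end_writer_lock(fs_info, folio, start, len);
 *
 * The folio itself is only unlocked once the last writer in the folio
 * calls btrfs_folio_end_writer_lock().
 */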

#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
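
/*
 * The aggregation rule illustrated (not new behavior, just the effect of
 * the two helpers above): the folio-level uptodate flag is only set once
 * every sector in the folio has its uptodate bit set, while clearing any
 * single sector immediately clears the folio-level flag.
 */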

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last set bits in the dirty bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the page dirty flag for the true
 * case, as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}
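
/*
 * Dirty rule illustrated: any dirty sector makes the whole folio dirty
 * (btrfs_subpage_set_dirty() calls folio_mark_dirty() unconditionally),
 * while the folio only stops being dirty once the last dirty sector is
 * cleared, as btrfs_subpage_clear_dirty() above shows.  Per the NOTE
 * above, tree block paths use btrfs_subpage_clear_and_test_dirty()
 * directly and handle the folio flag themselves.
 */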

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
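
/*
 * Folio flag transitions illustrated (a summary of the helpers above,
 * not new behavior):
 *
 *	uptodate:  folio flag set only when ALL sectors are uptodate,
 *		   cleared on the first sector clear.
 *	writeback: folio flag set on the first sector under writeback,
 *		   cleared only when the LAST sector finishes.
 *	ordered:   folio flag set on any ordered sector, cleared once all
 *		   sectors have their ordered bit cleared.
 */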

/*
 * Unlike set/clear, which depend on each page status, all the test
 * operations work in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
							name, start, len); \
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
					len >> fs_info->sectorsize_bits); \
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that, in selftests (extent-io-tests), we can have an empty fs_info
 * passed in.  We only test sectorsize == PAGE_SIZE cases there so far, thus
 * we can fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);
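
/*
 * As an example of what the macro above generates,
 * IMPLEMENT_BTRFS_PAGE_OPS(dirty, ...) produces:
 *
 *	btrfs_folio_set_dirty() / btrfs_folio_clear_dirty() /
 *	btrfs_folio_test_dirty()
 *	btrfs_folio_clamp_set_dirty() / btrfs_folio_clamp_clear_dirty() /
 *	btrfs_folio_clamp_test_dirty()
 *
 * The plain variants expect @start/@len to be fully inside the folio,
 * while the clamp_ variants first clamp the range to the folio, so they
 * can be called with a range spanning multiple folios.
 */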

/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!folio_test_dirty(folio));
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}

/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by unlock_page().
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Page locked by lock_delalloc_pages()
 *   There is only one caller, for all pages except @locked_page of
 *   extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle it.
 */
void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;

	ASSERT(folio_test_locked(folio));
	/* For the non-subpage case, we just unlock the page. */
	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	subpage = folio_get_private(folio);

	/*
	 * For the subpage case, there are two types of locked page: with or
	 * without a writers count.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		/* No writers, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	/* Have writers, use the proper subpage helper to end it. */
	btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}

#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst)		\
	bitmap_cut(dst, subpage->bitmaps, 0,				\
		   subpage_info->name##_offset,				\
		   subpage_info->bitmap_nr_bits)
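
/*
 * The intent of the macro above: copy the @name bitmap out of the packed
 * btrfs_subpage::bitmaps so it starts at bit 0 of @dst, ready for "%*pbl"
 * printing.  E.g. (with the 64K/4K layout sketched earlier) extracting
 * "dirty" yields the 16 bits that start at dirty_offset.
 */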

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
	struct btrfs_subpage *subpage;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long locked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_info);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, locked, &locked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		   start, len, folio_pos(folio),
		   subpage_info->bitmap_nr_bits, &uptodate_bitmap,
		   subpage_info->bitmap_nr_bits, &dirty_bitmap,
		   subpage_info->bitmap_nr_bits, &locked_bitmap,
		   subpage_info->bitmap_nr_bits, &writeback_bitmap,
		   subpage_info->bitmap_nr_bits, &ordered_bitmap,
		   subpage_info->bitmap_nr_bits, &checked_bitmap);
}