// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page would ensure
 *   all nodesizes fit inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have done that for a while, thus only ancient
 *   filesystems could have such problem. For such case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning reading one tree block will only trigger the read for the
 *   needed range, other unrelated ranges in the same page will not be
 *   touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *	Page offset
 *	0	  16K	      32K	  48K	      64K
 *	|/////////|	      |///////////|
 *	    \- Tree block A	     \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page. This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
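 *
 * - Bitmap layout (a sketch, assuming 4K sectorsize on a 64K page)
 *   Each tracked state gets one bit per sector, and the per-state ranges
 *   are packed back to back into btrfs_subpage::bitmaps by
 *   btrfs_init_subpage_info(), 16 bits each in this example:
 *
 *	bit 0        16      32           48        64        80
 *	| uptodate  | dirty | writeback  | ordered | checked |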
 */

bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage.
	 * As we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!mapping || !mapping->host || is_data_inode(mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, which goes through the
	 * subpage routine only if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}

void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio doesn't have private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages, we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, folio, start, len);

	atomic_add(nbits, &subpage->readers);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = len >> fs_info->sectorsize_bits;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);
	is_data = is_data_inode(folio->mapping->host);
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * Please don't move the atomic_sub_and_test() call into the if ()
	 * condition below: the short-circuit of && would skip it for
	 * metadata, and we want it to always be executed.
	 */
	if (is_data && last)
		folio_unlock(folio);
}

static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0, subpage
	 * helpers can handle @len == 0 without any problem.
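	 *
	 * E.g. for a folio covering [64K, 128K) and an original range of
	 * [60K, 68K), the clamped range is [64K, 68K); for an original
	 * range of [0, 60K), which lies entirely before the folio, the
	 * result is @len == 0.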
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);
	int ret;

	btrfs_subpage_assert(fs_info, folio, start, len);

	ASSERT(atomic_read(&subpage->readers) == 0);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
}

bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
				       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int nbits = (len >> fs_info->sectorsize_bits);

	btrfs_subpage_assert(fs_info, folio, start, len);

	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0. Handle it in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0)
		return true;

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	return atomic_sub_and_test(nbits, &subpage->writers);
}

/*
 * Lock a folio for delalloc page writeback.
 *
 * Return -EAGAIN if the folio is not properly initialized.
 * Return 0 with the folio locked, and the writer counter updated.
 *
 * Even with 0 returned, the folio still needs an extra check to make sure
 * it's really the correct folio, as the caller is using
 * filemap_get_folios_contig(), which can race with folio invalidation.
 */
int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_lock(folio);
		return 0;
	}
	folio_lock(folio);
	if (!folio_test_private(folio) || !folio_get_private(folio)) {
		folio_unlock(folio);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	btrfs_subpage_start_writer(fs_info, folio, start, len);
	return 0;
}

void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
		folio_unlock(folio);
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})

#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
				  fs_info->subpage_info->name##_offset,	\
				  fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
				   fs_info->subpage_info->name##_offset,\
				   fs_info->subpage_info->bitmap_nr_bits)
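
/*
 * A worked example of subpage_calc_start_bit() (a sketch, assuming 4K
 * sectorsize on a 64K page, matching the layout in the file header):
 * for a dirty range starting at offset 8K within the folio,
 * offset_in_page() >> sectorsize_bits gives sector 2, and adding
 * dirty_offset (16 in that layout) yields start_bit 18, i.e. the third
 * bit of the dirty sub-bitmap.
 */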

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if the cleared bits were the last dirty bits in the bitmap,
 * i.e. the whole dirty bitmap is now clear.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the folio dirty flag for the true
 * case, as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depends on each folio's status, all test
 * operations check the bits in the same way.
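 *
 * E.g. IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate) below expands to
 * btrfs_subpage_test_uptodate(), which returns true only if every
 * sector in the given range has its uptodate bit set.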
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
							name, start, len); \
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
					len >> fs_info->sectorsize_bits); \
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in. We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);	\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);	\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);
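
/*
 * The macro above generates, for each state, plain helpers
 * (btrfs_folio_set/clear/test_##name) that expect the range to lie inside
 * the folio, and clamp variants (btrfs_folio_clamp_*) that first trim the
 * range to the folio's boundaries. As a usage sketch (the call site is
 * illustrative, not from this file):
 *
 *	btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);
 *
 * marks only the covered sectors dirty on a subpage filesystem, and falls
 * back to plain folio_mark_dirty() otherwise.
 */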

/*
 * Make sure not only the folio dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!folio_test_dirty(folio));
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}

/*
 * Handle different locked folios with different page sizes:
 *
 * - Folio locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by unlock_page().
 *   This is the most common locked folio for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Folio locked by lock_delalloc_pages()
 *   There is only one caller, all pages except @locked_page for
 *   extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle it.
 */
void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;

	ASSERT(folio_test_locked(folio));
	/* For the non-subpage case, we just unlock the folio. */
	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	subpage = folio_get_private(folio);

	/*
	 * For the subpage case, there are two types of locked folios: with or
	 * without a writers count.
	 *
	 * Since we own the folio lock, no one else could touch
	 * subpage::writers and we are safe to do several atomic operations
	 * without spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		/* No writers, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	/* Have writers, use the proper subpage helper to end it. */
	btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}

#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst)		\
	bitmap_cut(dst, subpage->bitmaps, 0,				\
		   subpage_info->name##_offset, subpage_info->bitmap_nr_bits)

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
	struct btrfs_subpage *subpage;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_info);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		   start, len, folio_pos(folio),
		   subpage_info->bitmap_nr_bits, &uptodate_bitmap,
		   subpage_info->bitmap_nr_bits, &dirty_bitmap,
		   subpage_info->bitmap_nr_bits, &writeback_bitmap,
		   subpage_info->bitmap_nr_bits, &ordered_bitmap,
		   subpage_info->bitmap_nr_bits, &checked_bitmap);
}